From 15dedf321036ae67c61f35a3ab55de767b07801f Mon Sep 17 00:00:00 2001
From: Christian Ehrhardt
Date: Fri, 1 Jun 2018 09:09:08 +0200
Subject: New upstream version 18.05

Change-Id: Ib515da8d5db1114f07a5fe26239d1c2061007c1c
Signed-off-by: Christian Ehrhardt
---
 drivers/net/Makefile | 12 +-
 drivers/net/af_packet/Makefile | 37 +-
 drivers/net/af_packet/rte_eth_af_packet.c | 175 +-
 drivers/net/ark/ark_ethdev.c | 13 +-
 drivers/net/avf/avf_ethdev.c | 34 +-
 drivers/net/avf/avf_rxtx.c | 9 +-
 drivers/net/avf/avf_rxtx.h | 10 +-
 drivers/net/avp/Makefile | 33 +-
 drivers/net/avp/avp_ethdev.c | 62 +-
 drivers/net/avp/avp_logs.h | 32 +-
 drivers/net/avp/rte_avp_common.h | 57 +-
 drivers/net/avp/rte_avp_fifo.h | 57 +-
 drivers/net/axgbe/Makefile | 35 +
 drivers/net/axgbe/axgbe_common.h | 1710 +
 drivers/net/axgbe/axgbe_dev.c | 1103 +
 drivers/net/axgbe/axgbe_ethdev.c | 772 +
 drivers/net/axgbe/axgbe_ethdev.h | 586 +
 drivers/net/axgbe/axgbe_i2c.c | 331 +
 drivers/net/axgbe/axgbe_logs.h | 26 +
 drivers/net/axgbe/axgbe_mdio.c | 1066 +
 drivers/net/axgbe/axgbe_phy.h | 192 +
 drivers/net/axgbe/axgbe_phy_impl.c | 2191 ++
 drivers/net/axgbe/axgbe_rxtx.c | 674 +
 drivers/net/axgbe/axgbe_rxtx.h | 186 +
 drivers/net/axgbe/axgbe_rxtx_vec_sse.c | 93 +
 drivers/net/axgbe/meson.build | 19 +
 drivers/net/axgbe/rte_pmd_axgbe_version.map | 4 +
 drivers/net/bnx2x/LICENSE.bnx2x_pmd | 29 +-
 drivers/net/bnx2x/Makefile | 9 +-
 drivers/net/bnx2x/bnx2x.c | 36 +-
 drivers/net/bnx2x/bnx2x.h | 4 +-
 drivers/net/bnx2x/bnx2x_ethdev.c | 34 +-
 drivers/net/bnx2x/bnx2x_ethdev.h | 4 +-
 drivers/net/bnx2x/bnx2x_logs.h | 4 +-
 drivers/net/bnx2x/bnx2x_rxtx.c | 15 +-
 drivers/net/bnx2x/bnx2x_rxtx.h | 4 +-
 drivers/net/bnx2x/bnx2x_stats.c | 6 +-
 drivers/net/bnx2x/bnx2x_stats.h | 6 +-
 drivers/net/bnx2x/bnx2x_vfpf.c | 4 +-
 drivers/net/bnx2x/bnx2x_vfpf.h | 4 +-
 drivers/net/bnx2x/ecore_fw_defs.h | 6 +-
 drivers/net/bnx2x/ecore_hsi.h | 6 +-
 drivers/net/bnx2x/ecore_init.h | 6 +-
 drivers/net/bnx2x/ecore_init_ops.h | 6 +-
 drivers/net/bnx2x/ecore_mfw_req.h | 6 +-
 drivers/net/bnx2x/ecore_reg.h | 6 +-
 drivers/net/bnx2x/ecore_sp.c | 6 +-
 drivers/net/bnx2x/ecore_sp.h | 6 +-
 drivers/net/bnx2x/elink.c | 350 +-
 drivers/net/bnx2x/elink.h | 6 +-
 drivers/net/bnxt/Makefile | 37 +-
 drivers/net/bnxt/bnxt.h | 42 +-
 drivers/net/bnxt/bnxt_cpr.c | 119 +-
 drivers/net/bnxt/bnxt_cpr.h | 39 +-
 drivers/net/bnxt/bnxt_ethdev.c | 301 +-
 drivers/net/bnxt/bnxt_filter.c | 171 +-
 drivers/net/bnxt/bnxt_filter.h | 36 +-
 drivers/net/bnxt/bnxt_hwrm.c | 254 +-
 drivers/net/bnxt/bnxt_hwrm.h | 40 +-
 drivers/net/bnxt/bnxt_irq.c | 62 +-
 drivers/net/bnxt/bnxt_irq.h | 34 +-
 drivers/net/bnxt/bnxt_nvm_defs.h | 11 +-
 drivers/net/bnxt/bnxt_ring.c | 122 +-
 drivers/net/bnxt/bnxt_ring.h | 39 +-
 drivers/net/bnxt/bnxt_rxq.c | 59 +-
 drivers/net/bnxt/bnxt_rxq.h | 36 +-
 drivers/net/bnxt/bnxt_rxr.c | 75 +-
 drivers/net/bnxt/bnxt_rxr.h | 56 +-
 drivers/net/bnxt/bnxt_stats.c | 59 +-
 drivers/net/bnxt/bnxt_stats.h | 34 +-
 drivers/net/bnxt/bnxt_txq.c | 49 +-
 drivers/net/bnxt/bnxt_txq.h | 36 +-
 drivers/net/bnxt/bnxt_txr.c | 38 +-
 drivers/net/bnxt/bnxt_txr.h | 34 +-
 drivers/net/bnxt/bnxt_vnic.c | 44 +-
 drivers/net/bnxt/bnxt_vnic.h | 35 +-
 drivers/net/bnxt/hsi_struct_def_dpdk.h | 35456 ++++++++++++++------
 drivers/net/bnxt/rte_pmd_bnxt.c | 34 +-
 drivers/net/bnxt/rte_pmd_bnxt.h | 34 +-
 drivers/net/bonding/Makefile | 1 +
 drivers/net/bonding/meson.build | 3 +-
 drivers/net/bonding/rte_eth_bond_8023ad.c | 126 +-
 drivers/net/bonding/rte_eth_bond_alb.c | 4 +-
 drivers/net/bonding/rte_eth_bond_api.c | 79 +-
 drivers/net/bonding/rte_eth_bond_args.c | 11 +-
 drivers/net/bonding/rte_eth_bond_flow.c | 228 +
 drivers/net/bonding/rte_eth_bond_pmd.c | 414 +-
 drivers/net/bonding/rte_eth_bond_private.h | 32 +-
 drivers/net/bonding/rte_pmd_bond_version.map | 1 +
 drivers/net/cxgbe/Makefile | 44 +-
 drivers/net/cxgbe/base/adapter.h | 75 +-
 drivers/net/cxgbe/base/common.h | 168 +-
 drivers/net/cxgbe/base/t4_chip_type.h | 34 +-
 drivers/net/cxgbe/base/t4_hw.c | 783 +-
 drivers/net/cxgbe/base/t4_hw.h | 34 +-
 drivers/net/cxgbe/base/t4_msg.h | 34 +-
 drivers/net/cxgbe/base/t4_pci_id_tbl.h | 34 +-
 drivers/net/cxgbe/base/t4_regs.h | 141 +-
 drivers/net/cxgbe/base/t4_regs_values.h | 34 +-
 drivers/net/cxgbe/base/t4fw_interface.h | 401 +-
 drivers/net/cxgbe/base/t4vf_hw.c | 874 +
 drivers/net/cxgbe/base/t4vf_hw.h | 15 +
 drivers/net/cxgbe/cxgbe.h | 51 +-
 drivers/net/cxgbe/cxgbe_compat.h | 34 +-
 drivers/net/cxgbe/cxgbe_ethdev.c | 358 +-
 drivers/net/cxgbe/cxgbe_main.c | 332 +-
 drivers/net/cxgbe/cxgbe_pfvf.h | 43 +
 drivers/net/cxgbe/cxgbevf_ethdev.c | 198 +
 drivers/net/cxgbe/cxgbevf_main.c | 311 +
 drivers/net/cxgbe/sge.c | 426 +-
 drivers/net/dpaa/Makefile | 3 +
 drivers/net/dpaa/dpaa_ethdev.c | 112 +-
 drivers/net/dpaa/dpaa_ethdev.h | 16 +-
 drivers/net/dpaa/dpaa_rxtx.c | 21 +-
 drivers/net/dpaa/meson.build | 14 +
 drivers/net/dpaa/rte_pmd_dpaa_version.map | 4 +-
 drivers/net/dpaa2/Makefile | 10 +-
 drivers/net/dpaa2/base/dpaa2_hw_dpni.c | 30 +-
 drivers/net/dpaa2/dpaa2_ethdev.c | 504 +-
 drivers/net/dpaa2/dpaa2_ethdev.h | 6 +
 drivers/net/dpaa2/dpaa2_pmd_logs.h | 41 +
 drivers/net/dpaa2/dpaa2_rxtx.c | 165 +-
 drivers/net/dpaa2/meson.build | 18 +
 drivers/net/e1000/Makefile | 4 +-
 drivers/net/e1000/base/e1000_82575.c | 5 +
 drivers/net/e1000/base/e1000_defines.h | 1 +
 drivers/net/e1000/base/e1000_phy.h | 8 +
 drivers/net/e1000/e1000_ethdev.h | 27 +-
 drivers/net/e1000/e1000_logs.c | 26 +
 drivers/net/e1000/e1000_logs.h | 6 +
 drivers/net/e1000/em_ethdev.c | 146 +-
 drivers/net/e1000/em_rxtx.c | 103 +-
 drivers/net/e1000/igb_ethdev.c | 245 +-
 drivers/net/e1000/igb_flow.c | 110 +-
 drivers/net/e1000/igb_rxtx.c | 171 +-
 drivers/net/e1000/meson.build | 1 +
 drivers/net/ena/Makefile | 3 +
 drivers/net/ena/base/ena_plat_dpdk.h | 9 +-
 drivers/net/ena/ena_ethdev.c | 79 +-
 drivers/net/enic/base/vnic_dev.c | 72 +-
 drivers/net/enic/base/vnic_dev.h | 8 +-
 drivers/net/enic/base/vnic_devcmd.h | 15 +
 drivers/net/enic/base/vnic_enet.h | 4 +
 drivers/net/enic/base/vnic_nic.h | 2 +
 drivers/net/enic/base/vnic_rq.h | 2 +
 drivers/net/enic/base/vnic_wq.h | 1 +
 drivers/net/enic/enic.h | 132 +-
 drivers/net/enic/enic_clsf.c | 21 +-
 drivers/net/enic/enic_ethdev.c | 323 +-
 drivers/net/enic/enic_flow.c | 87 +-
 drivers/net/enic/enic_main.c | 502 +-
 drivers/net/enic/enic_res.c | 85 +-
 drivers/net/enic/enic_res.h | 6 +
 drivers/net/enic/enic_rxtx.c | 106 +-
 drivers/net/enic/meson.build | 19 +
 drivers/net/failsafe/Makefile | 33 +-
 drivers/net/failsafe/failsafe.c | 53 +-
 drivers/net/failsafe/failsafe_args.c | 9 +-
 drivers/net/failsafe/failsafe_eal.c | 61 +-
 drivers/net/failsafe/failsafe_ether.c | 55 +-
 drivers/net/failsafe/failsafe_flow.c | 6 +-
 drivers/net/failsafe/failsafe_intr.c | 2 +-
 drivers/net/failsafe/failsafe_ops.c | 151 +-
 drivers/net/failsafe/failsafe_private.h | 18 +-
 drivers/net/failsafe/failsafe_rxtx.c | 2 +-
 drivers/net/fm10k/Makefile | 3 +-
 drivers/net/fm10k/fm10k.h | 3 +-
 drivers/net/fm10k/fm10k_ethdev.c | 101 +-
 drivers/net/fm10k/fm10k_rxtx_vec.c | 6 +-
 drivers/net/i40e/Makefile | 5 +-
 drivers/net/i40e/base/i40e_register.h | 24 +-
 drivers/net/i40e/i40e_ethdev.c | 678 +-
 drivers/net/i40e/i40e_ethdev.h | 48 +-
 drivers/net/i40e/i40e_ethdev_vf.c | 172 +-
 drivers/net/i40e/i40e_fdir.c | 2 +-
 drivers/net/i40e/i40e_flow.c | 217 +-
 drivers/net/i40e/i40e_rxtx.c | 238 +-
 drivers/net/i40e/i40e_rxtx.h | 3 +-
 drivers/net/i40e/i40e_rxtx_vec_common.h | 4 +-
 drivers/net/i40e/i40e_rxtx_vec_neon.c | 35 +-
 drivers/net/i40e/i40e_vf_representor.c | 531 +
 drivers/net/i40e/meson.build | 12 +-
 drivers/net/i40e/rte_pmd_i40e.c | 88 +-
 drivers/net/i40e/rte_pmd_i40e.h | 18 +
 drivers/net/ifc/Makefile | 35 +
 drivers/net/ifc/base/ifcvf.c | 298 +
 drivers/net/ifc/base/ifcvf.h | 154 +
 drivers/net/ifc/base/ifcvf_osdep.h | 52 +
 drivers/net/ifc/ifcvf_vdpa.c | 792 +
 drivers/net/ifc/rte_ifcvf_version.map | 4 +
 drivers/net/ixgbe/Makefile | 6 +-
 drivers/net/ixgbe/ixgbe_ethdev.c | 737 +-
 drivers/net/ixgbe/ixgbe_ethdev.h | 32 +-
 drivers/net/ixgbe/ixgbe_flow.c | 197 +-
 drivers/net/ixgbe/ixgbe_ipsec.c | 13 +-
 drivers/net/ixgbe/ixgbe_pf.c | 12 +-
 drivers/net/ixgbe/ixgbe_rxtx.c | 282 +-
 drivers/net/ixgbe/ixgbe_rxtx.h | 24 +-
 drivers/net/ixgbe/ixgbe_rxtx_vec_common.h | 5 -
 drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c | 2 +-
 drivers/net/ixgbe/ixgbe_vf_representor.c | 231 +
 drivers/net/ixgbe/meson.build | 3 +
 drivers/net/ixgbe/rte_pmd_ixgbe.c | 230 +
 drivers/net/ixgbe/rte_pmd_ixgbe.h | 84 +
 drivers/net/ixgbe/rte_pmd_ixgbe_version.map | 10 +
 drivers/net/kni/Makefile | 1 +
 drivers/net/kni/rte_eth_kni.c | 66 +-
 drivers/net/liquidio/lio_ethdev.c | 72 +-
 drivers/net/meson.build | 8 +-
 drivers/net/mlx4/Makefile | 43 +-
 drivers/net/mlx4/mlx4.c | 202 +-
 drivers/net/mlx4/mlx4.h | 51 +-
 drivers/net/mlx4/mlx4_ethdev.c | 208 +-
 drivers/net/mlx4/mlx4_flow.c | 240 +-
 drivers/net/mlx4/mlx4_flow.h | 6 +-
 drivers/net/mlx4/mlx4_glue.c | 2 +-
 drivers/net/mlx4/mlx4_glue.h | 2 +-
 drivers/net/mlx4/mlx4_intr.c | 2 +-
 drivers/net/mlx4/mlx4_mr.c | 1247 +-
 drivers/net/mlx4/mlx4_mr.h | 122 +
 drivers/net/mlx4/mlx4_prm.h | 2 +-
 drivers/net/mlx4/mlx4_rxq.c | 99 +-
 drivers/net/mlx4/mlx4_rxtx.c | 44 +-
 drivers/net/mlx4/mlx4_rxtx.h | 92 +-
 drivers/net/mlx4/mlx4_txq.c | 122 +-
 drivers/net/mlx4/mlx4_utils.c | 2 +-
 drivers/net/mlx4/mlx4_utils.h | 2 +-
 drivers/net/mlx5/Makefile | 67 +-
 drivers/net/mlx5/mlx5.c | 796 +-
 drivers/net/mlx5/mlx5.h | 315 +-
 drivers/net/mlx5/mlx5_defs.h | 48 +-
 drivers/net/mlx5/mlx5_ethdev.c | 1039 +-
 drivers/net/mlx5/mlx5_flow.c | 2215 +-
 drivers/net/mlx5/mlx5_glue.c | 40 +-
 drivers/net/mlx5/mlx5_glue.h | 18 +-
 drivers/net/mlx5/mlx5_mac.c | 159 +-
 drivers/net/mlx5/mlx5_mr.c | 1325 +-
 drivers/net/mlx5/mlx5_mr.h | 117 +
 drivers/net/mlx5/mlx5_nl.c | 627 +
 drivers/net/mlx5/mlx5_prm.h | 41 +-
 drivers/net/mlx5/mlx5_rss.c | 178 +-
 drivers/net/mlx5/mlx5_rxmode.c | 38 +-
 drivers/net/mlx5/mlx5_rxq.c | 1354 +-
 drivers/net/mlx5/mlx5_rxtx.c | 668 +-
 drivers/net/mlx5/mlx5_rxtx.h | 457 +-
 drivers/net/mlx5/mlx5_rxtx_vec.c | 40 +-
 drivers/net/mlx5/mlx5_rxtx_vec.h | 11 +-
 drivers/net/mlx5/mlx5_rxtx_vec_neon.h | 37 +-
 drivers/net/mlx5/mlx5_rxtx_vec_sse.h | 21 +-
 drivers/net/mlx5/mlx5_socket.c | 173 +-
 drivers/net/mlx5/mlx5_stats.c | 246 +-
 drivers/net/mlx5/mlx5_trigger.c | 274 +-
 drivers/net/mlx5/mlx5_txq.c | 422 +-
 drivers/net/mlx5/mlx5_utils.h | 31 +-
 drivers/net/mlx5/mlx5_vlan.c | 114 +-
 drivers/net/mrvl/Makefile | 68 -
 drivers/net/mrvl/mrvl_ethdev.c | 2511 --
 drivers/net/mrvl/mrvl_ethdev.h | 118 -
 drivers/net/mrvl/mrvl_qos.c | 636 -
 drivers/net/mrvl/mrvl_qos.h | 113 -
 drivers/net/mrvl/rte_pmd_mrvl_version.map | 3 -
 drivers/net/mvpp2/Makefile | 42 +
 drivers/net/mvpp2/meson.build | 25 +
 drivers/net/mvpp2/mrvl_ethdev.c | 2747 ++
 drivers/net/mvpp2/mrvl_ethdev.h | 101 +
 drivers/net/mvpp2/mrvl_flow.c | 2779 ++
 drivers/net/mvpp2/mrvl_qos.c | 894 +
 drivers/net/mvpp2/mrvl_qos.h | 107 +
 drivers/net/mvpp2/rte_pmd_mvpp2_version.map | 3 +
 drivers/net/nfp/Makefile | 17 +-
 drivers/net/nfp/nfp_net.c | 850 +-
 drivers/net/nfp/nfp_net_ctrl.h | 6 +
 drivers/net/nfp/nfp_net_eth.h | 82 -
 drivers/net/nfp/nfp_net_logs.h | 9 +-
 drivers/net/nfp/nfp_net_pmd.h | 46 +-
 drivers/net/nfp/nfp_nfpu.c | 108 -
 drivers/net/nfp/nfp_nfpu.h | 55 -
 drivers/net/nfp/nfp_nspu.c | 642 -
 drivers/net/nfp/nfp_nspu.h | 83 -
 drivers/net/nfp/nfpcore/nfp-common/nfp_cppat.h | 722 +
 drivers/net/nfp/nfpcore/nfp-common/nfp_platform.h | 36 +
 drivers/net/nfp/nfpcore/nfp-common/nfp_resid.h | 592 +
 drivers/net/nfp/nfpcore/nfp6000/nfp6000.h | 40 +
 drivers/net/nfp/nfpcore/nfp6000/nfp_xpb.h | 26 +
 drivers/net/nfp/nfpcore/nfp_cpp.h | 779 +
 drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c | 941 +
 drivers/net/nfp/nfpcore/nfp_cppcore.c | 857 +
 drivers/net/nfp/nfpcore/nfp_crc.c | 49 +
 drivers/net/nfp/nfpcore/nfp_crc.h | 19 +
 drivers/net/nfp/nfpcore/nfp_hwinfo.c | 199 +
 drivers/net/nfp/nfpcore/nfp_hwinfo.h | 85 +
 drivers/net/nfp/nfpcore/nfp_mip.c | 154 +
 drivers/net/nfp/nfpcore/nfp_mip.h | 21 +
 drivers/net/nfp/nfpcore/nfp_mutex.c | 424 +
 drivers/net/nfp/nfpcore/nfp_nffw.c | 235 +
 drivers/net/nfp/nfpcore/nfp_nffw.h | 86 +
 drivers/net/nfp/nfpcore/nfp_nsp.c | 427 +
 drivers/net/nfp/nfpcore/nfp_nsp.h | 304 +
 drivers/net/nfp/nfpcore/nfp_nsp_cmds.c | 109 +
 drivers/net/nfp/nfpcore/nfp_nsp_eth.c | 665 +
 drivers/net/nfp/nfpcore/nfp_resource.c | 266 +
 drivers/net/nfp/nfpcore/nfp_resource.h | 52 +
 drivers/net/nfp/nfpcore/nfp_rtsym.c | 327 +
 drivers/net/nfp/nfpcore/nfp_rtsym.h | 61 +
 drivers/net/nfp/nfpcore/nfp_target.h | 579 +
 drivers/net/null/meson.build | 1 +
 drivers/net/null/rte_eth_null.c | 82 +-
 drivers/net/octeontx/Makefile | 3 +-
 drivers/net/octeontx/base/octeontx_bgx.c | 22 +-
 drivers/net/octeontx/base/octeontx_pkivf.c | 10 +-
 drivers/net/octeontx/base/octeontx_pkivf.h | 10 +-
 drivers/net/octeontx/octeontx_ethdev.c | 102 +-
 drivers/net/octeontx/octeontx_ethdev.h | 4 +
 drivers/net/pcap/meson.build | 20 +-
 drivers/net/pcap/rte_eth_pcap.c | 75 +-
 drivers/net/qede/LICENSE.qede_pmd | 29 +-
 drivers/net/qede/Makefile | 7 +-
 drivers/net/qede/base/bcm_osal.c | 15 +-
 drivers/net/qede/base/bcm_osal.h | 4 +-
 drivers/net/qede/base/common_hsi.h | 12 +-
 drivers/net/qede/base/ecore.h | 32 +-
 drivers/net/qede/base/ecore_attn_values.h | 4 +-
 drivers/net/qede/base/ecore_chain.h | 53 +-
 drivers/net/qede/base/ecore_cxt.c | 34 +-
 drivers/net/qede/base/ecore_cxt.h | 8 +-
 drivers/net/qede/base/ecore_cxt_api.h | 4 +-
 drivers/net/qede/base/ecore_dcbx.c | 11 +-
 drivers/net/qede/base/ecore_dcbx.h | 4 +-
 drivers/net/qede/base/ecore_dcbx_api.h | 5 +-
 drivers/net/qede/base/ecore_dev.c | 330 +-
 drivers/net/qede/base/ecore_dev_api.h | 24 +-
 drivers/net/qede/base/ecore_gtt_reg_addr.h | 4 +-
 drivers/net/qede/base/ecore_gtt_values.h | 4 +-
 drivers/net/qede/base/ecore_hsi_common.h | 110 +-
 drivers/net/qede/base/ecore_hsi_debug_tools.h | 112 +-
 drivers/net/qede/base/ecore_hsi_eth.h | 33 +-
 drivers/net/qede/base/ecore_hsi_init_func.h | 40 +-
 drivers/net/qede/base/ecore_hsi_init_tool.h | 111 +-
 drivers/net/qede/base/ecore_hw.c | 126 +-
 drivers/net/qede/base/ecore_hw.h | 8 +-
 drivers/net/qede/base/ecore_hw_defs.h | 4 +-
 drivers/net/qede/base/ecore_init_fw_funcs.c | 214 +-
 drivers/net/qede/base/ecore_init_fw_funcs.h | 37 +-
 drivers/net/qede/base/ecore_init_ops.c | 36 +-
 drivers/net/qede/base/ecore_init_ops.h | 4 +-
 drivers/net/qede/base/ecore_int.c | 38 +-
 drivers/net/qede/base/ecore_int.h | 7 +-
 drivers/net/qede/base/ecore_int_api.h | 4 +-
 drivers/net/qede/base/ecore_iov_api.h | 15 +-
 drivers/net/qede/base/ecore_iro.h | 4 +-
 drivers/net/qede/base/ecore_iro_values.h | 68 +-
 drivers/net/qede/base/ecore_l2.c | 69 +-
 drivers/net/qede/base/ecore_l2.h | 4 +-
 drivers/net/qede/base/ecore_l2_api.h | 6 +-
 drivers/net/qede/base/ecore_mcp.c | 224 +-
 drivers/net/qede/base/ecore_mcp.h | 4 +-
 drivers/net/qede/base/ecore_mcp_api.h | 50 +-
 drivers/net/qede/base/ecore_mng_tlv.c | 8 +
 drivers/net/qede/base/ecore_proto_if.h | 7 +-
 drivers/net/qede/base/ecore_rt_defs.h | 595 +-
 drivers/net/qede/base/ecore_sp_api.h | 4 +-
 drivers/net/qede/base/ecore_sp_commands.c | 33 +-
 drivers/net/qede/base/ecore_sp_commands.h | 4 +-
 drivers/net/qede/base/ecore_spq.c | 40 +-
 drivers/net/qede/base/ecore_spq.h | 17 +-
 drivers/net/qede/base/ecore_sriov.c | 48 +-
 drivers/net/qede/base/ecore_sriov.h | 4 +-
 drivers/net/qede/base/ecore_status.h | 4 +-
 drivers/net/qede/base/ecore_utils.h | 4 +-
 drivers/net/qede/base/ecore_vf.c | 9 +-
 drivers/net/qede/base/ecore_vf.h | 4 +-
 drivers/net/qede/base/ecore_vf_api.h | 4 +-
 drivers/net/qede/base/ecore_vfpf_if.h | 12 +-
 drivers/net/qede/base/eth_common.h | 7 +-
 drivers/net/qede/base/mcp_public.h | 7 +-
 drivers/net/qede/base/nvm_cfg.h | 4 +-
 drivers/net/qede/base/reg_addr.h | 14 +-
 drivers/net/qede/qede_ethdev.c | 392 +-
 drivers/net/qede/qede_ethdev.h | 13 +-
 drivers/net/qede/qede_fdir.c | 8 +-
 drivers/net/qede/qede_if.h | 4 +-
 drivers/net/qede/qede_logs.h | 4 +-
 drivers/net/qede/qede_main.c | 13 +-
 drivers/net/qede/qede_rxtx.c | 24 +-
 drivers/net/qede/qede_rxtx.h | 9 +-
 drivers/net/ring/meson.build | 1 +
 drivers/net/ring/rte_eth_ring.c | 66 +-
 drivers/net/sfc/Makefile | 7 +-
 drivers/net/sfc/base/ef10_ev.c | 112 +-
 drivers/net/sfc/base/ef10_filter.c | 163 +-
 drivers/net/sfc/base/ef10_image.c | 885 +
 drivers/net/sfc/base/ef10_impl.h | 74 +-
 drivers/net/sfc/base/ef10_intr.c | 13 +-
 drivers/net/sfc/base/ef10_mac.c | 193 +-
 drivers/net/sfc/base/ef10_mcdi.c | 25 +-
 drivers/net/sfc/base/ef10_nic.c | 923 +-
 drivers/net/sfc/base/ef10_nvram.c | 33 +-
 drivers/net/sfc/base/ef10_phy.c | 126 +-
 drivers/net/sfc/base/ef10_rx.c | 184 +-
 drivers/net/sfc/base/ef10_signed_image_layout.h | 62 +
 drivers/net/sfc/base/ef10_tlv_layout.h | 115 +-
 drivers/net/sfc/base/ef10_tx.c | 67 +-
 drivers/net/sfc/base/ef10_vpd.c | 37 +-
 drivers/net/sfc/base/efx.h | 425 +-
 drivers/net/sfc/base/efx_bootcfg.c | 89 +-
 drivers/net/sfc/base/efx_check.h | 122 +-
 drivers/net/sfc/base/efx_ev.c | 10 +-
 drivers/net/sfc/base/efx_filter.c | 71 +-
 drivers/net/sfc/base/efx_impl.h | 134 +-
 drivers/net/sfc/base/efx_intr.c | 21 +-
 drivers/net/sfc/base/efx_lic.c | 26 +-
 drivers/net/sfc/base/efx_mac.c | 38 +-
 drivers/net/sfc/base/efx_mcdi.c | 95 +-
 drivers/net/sfc/base/efx_mcdi.h | 4 +-
 drivers/net/sfc/base/efx_mon.c | 6 +-
 drivers/net/sfc/base/efx_nic.c | 304 +-
 drivers/net/sfc/base/efx_nvram.c | 10 +-
 drivers/net/sfc/base/efx_phy.c | 14 +-
 drivers/net/sfc/base/efx_port.c | 5 +-
 drivers/net/sfc/base/efx_regs_ef10.h | 230 +-
 drivers/net/sfc/base/efx_regs_mcdi.h | 9493 ++++--
 drivers/net/sfc/base/efx_regs_mcdi_aoe.h | 2914 ++
 drivers/net/sfc/base/efx_rx.c | 245 +-
 drivers/net/sfc/base/efx_sram.c | 14 +-
 drivers/net/sfc/base/efx_tunnel.c | 32 +-
 drivers/net/sfc/base/efx_tx.c | 56 +-
 drivers/net/sfc/base/efx_types.h | 38 +-
 drivers/net/sfc/base/efx_vpd.c | 10 +-
 drivers/net/sfc/base/hunt_nic.c | 172 +-
 drivers/net/sfc/base/mcdi_mon.c | 9 +
 drivers/net/sfc/base/medford2_impl.h | 35 +
 drivers/net/sfc/base/medford2_nic.c | 162 +
 drivers/net/sfc/base/medford_nic.c | 240 +-
 drivers/net/sfc/base/meson.build | 4 +-
 drivers/net/sfc/base/siena_flash.h | 9 +-
 drivers/net/sfc/base/siena_mac.c | 31 +-
 drivers/net/sfc/base/siena_mcdi.c | 12 +-
 drivers/net/sfc/base/siena_nic.c | 24 +
 drivers/net/sfc/base/siena_nvram.c | 17 +-
 drivers/net/sfc/base/siena_phy.c | 9 +-
 drivers/net/sfc/base/siena_vpd.c | 25 +-
 drivers/net/sfc/efsys.h | 17 +-
 drivers/net/sfc/meson.build | 5 +-
 drivers/net/sfc/sfc.c | 350 +-
 drivers/net/sfc/sfc.h | 41 +-
 drivers/net/sfc/sfc_dp.c | 5 +-
 drivers/net/sfc/sfc_dp.h | 9 +-
 drivers/net/sfc/sfc_dp_rx.h | 29 +-
 drivers/net/sfc/sfc_dp_tx.h | 2 +
 drivers/net/sfc/sfc_ef10.h | 34 +
 drivers/net/sfc/sfc_ef10_essb_rx.c | 700 +
 drivers/net/sfc/sfc_ef10_rx.c | 181 +-
 drivers/net/sfc/sfc_ef10_rx_ev.h | 169 +
 drivers/net/sfc/sfc_ef10_tx.c | 2 +-
 drivers/net/sfc/sfc_ethdev.c | 223 +-
 drivers/net/sfc/sfc_ev.c | 56 +-
 drivers/net/sfc/sfc_flow.c | 1243 +-
 drivers/net/sfc/sfc_flow.h | 23 +-
 drivers/net/sfc/sfc_intr.c | 6 +-
 drivers/net/sfc/sfc_kvargs.c | 4 +-
 drivers/net/sfc/sfc_kvargs.h | 24 +-
 drivers/net/sfc/sfc_log.h | 77 +-
 drivers/net/sfc/sfc_mcdi.c | 25 +-
 drivers/net/sfc/sfc_port.c | 68 +-
 drivers/net/sfc/sfc_rx.c | 398 +-
 drivers/net/sfc/sfc_rx.h | 11 +-
 drivers/net/sfc/sfc_tso.c | 3 +-
 drivers/net/sfc/sfc_tweak.h | 8 +
 drivers/net/sfc/sfc_tx.c | 73 +-
 drivers/net/softnic/rte_eth_softnic.c | 45 +-
 drivers/net/softnic/rte_eth_softnic_tm.c | 23 +-
 drivers/net/szedata2/Makefile | 33 +-
 drivers/net/szedata2/rte_eth_szedata2.c | 930 +-
 drivers/net/szedata2/rte_eth_szedata2.h | 37 +-
 drivers/net/szedata2/szedata2_iobuf.c | 203 -
 drivers/net/szedata2/szedata2_iobuf.h | 356 -
 drivers/net/szedata2/szedata2_logs.h | 22 +
 drivers/net/tap/rte_eth_tap.c | 645 +-
 drivers/net/tap/rte_eth_tap.h | 13 +-
 drivers/net/tap/tap_bpf.h | 2 +-
 drivers/net/tap/tap_bpf_api.c | 2 +-
 drivers/net/tap/tap_bpf_insns.h | 2 +-
 drivers/net/tap/tap_bpf_program.c | 4 +-
 drivers/net/tap/tap_flow.c | 126 +-
 drivers/net/tap/tap_flow.h | 2 +-
 drivers/net/tap/tap_intr.c | 4 +-
 drivers/net/tap/tap_log.h | 10 +
 drivers/net/tap/tap_netlink.c | 20 +-
 drivers/net/tap/tap_netlink.h | 2 +-
 drivers/net/tap/tap_rss.h | 8 +-
 drivers/net/tap/tap_tcmsgs.c | 11 +-
 drivers/net/tap/tap_tcmsgs.h | 2 +-
 drivers/net/thunderx/base/nicvf_hw_defs.h | 5 +-
 drivers/net/thunderx/nicvf_ethdev.c | 141 +-
 drivers/net/vdev_netvsc/Makefile | 2 +-
 drivers/net/vdev_netvsc/vdev_netvsc.c | 275 +-
 drivers/net/vhost/rte_eth_vhost.c | 396 +-
 drivers/net/vhost/rte_eth_vhost.h | 35 +-
 drivers/net/virtio/meson.build | 27 +
 drivers/net/virtio/virtio_ethdev.c | 166 +-
 drivers/net/virtio/virtio_ethdev.h | 8 +
 drivers/net/virtio/virtio_rxtx.c | 9 +-
 drivers/net/virtio/virtio_user/vhost_kernel.c | 86 +-
 drivers/net/virtio/virtio_user/vhost_kernel_tap.c | 14 +-
 drivers/net/virtio/virtio_user/vhost_kernel_tap.h | 3 +-
 drivers/net/virtio/virtio_user/vhost_user.c | 76 +-
 drivers/net/virtio/virtio_user/virtio_user_dev.c | 175 +-
 drivers/net/virtio/virtio_user/virtio_user_dev.h | 8 +-
 drivers/net/virtio/virtio_user_ethdev.c | 131 +-
 drivers/net/vmxnet3/Makefile | 4 +-
 drivers/net/vmxnet3/base/upt1_defs.h | 7 +-
 drivers/net/vmxnet3/base/vmxnet3_defs.h | 34 +-
 drivers/net/vmxnet3/vmxnet3_ethdev.c | 150 +-
 drivers/net/vmxnet3/vmxnet3_ethdev.h | 1 +
 drivers/net/vmxnet3/vmxnet3_ring.h | 2 +-
 drivers/net/vmxnet3/vmxnet3_rxtx.c | 212 +-
 516 files changed, 92720 insertions(+), 32764 deletions(-)
 create mode 100644 drivers/net/axgbe/Makefile
 create mode 100644 drivers/net/axgbe/axgbe_common.h
 create mode 100644 drivers/net/axgbe/axgbe_dev.c
 create mode 100644 drivers/net/axgbe/axgbe_ethdev.c
 create mode 100644 drivers/net/axgbe/axgbe_ethdev.h
 create mode 100644 drivers/net/axgbe/axgbe_i2c.c
 create mode 100644 drivers/net/axgbe/axgbe_logs.h
 create mode 100644 drivers/net/axgbe/axgbe_mdio.c
 create mode 100644 drivers/net/axgbe/axgbe_phy.h
 create mode 100644 drivers/net/axgbe/axgbe_phy_impl.c
 create mode 100644 drivers/net/axgbe/axgbe_rxtx.c
 create mode 100644 drivers/net/axgbe/axgbe_rxtx.h
 create mode 100644 drivers/net/axgbe/axgbe_rxtx_vec_sse.c
 create mode 100644 drivers/net/axgbe/meson.build
 create mode 100644 drivers/net/axgbe/rte_pmd_axgbe_version.map
 create mode 100644 drivers/net/bonding/rte_eth_bond_flow.c
 create mode 100644 drivers/net/cxgbe/base/t4vf_hw.c
 create mode 100644 drivers/net/cxgbe/base/t4vf_hw.h
 create mode 100644 drivers/net/cxgbe/cxgbe_pfvf.h
 create mode 100644 drivers/net/cxgbe/cxgbevf_ethdev.c
 create mode 100644 drivers/net/cxgbe/cxgbevf_main.c
 create mode 100644 drivers/net/dpaa/meson.build
 create mode 100644 drivers/net/dpaa2/dpaa2_pmd_logs.h
 create mode 100644 drivers/net/dpaa2/meson.build
 create mode 100644 drivers/net/e1000/e1000_logs.c
 create mode 100644 drivers/net/enic/meson.build
 create mode 100644 drivers/net/i40e/i40e_vf_representor.c
 create mode 100644 drivers/net/ifc/Makefile
 create mode 100644 drivers/net/ifc/base/ifcvf.c
 create mode 100644 drivers/net/ifc/base/ifcvf.h
 create mode 100644 drivers/net/ifc/base/ifcvf_osdep.h
 create mode 100644 drivers/net/ifc/ifcvf_vdpa.c
 create mode 100644 drivers/net/ifc/rte_ifcvf_version.map
 create mode 100644 drivers/net/ixgbe/ixgbe_vf_representor.c
 create mode 100644 drivers/net/mlx4/mlx4_mr.h
 create mode 100644 drivers/net/mlx5/mlx5_mr.h
 create mode 100644 drivers/net/mlx5/mlx5_nl.c
 delete mode 100644 drivers/net/mrvl/Makefile
 delete mode 100644 drivers/net/mrvl/mrvl_ethdev.c
 delete mode 100644 drivers/net/mrvl/mrvl_ethdev.h
 delete mode 100644 drivers/net/mrvl/mrvl_qos.c
 delete mode 100644 drivers/net/mrvl/mrvl_qos.h
 delete mode 100644 drivers/net/mrvl/rte_pmd_mrvl_version.map
 create mode 100644 drivers/net/mvpp2/Makefile
 create mode 100644 drivers/net/mvpp2/meson.build
 create mode 100644 drivers/net/mvpp2/mrvl_ethdev.c
 create mode 100644 drivers/net/mvpp2/mrvl_ethdev.h
 create mode 100644 drivers/net/mvpp2/mrvl_flow.c
 create mode 100644 drivers/net/mvpp2/mrvl_qos.c
 create mode 100644 drivers/net/mvpp2/mrvl_qos.h
 create mode 100644 drivers/net/mvpp2/rte_pmd_mvpp2_version.map
 delete mode 100644 drivers/net/nfp/nfp_net_eth.h
 delete mode 100644 drivers/net/nfp/nfp_nfpu.c
 delete mode 100644 drivers/net/nfp/nfp_nfpu.h
 delete mode 100644 drivers/net/nfp/nfp_nspu.c
 delete mode 100644 drivers/net/nfp/nfp_nspu.h
 create mode 100644 drivers/net/nfp/nfpcore/nfp-common/nfp_cppat.h
 create mode 100644 drivers/net/nfp/nfpcore/nfp-common/nfp_platform.h
 create mode 100644 drivers/net/nfp/nfpcore/nfp-common/nfp_resid.h
 create mode 100644 drivers/net/nfp/nfpcore/nfp6000/nfp6000.h
 create mode 100644 drivers/net/nfp/nfpcore/nfp6000/nfp_xpb.h
 create mode 100644 drivers/net/nfp/nfpcore/nfp_cpp.h
 create mode 100644 drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c
 create mode 100644 drivers/net/nfp/nfpcore/nfp_cppcore.c
 create mode 100644 drivers/net/nfp/nfpcore/nfp_crc.c
 create mode 100644 drivers/net/nfp/nfpcore/nfp_crc.h
 create mode 100644 drivers/net/nfp/nfpcore/nfp_hwinfo.c
 create mode 100644 drivers/net/nfp/nfpcore/nfp_hwinfo.h
 create mode 100644 drivers/net/nfp/nfpcore/nfp_mip.c
 create mode 100644 drivers/net/nfp/nfpcore/nfp_mip.h
 create mode 100644 drivers/net/nfp/nfpcore/nfp_mutex.c
 create mode 100644 drivers/net/nfp/nfpcore/nfp_nffw.c
 create mode 100644 drivers/net/nfp/nfpcore/nfp_nffw.h
 create mode 100644 drivers/net/nfp/nfpcore/nfp_nsp.c
 create mode 100644 drivers/net/nfp/nfpcore/nfp_nsp.h
 create mode 100644 drivers/net/nfp/nfpcore/nfp_nsp_cmds.c
 create mode 100644 drivers/net/nfp/nfpcore/nfp_nsp_eth.c
 create mode 100644 drivers/net/nfp/nfpcore/nfp_resource.c
 create mode 100644 drivers/net/nfp/nfpcore/nfp_resource.h
 create mode 100644 drivers/net/nfp/nfpcore/nfp_rtsym.c
 create mode 100644 drivers/net/nfp/nfpcore/nfp_rtsym.h
 create mode 100644 drivers/net/nfp/nfpcore/nfp_target.h
 create mode 100644 drivers/net/sfc/base/ef10_image.c
 create mode 100644 drivers/net/sfc/base/ef10_signed_image_layout.h
 create mode 100644 drivers/net/sfc/base/efx_regs_mcdi_aoe.h
 create mode 100644 drivers/net/sfc/base/medford2_impl.h
 create mode 100644 drivers/net/sfc/base/medford2_nic.c
 create mode 100644 drivers/net/sfc/sfc_ef10_essb_rx.c
 create mode 100644 drivers/net/sfc/sfc_ef10_rx_ev.h
 delete mode 100644 drivers/net/szedata2/szedata2_iobuf.c
 delete mode 100644 drivers/net/szedata2/szedata2_iobuf.h
 create mode 100644 drivers/net/szedata2/szedata2_logs.h
 create mode 100644 drivers/net/tap/tap_log.h
 create mode 100644 drivers/net/virtio/meson.build

diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index e1127326..9f9da665 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -12,11 +12,16 @@ DIRS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET) += af_packet
 DIRS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark
 DIRS-$(CONFIG_RTE_LIBRTE_AVF_PMD) += avf
 DIRS-$(CONFIG_RTE_LIBRTE_AVP_PMD) += avp
+DIRS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe
 DIRS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD) += bnx2x
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += bonding
 DIRS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbe
+ifeq ($(CONFIG_RTE_LIBRTE_DPAA_BUS),y)
 DIRS-$(CONFIG_RTE_LIBRTE_DPAA_PMD) += dpaa
+endif
+ifeq ($(CONFIG_RTE_EAL_VFIO)$(CONFIG_RTE_LIBRTE_FSLMC_BUS),yy)
 DIRS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += dpaa2
+endif
 DIRS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000
 DIRS-$(CONFIG_RTE_LIBRTE_ENA_PMD) += ena
 DIRS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic
@@ -27,7 +32,7 @@ DIRS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe
 DIRS-$(CONFIG_RTE_LIBRTE_LIO_PMD) += liquidio
 DIRS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4
 DIRS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5
-DIRS-$(CONFIG_RTE_LIBRTE_MRVL_PMD) += mrvl
+DIRS-$(CONFIG_RTE_LIBRTE_MVPP2_PMD) += mvpp2
 DIRS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp
 DIRS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL) += null
@@ -53,9 +58,12 @@ endif # $(CONFIG_RTE_LIBRTE_SCHED)
 
 ifeq ($(CONFIG_RTE_LIBRTE_VHOST),y)
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_VHOST) += vhost
+ifeq ($(CONFIG_RTE_EAL_VFIO),y)
+DIRS-$(CONFIG_RTE_LIBRTE_IFCVF_VDPA_PMD) += ifc
+endif
 endif # $(CONFIG_RTE_LIBRTE_VHOST)
 
-ifeq ($(CONFIG_RTE_LIBRTE_MRVL_PMD),y)
+ifeq ($(CONFIG_RTE_LIBRTE_MVPP2_PMD),y)
 ifeq ($(CONFIG_RTE_LIBRTE_CFGFILE),n)
 $(error "RTE_LIBRTE_CFGFILE must be enabled in configuration!")
 endif
diff --git a/drivers/net/af_packet/Makefile b/drivers/net/af_packet/Makefile
index bb37d67a..39a1e0d2 100644
--- a/drivers/net/af_packet/Makefile
+++ b/drivers/net/af_packet/Makefile
@@ -1,35 +1,8 @@
-# BSD LICENSE
-#
-# Copyright(c) 2014 John W. Linville
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# Copyright(c) 2014 6WIND S.A.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2014 John W. Linville
+# Copyright(c) 2010-2014 Intel Corporation.
+# Copyright(c) 2014 6WIND S.A.
+# All rights reserved.
 
 include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/drivers/net/af_packet/rte_eth_af_packet.c b/drivers/net/af_packet/rte_eth_af_packet.c
index 57eccfd0..ea47abbf 100644
--- a/drivers/net/af_packet/rte_eth_af_packet.c
+++ b/drivers/net/af_packet/rte_eth_af_packet.c
@@ -94,9 +94,15 @@ static struct rte_eth_link pmd_link = {
 	.link_speed = ETH_SPEED_NUM_10G,
 	.link_duplex = ETH_LINK_FULL_DUPLEX,
 	.link_status = ETH_LINK_DOWN,
-	.link_autoneg = ETH_LINK_AUTONEG
+	.link_autoneg = ETH_LINK_FIXED,
 };
 
+static int af_packet_logtype;
+
+#define PMD_LOG(level, fmt, args...) \
+	rte_log(RTE_LOG_ ## level, af_packet_logtype, \
+		"%s(): " fmt "\n", __func__, ##args)
+
 static uint16_t
 eth_af_packet_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 {
@@ -393,8 +399,8 @@ eth_rx_queue_setup(struct rte_eth_dev *dev,
 	data_size -= TPACKET2_HDRLEN - sizeof(struct sockaddr_ll);
 
 	if (data_size > buf_size) {
-		RTE_LOG(ERR, PMD,
-			"%s: %d bytes will not fit in mbuf (%d bytes)\n",
+		PMD_LOG(ERR,
+			"%s: %d bytes will not fit in mbuf (%d bytes)",
 			dev->device->name, data_size, buf_size);
 		return -ENOMEM;
 	}
@@ -515,7 +521,7 @@ open_packet_iface(const char *key __rte_unused,
 
 	/* Open an AF_PACKET socket...
*/ *sockfd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL)); if (*sockfd == -1) { - RTE_LOG(ERR, PMD, "Could not open AF_PACKET socket\n"); + PMD_LOG(ERR, "Could not open AF_PACKET socket"); return -1; } @@ -561,28 +567,20 @@ rte_pmd_init_internals(struct rte_vdev_device *dev, break; } if (pair == NULL) { - RTE_LOG(ERR, PMD, - "%s: no interface specified for AF_PACKET ethdev\n", + PMD_LOG(ERR, + "%s: no interface specified for AF_PACKET ethdev", name); - goto error_early; + return -1; } - RTE_LOG(INFO, PMD, - "%s: creating AF_PACKET-backed ethdev on numa socket %u\n", + PMD_LOG(INFO, + "%s: creating AF_PACKET-backed ethdev on numa socket %u", name, numa_node); - /* - * now do all data allocation - for eth_dev structure, dummy pci driver - * and internal (private) data - */ - data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node); - if (data == NULL) - goto error_early; - *internals = rte_zmalloc_socket(name, sizeof(**internals), 0, numa_node); if (*internals == NULL) - goto error_early; + return -1; for (q = 0; q < nb_queues; q++) { (*internals)->rx_queue[q].map = MAP_FAILED; @@ -601,27 +599,27 @@ rte_pmd_init_internals(struct rte_vdev_device *dev, memcpy(ifr.ifr_name, pair->value, ifnamelen); ifr.ifr_name[ifnamelen] = '\0'; } else { - RTE_LOG(ERR, PMD, - "%s: I/F name too long (%s)\n", + PMD_LOG(ERR, + "%s: I/F name too long (%s)", name, pair->value); - goto error_early; + return -1; } if (ioctl(sockfd, SIOCGIFINDEX, &ifr) == -1) { - RTE_LOG(ERR, PMD, - "%s: ioctl failed (SIOCGIFINDEX)\n", + PMD_LOG(ERR, + "%s: ioctl failed (SIOCGIFINDEX)", name); - goto error_early; + return -1; } (*internals)->if_name = strdup(pair->value); if ((*internals)->if_name == NULL) - goto error_early; + return -1; (*internals)->if_index = ifr.ifr_ifindex; if (ioctl(sockfd, SIOCGIFHWADDR, &ifr) == -1) { - RTE_LOG(ERR, PMD, - "%s: ioctl failed (SIOCGIFHWADDR)\n", + PMD_LOG(ERR, + "%s: ioctl failed (SIOCGIFHWADDR)", name); - goto error_early; + return -1; } memcpy(&(*internals)->eth_addr, ifr.ifr_hwaddr.sa_data, ETH_ALEN); @@ -642,8 +640,8 @@ rte_pmd_init_internals(struct rte_vdev_device *dev, /* Open an AF_PACKET socket for this queue... 
*/ qsockfd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL)); if (qsockfd == -1) { - RTE_LOG(ERR, PMD, - "%s: could not open AF_PACKET socket\n", + PMD_LOG(ERR, + "%s: could not open AF_PACKET socket", name); return -1; } @@ -652,9 +650,9 @@ rte_pmd_init_internals(struct rte_vdev_device *dev, rc = setsockopt(qsockfd, SOL_PACKET, PACKET_VERSION, &tpver, sizeof(tpver)); if (rc == -1) { - RTE_LOG(ERR, PMD, - "%s: could not set PACKET_VERSION on AF_PACKET " - "socket for %s\n", name, pair->value); + PMD_LOG(ERR, + "%s: could not set PACKET_VERSION on AF_PACKET socket for %s", + name, pair->value); goto error; } @@ -662,9 +660,9 @@ rte_pmd_init_internals(struct rte_vdev_device *dev, rc = setsockopt(qsockfd, SOL_PACKET, PACKET_LOSS, &discard, sizeof(discard)); if (rc == -1) { - RTE_LOG(ERR, PMD, - "%s: could not set PACKET_LOSS on " - "AF_PACKET socket for %s\n", name, pair->value); + PMD_LOG(ERR, + "%s: could not set PACKET_LOSS on AF_PACKET socket for %s", + name, pair->value); goto error; } @@ -672,10 +670,9 @@ rte_pmd_init_internals(struct rte_vdev_device *dev, rc = setsockopt(qsockfd, SOL_PACKET, PACKET_QDISC_BYPASS, &qdisc_bypass, sizeof(qdisc_bypass)); if (rc == -1) { - RTE_LOG(ERR, PMD, - "%s: could not set PACKET_QDISC_BYPASS " - "on AF_PACKET socket for %s\n", name, - pair->value); + PMD_LOG(ERR, + "%s: could not set PACKET_QDISC_BYPASS on AF_PACKET socket for %s", + name, pair->value); goto error; } #else @@ -684,17 +681,17 @@ rte_pmd_init_internals(struct rte_vdev_device *dev, rc = setsockopt(qsockfd, SOL_PACKET, PACKET_RX_RING, req, sizeof(*req)); if (rc == -1) { - RTE_LOG(ERR, PMD, - "%s: could not set PACKET_RX_RING on AF_PACKET " - "socket for %s\n", name, pair->value); + PMD_LOG(ERR, + "%s: could not set PACKET_RX_RING on AF_PACKET socket for %s", + name, pair->value); goto error; } rc = setsockopt(qsockfd, SOL_PACKET, PACKET_TX_RING, req, sizeof(*req)); if (rc == -1) { - RTE_LOG(ERR, PMD, + PMD_LOG(ERR, "%s: could not set PACKET_TX_RING on AF_PACKET " - "socket for %s\n", name, pair->value); + "socket for %s", name, pair->value); goto error; } @@ -705,8 +702,8 @@ rte_pmd_init_internals(struct rte_vdev_device *dev, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_LOCKED, qsockfd, 0); if (rx_queue->map == MAP_FAILED) { - RTE_LOG(ERR, PMD, - "%s: call to mmap failed on AF_PACKET socket for %s\n", + PMD_LOG(ERR, + "%s: call to mmap failed on AF_PACKET socket for %s", name, pair->value); goto error; } @@ -742,8 +739,8 @@ rte_pmd_init_internals(struct rte_vdev_device *dev, rc = bind(qsockfd, (const struct sockaddr*)&sockaddr, sizeof(sockaddr)); if (rc == -1) { - RTE_LOG(ERR, PMD, - "%s: could not bind AF_PACKET socket to %s\n", + PMD_LOG(ERR, + "%s: could not bind AF_PACKET socket to %s", name, pair->value); goto error; } @@ -752,9 +749,9 @@ rte_pmd_init_internals(struct rte_vdev_device *dev, rc = setsockopt(qsockfd, SOL_PACKET, PACKET_FANOUT, &fanout_arg, sizeof(fanout_arg)); if (rc == -1) { - RTE_LOG(ERR, PMD, + PMD_LOG(ERR, "%s: could not set PACKET_FANOUT on AF_PACKET socket " - "for %s\n", name, pair->value); + "for %s", name, pair->value); goto error; } #endif @@ -775,14 +772,13 @@ rte_pmd_init_internals(struct rte_vdev_device *dev, (*internals)->nb_queues = nb_queues; - rte_memcpy(data, (*eth_dev)->data, sizeof(*data)); + data = (*eth_dev)->data; data->dev_private = *internals; data->nb_rx_queues = (uint16_t)nb_queues; data->nb_tx_queues = (uint16_t)nb_queues; data->dev_link = pmd_link; data->mac_addrs = &(*internals)->eth_addr; - (*eth_dev)->data = data; (*eth_dev)->dev_ops = &ops; 
return 0; @@ -802,8 +798,6 @@ error: } free((*internals)->if_name); rte_free(*internals); -error_early: - rte_free(data); return -1; } @@ -837,8 +831,8 @@ rte_eth_from_packet(struct rte_vdev_device *dev, qpairs = atoi(pair->value); if (qpairs < 1 || qpairs > RTE_PMD_AF_PACKET_MAX_RINGS) { - RTE_LOG(ERR, PMD, - "%s: invalid qpairs value\n", + PMD_LOG(ERR, + "%s: invalid qpairs value", name); return -1; } @@ -847,8 +841,8 @@ rte_eth_from_packet(struct rte_vdev_device *dev, if (strstr(pair->key, ETH_AF_PACKET_BLOCKSIZE_ARG) != NULL) { blocksize = atoi(pair->value); if (!blocksize) { - RTE_LOG(ERR, PMD, - "%s: invalid blocksize value\n", + PMD_LOG(ERR, + "%s: invalid blocksize value", name); return -1; } @@ -857,8 +851,8 @@ rte_eth_from_packet(struct rte_vdev_device *dev, if (strstr(pair->key, ETH_AF_PACKET_FRAMESIZE_ARG) != NULL) { framesize = atoi(pair->value); if (!framesize) { - RTE_LOG(ERR, PMD, - "%s: invalid framesize value\n", + PMD_LOG(ERR, + "%s: invalid framesize value", name); return -1; } @@ -867,8 +861,8 @@ rte_eth_from_packet(struct rte_vdev_device *dev, if (strstr(pair->key, ETH_AF_PACKET_FRAMECOUNT_ARG) != NULL) { framecount = atoi(pair->value); if (!framecount) { - RTE_LOG(ERR, PMD, - "%s: invalid framecount value\n", + PMD_LOG(ERR, + "%s: invalid framecount value", name); return -1; } @@ -877,8 +871,8 @@ rte_eth_from_packet(struct rte_vdev_device *dev, if (strstr(pair->key, ETH_AF_PACKET_QDISC_BYPASS_ARG) != NULL) { qdisc_bypass = atoi(pair->value); if (qdisc_bypass > 1) { - RTE_LOG(ERR, PMD, - "%s: invalid bypass value\n", + PMD_LOG(ERR, + "%s: invalid bypass value", name); return -1; } @@ -887,24 +881,24 @@ rte_eth_from_packet(struct rte_vdev_device *dev, } if (framesize > blocksize) { - RTE_LOG(ERR, PMD, - "%s: AF_PACKET MMAP frame size exceeds block size!\n", + PMD_LOG(ERR, + "%s: AF_PACKET MMAP frame size exceeds block size!", name); return -1; } blockcount = framecount / (blocksize / framesize); if (!blockcount) { - RTE_LOG(ERR, PMD, - "%s: invalid AF_PACKET MMAP parameters\n", name); + PMD_LOG(ERR, + "%s: invalid AF_PACKET MMAP parameters", name); return -1; } - RTE_LOG(INFO, PMD, "%s: AF_PACKET MMAP parameters:\n", name); - RTE_LOG(INFO, PMD, "%s:\tblock size %d\n", name, blocksize); - RTE_LOG(INFO, PMD, "%s:\tblock count %d\n", name, blockcount); - RTE_LOG(INFO, PMD, "%s:\tframe size %d\n", name, framesize); - RTE_LOG(INFO, PMD, "%s:\tframe count %d\n", name, framecount); + PMD_LOG(INFO, "%s: AF_PACKET MMAP parameters:", name); + PMD_LOG(INFO, "%s:\tblock size %d", name, blocksize); + PMD_LOG(INFO, "%s:\tblock count %d", name, blockcount); + PMD_LOG(INFO, "%s:\tframe size %d", name, framesize); + PMD_LOG(INFO, "%s:\tframe count %d", name, framecount); if (rte_pmd_init_internals(dev, *sockfd, qpairs, blocksize, blockcount, @@ -917,6 +911,7 @@ rte_eth_from_packet(struct rte_vdev_device *dev, eth_dev->rx_pkt_burst = eth_af_packet_rx; eth_dev->tx_pkt_burst = eth_af_packet_tx; + rte_eth_dev_probing_finish(eth_dev); return 0; } @@ -926,9 +921,23 @@ rte_pmd_af_packet_probe(struct rte_vdev_device *dev) int ret = 0; struct rte_kvargs *kvlist; int sockfd = -1; + struct rte_eth_dev *eth_dev; + const char *name = rte_vdev_device_name(dev); - RTE_LOG(INFO, PMD, "Initializing pmd_af_packet for %s\n", - rte_vdev_device_name(dev)); + PMD_LOG(INFO, "Initializing pmd_af_packet for %s", name); + + if (rte_eal_process_type() == RTE_PROC_SECONDARY && + strlen(rte_vdev_device_args(dev)) == 0) { + eth_dev = rte_eth_dev_attach_secondary(name); + if (!eth_dev) { + PMD_LOG(ERR, "Failed to 
probe %s", name); + return -1; + } + /* TODO: request info from primary to set up Rx and Tx */ + eth_dev->dev_ops = &ops; + rte_eth_dev_probing_finish(eth_dev); + return 0; + } kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments); if (kvlist == NULL) { @@ -966,8 +975,8 @@ rte_pmd_af_packet_remove(struct rte_vdev_device *dev) struct pmd_internals *internals; unsigned q; - RTE_LOG(INFO, PMD, "Closing AF_PACKET ethdev on numa socket %u\n", - rte_socket_id()); + PMD_LOG(INFO, "Closing AF_PACKET ethdev on numa socket %u", + rte_socket_id()); if (dev == NULL) return -1; @@ -985,7 +994,6 @@ rte_pmd_af_packet_remove(struct rte_vdev_device *dev) free(internals->if_name); rte_free(eth_dev->data->dev_private); - rte_free(eth_dev->data); rte_eth_dev_release_port(eth_dev); @@ -1006,3 +1014,12 @@ RTE_PMD_REGISTER_PARAM_STRING(net_af_packet, "framesz= " "framecnt= " "qdisc_bypass=<0|1>"); + +RTE_INIT(af_packet_init_log); +static void +af_packet_init_log(void) +{ + af_packet_logtype = rte_log_register("pmd.net.packet"); + if (af_packet_logtype >= 0) + rte_log_set_level(af_packet_logtype, RTE_LOG_NOTICE); +} diff --git a/drivers/net/ark/ark_ethdev.c b/drivers/net/ark/ark_ethdev.c index ff87c20e..834d8a9e 100644 --- a/drivers/net/ark/ark_ethdev.c +++ b/drivers/net/ark/ark_ethdev.c @@ -69,7 +69,7 @@ static int eth_ark_dev_set_link_down(struct rte_eth_dev *dev); static int eth_ark_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats); static void eth_ark_dev_stats_reset(struct rte_eth_dev *dev); -static void eth_ark_set_default_mac_addr(struct rte_eth_dev *dev, +static int eth_ark_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr); static int eth_ark_macaddr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr, @@ -390,6 +390,7 @@ eth_ark_dev_init(struct rte_eth_dev *dev) if (p == 0) { /* First port is already allocated by DPDK */ eth_dev = ark->eth_dev; + rte_eth_dev_probing_finish(eth_dev); continue; } @@ -422,6 +423,8 @@ eth_ark_dev_init(struct rte_eth_dev *dev) ark->user_data[eth_dev->data->port_id] = ark->user_ext.dev_init(dev, ark->a_bar, p); } + + rte_eth_dev_probing_finish(eth_dev); } return ret; @@ -771,7 +774,6 @@ eth_ark_dev_info_get(struct rte_eth_dev *dev, ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G); - dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev); } static int @@ -887,16 +889,19 @@ eth_ark_macaddr_remove(struct rte_eth_dev *dev, uint32_t index) ark->user_data[dev->data->port_id]); } -static void +static int eth_ark_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr) { struct ark_adapter *ark = (struct ark_adapter *)dev->data->dev_private; - if (ark->user_ext.mac_addr_set) + if (ark->user_ext.mac_addr_set) { ark->user_ext.mac_addr_set(dev, mac_addr, ark->user_data[dev->data->port_id]); + return 0; + } + return -ENOTSUP; } static int diff --git a/drivers/net/avf/avf_ethdev.c b/drivers/net/avf/avf_ethdev.c index 4df66170..ad83a57e 100644 --- a/drivers/net/avf/avf_ethdev.c +++ b/drivers/net/avf/avf_ethdev.c @@ -65,7 +65,7 @@ static int avf_dev_rss_hash_update(struct rte_eth_dev *dev, static int avf_dev_rss_hash_conf_get(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf); static int avf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); -static void avf_dev_set_default_mac_addr(struct rte_eth_dev *dev, +static int avf_dev_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr); static int avf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id); @@ -339,17 
+339,18 @@ static int avf_config_rx_queues_irqs(struct rte_eth_dev *dev, AVF_WRITE_FLUSH(hw); /* map all queues to the same interrupt */ for (i = 0; i < dev->data->nb_rx_queues; i++) - vf->rxq_map[0] |= 1 << i; + vf->rxq_map[vf->msix_base] |= 1 << i; } else { if (!rte_intr_allow_others(intr_handle)) { vf->nb_msix = 1; vf->msix_base = AVF_MISC_VEC_ID; for (i = 0; i < dev->data->nb_rx_queues; i++) { - vf->rxq_map[0] |= 1 << i; + vf->rxq_map[vf->msix_base] |= 1 << i; intr_handle->intr_vec[i] = AVF_MISC_VEC_ID; } PMD_DRV_LOG(DEBUG, - "vector 0 are mapping to all Rx queues"); + "vector %u are mapping to all Rx queues", + vf->msix_base); } else { /* If Rx interrupt is reuquired, and we can use * multi interrupts, then the vec is from 1 @@ -474,7 +475,7 @@ avf_dev_stop(struct rte_eth_dev *dev) { struct avf_adapter *adapter = AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); - struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev); + struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = dev->intr_handle; int ret, i; @@ -507,7 +508,6 @@ avf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); memset(dev_info, 0, sizeof(*dev_info)); - dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev); dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs; dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs; dev_info->min_rx_bufsize = AVF_BUF_SIZE_MIN; @@ -532,13 +532,13 @@ avf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->default_rxconf = (struct rte_eth_rxconf) { .rx_free_thresh = AVF_DEFAULT_RX_FREE_THRESH, .rx_drop_en = 0, + .offloads = 0, }; dev_info->default_txconf = (struct rte_eth_txconf) { .tx_free_thresh = AVF_DEFAULT_TX_FREE_THRESH, .tx_rs_thresh = AVF_DEFAULT_TX_RS_THRESH, - .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | - ETH_TXQ_FLAGS_NOOFFLOADS, + .offloads = 0, }; dev_info->rx_desc_lim = (struct rte_eth_desc_lim) { @@ -608,7 +608,7 @@ avf_dev_link_update(struct rte_eth_dev *dev, new_link.link_duplex = ETH_LINK_FULL_DUPLEX; new_link.link_status = vf->link_up ? 
ETH_LINK_UP : ETH_LINK_DOWN; - new_link.link_autoneg = !!(dev->data->dev_conf.link_speeds & + new_link.link_autoneg = !(dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED); if (rte_atomic64_cmpset((uint64_t *)&dev->data->dev_link, @@ -759,7 +759,7 @@ avf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask) /* Vlan stripping setting */ if (mask & ETH_VLAN_STRIP_MASK) { /* Enable or disable VLAN stripping */ - if (dev_conf->rxmode.hw_vlan_strip) + if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP) err = avf_enable_vlan_strip(adapter); else err = avf_disable_vlan_strip(adapter); @@ -926,7 +926,7 @@ avf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) return ret; } -static void +static int avf_dev_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr) { @@ -940,11 +940,11 @@ avf_dev_set_default_mac_addr(struct rte_eth_dev *dev, perm_addr = (struct ether_addr *)hw->mac.perm_addr; if (is_same_ether_addr(mac_addr, old_addr)) - return; + return 0; /* If the MAC address is configured by host, skip the setting */ if (is_valid_assigned_ether_addr(perm_addr)) - return; + return -EPERM; ret = avf_add_del_eth_addr(adapter, old_addr, FALSE); if (ret) @@ -968,7 +968,11 @@ avf_dev_set_default_mac_addr(struct rte_eth_dev *dev, mac_addr->addr_bytes[4], mac_addr->addr_bytes[5]); + if (ret) + return -EIO; + ether_addr_copy(mac_addr, (struct ether_addr *)hw->mac.addr); + return 0; } static int @@ -1365,8 +1369,8 @@ avf_allocate_dma_mem_d(__rte_unused struct avf_hw *hw, return AVF_ERR_PARAM; snprintf(z_name, sizeof(z_name), "avf_dma_%"PRIu64, rte_rand()); - mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY, 0, - alignment, RTE_PGSIZE_2M); + mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY, + RTE_MEMZONE_IOVA_CONTIG, alignment, RTE_PGSIZE_2M); if (!mz) return AVF_ERR_NO_MEMORY; diff --git a/drivers/net/avf/avf_rxtx.c b/drivers/net/avf/avf_rxtx.c index d276d975..e03a136f 100644 --- a/drivers/net/avf/avf_rxtx.c +++ b/drivers/net/avf/avf_rxtx.c @@ -109,7 +109,7 @@ check_rx_vec_allow(struct avf_rx_queue *rxq) static inline bool check_tx_vec_allow(struct avf_tx_queue *txq) { - if ((txq->txq_flags & AVF_SIMPLE_FLAGS) == AVF_SIMPLE_FLAGS && + if (!(txq->offloads & AVF_NO_VECTOR_FLAGS) && txq->rs_thresh >= AVF_VPMD_TX_MAX_BURST && txq->rs_thresh <= AVF_VPMD_TX_MAX_FREE_BUF) { PMD_INIT_LOG(DEBUG, "Vector tx can be enabled on this txq."); @@ -435,9 +435,12 @@ avf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint32_t ring_size; uint16_t tx_rs_thresh, tx_free_thresh; uint16_t i, base, bsf, tc_mapping; + uint64_t offloads; PMD_INIT_FUNC_TRACE(); + offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads; + if (nb_desc % AVF_ALIGN_RING_DESC != 0 || nb_desc > AVF_MAX_RING_DESC || nb_desc < AVF_MIN_RING_DESC) { @@ -474,7 +477,7 @@ avf_dev_tx_queue_setup(struct rte_eth_dev *dev, txq->free_thresh = tx_free_thresh; txq->queue_id = queue_idx; txq->port_id = dev->data->port_id; - txq->txq_flags = tx_conf->txq_flags; + txq->offloads = offloads; txq->tx_deferred_start = tx_conf->tx_deferred_start; /* Allocate software ring */ @@ -1831,7 +1834,7 @@ avf_dev_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, qinfo->conf.tx_free_thresh = txq->free_thresh; qinfo->conf.tx_rs_thresh = txq->rs_thresh; - qinfo->conf.txq_flags = txq->txq_flags; + qinfo->conf.offloads = txq->offloads; qinfo->conf.tx_deferred_start = txq->tx_deferred_start; } diff --git a/drivers/net/avf/avf_rxtx.h b/drivers/net/avf/avf_rxtx.h index d1701cd6..297d0776 100644 --- a/drivers/net/avf/avf_rxtx.h 
+++ b/drivers/net/avf/avf_rxtx.h @@ -22,8 +22,12 @@ #define AVF_VPMD_DESCS_PER_LOOP 4 #define AVF_VPMD_TX_MAX_FREE_BUF 64 -#define AVF_SIMPLE_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \ - ETH_TXQ_FLAGS_NOOFFLOADS) +#define AVF_NO_VECTOR_FLAGS ( \ + DEV_TX_OFFLOAD_MULTI_SEGS | \ + DEV_TX_OFFLOAD_VLAN_INSERT | \ + DEV_TX_OFFLOAD_SCTP_CKSUM | \ + DEV_TX_OFFLOAD_UDP_CKSUM | \ + DEV_TX_OFFLOAD_TCP_CKSUM) #define DEFAULT_TX_RS_THRESH 32 #define DEFAULT_TX_FREE_THRESH 32 @@ -125,7 +129,7 @@ struct avf_tx_queue { uint16_t port_id; uint16_t queue_id; - uint32_t txq_flags; + uint64_t offloads; uint16_t next_dd; /* next to set RS, for VPMD */ uint16_t next_rs; /* next to check DD, for VPMD */ diff --git a/drivers/net/avp/Makefile b/drivers/net/avp/Makefile index c29ecf45..c9db667f 100644 --- a/drivers/net/avp/Makefile +++ b/drivers/net/avp/Makefile @@ -1,34 +1,5 @@ -# BSD LICENSE -# -# Copyright(c) 2013-2017, Wind River Systems, Inc. All rights reserved. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in -# the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Wind River Systems nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2013-2017, Wind River Systems, Inc. include $(RTE_SDK)/mk/rte.vars.mk diff --git a/drivers/net/avp/avp_ethdev.c b/drivers/net/avp/avp_ethdev.c index dba99120..dc97e60e 100644 --- a/drivers/net/avp/avp_ethdev.c +++ b/drivers/net/avp/avp_ethdev.c @@ -1,33 +1,5 @@ -/* - * BSD LICENSE - * - * Copyright (c) 2013-2017, Wind River Systems, Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1) Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * - * 2) Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. 
- * - * 3) Neither the name of Wind River Systems nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2013-2017 Wind River Systems, Inc. */ #include @@ -1076,19 +1048,8 @@ static int eth_avp_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, struct rte_pci_device *pci_dev) { - struct rte_eth_dev *eth_dev; - int ret; - - eth_dev = rte_eth_dev_pci_allocate(pci_dev, - sizeof(struct avp_adapter)); - if (eth_dev == NULL) - return -ENOMEM; - - ret = eth_avp_dev_init(eth_dev); - if (ret) - rte_eth_dev_pci_release(eth_dev); - - return ret; + return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct avp_adapter), + eth_avp_dev_init); } static int @@ -2074,12 +2035,6 @@ avp_dev_start(struct rte_eth_dev *eth_dev) goto unlock; } - /* disable features that we do not support */ - eth_dev->data->dev_conf.rxmode.hw_ip_checksum = 0; - eth_dev->data->dev_conf.rxmode.hw_vlan_filter = 0; - eth_dev->data->dev_conf.rxmode.hw_vlan_extend = 0; - eth_dev->data->dev_conf.rxmode.hw_strip_crc = 0; - /* update link state */ ret = avp_dev_ctrl_set_link_state(eth_dev, 1); if (ret < 0) { @@ -2206,7 +2161,6 @@ avp_dev_info_get(struct rte_eth_dev *eth_dev, { struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); - dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); dev_info->max_rx_queues = avp->max_rx_queues; dev_info->max_tx_queues = avp->max_tx_queues; dev_info->min_rx_bufsize = AVP_MIN_RX_BUFSIZE; @@ -2222,10 +2176,12 @@ static int avp_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask) { struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + struct rte_eth_conf *dev_conf = ð_dev->data->dev_conf; + uint64_t offloads = dev_conf->rxmode.offloads; if (mask & ETH_VLAN_STRIP_MASK) { if (avp->host_features & RTE_AVP_FEATURE_VLAN_OFFLOAD) { - if (eth_dev->data->dev_conf.rxmode.hw_vlan_strip) + if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP) avp->features |= RTE_AVP_FEATURE_VLAN_OFFLOAD; else avp->features &= ~RTE_AVP_FEATURE_VLAN_OFFLOAD; @@ -2235,12 +2191,12 @@ avp_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask) } if (mask & ETH_VLAN_FILTER_MASK) { - if (eth_dev->data->dev_conf.rxmode.hw_vlan_filter) + if (offloads & DEV_RX_OFFLOAD_VLAN_FILTER) PMD_DRV_LOG(ERR, "VLAN filter offload not supported\n"); } if (mask & ETH_VLAN_EXTEND_MASK) { - if (eth_dev->data->dev_conf.rxmode.hw_vlan_extend) + if (offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) PMD_DRV_LOG(ERR, "VLAN extend offload not supported\n"); } diff --git a/drivers/net/avp/avp_logs.h b/drivers/net/avp/avp_logs.h index e29394d5..6e297c7a 100644 --- a/drivers/net/avp/avp_logs.h +++ 
b/drivers/net/avp/avp_logs.h @@ -1,33 +1,5 @@ -/* - * BSD LICENSE - * - * Copyright (c) 2013-2015, Wind River Systems, Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1) Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * - * 2) Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * 3) Neither the name of Wind River Systems nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2013-2017 Wind River Systems, Inc. */ #ifndef _AVP_LOGS_H_ diff --git a/drivers/net/avp/rte_avp_common.h b/drivers/net/avp/rte_avp_common.h index 81dfe5ea..aa95159c 100644 --- a/drivers/net/avp/rte_avp_common.h +++ b/drivers/net/avp/rte_avp_common.h @@ -1,57 +1,6 @@ -/*- - * This file is provided under a dual BSD/LGPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GNU LESSER GENERAL PUBLIC LICENSE - * - * Copyright(c) 2010-2013 Intel Corporation. All rights reserved. - * Copyright(c) 2014-2017 Wind River Systems, Inc. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2.1 of the GNU Lesser General Public License - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * Contact Information: - * Wind River Systems, Inc. - * - * - * BSD LICENSE - * - * Copyright(c) 2010-2013 Intel Corporation. All rights reserved. - * Copyright(c) 2014-2017 Wind River Systems, Inc. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. 
- * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * +/* SPDX-License-Identifier: (BSD-3-Clause OR LGPL-2.1) + * Copyright(c) 2010-2013 Intel Corporation. + * Copyright(c) 2014-2017 Wind River Systems, Inc. */ #ifndef _RTE_AVP_COMMON_H_ diff --git a/drivers/net/avp/rte_avp_fifo.h b/drivers/net/avp/rte_avp_fifo.h index 803eb80a..c1658da6 100644 --- a/drivers/net/avp/rte_avp_fifo.h +++ b/drivers/net/avp/rte_avp_fifo.h @@ -1,57 +1,6 @@ -/*- - * This file is provided under a dual BSD/LGPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GNU LESSER GENERAL PUBLIC LICENSE - * - * Copyright(c) 2010-2013 Intel Corporation. All rights reserved. - * Copyright(c) 2014 Wind River Systems, Inc. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2.1 of the GNU Lesser General Public License - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * Contact Information: - * Wind River Systems, Inc. - * - * - * BSD LICENSE - * - * Copyright(c) 2010-2013 Intel Corporation. All rights reserved. - * Copyright(c) 2013-2017 Wind River Systems, Inc. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * +/* SPDX-License-Identifier: (BSD-3-Clause OR LGPL-2.1) + * Copyright(c) 2010-2013 Intel Corporation. + * Copyright(c) 2013-2017 Wind River Systems, Inc. */ #ifndef _RTE_AVP_FIFO_H_ diff --git a/drivers/net/axgbe/Makefile b/drivers/net/axgbe/Makefile new file mode 100644 index 00000000..72215aed --- /dev/null +++ b/drivers/net/axgbe/Makefile @@ -0,0 +1,35 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright (c) 2018 Advanced Micro Devices, Inc. All rights reserved. + +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_axgbe.a + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) + +EXPORT_MAP := rte_pmd_axgbe_version.map + +LIBABIVER := 1 + +LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool +LDLIBS += -lrte_pci -lrte_bus_pci +LDLIBS += -lrte_ethdev + +# +# all source are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_ethdev.c +SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_dev.c +SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_mdio.c +SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_phy_impl.c +SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_i2c.c +SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_rxtx.c +ifeq ($(CONFIG_RTE_ARCH_X86),y) +SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_rxtx_vec_sse.c +endif + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/net/axgbe/axgbe_common.h b/drivers/net/axgbe/axgbe_common.h new file mode 100644 index 00000000..d25d54ca --- /dev/null +++ b/drivers/net/axgbe/axgbe_common.h @@ -0,0 +1,1710 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved. + * Copyright(c) 2018 Synopsys, Inc. All rights reserved. 
+ */ + +#ifndef __AXGBE_COMMON_H__ +#define __AXGBE_COMMON_H__ + +#include "axgbe_logs.h" + +#include <stdbool.h> +#include <limits.h> +#include <sys/types.h> +#include <sys/stat.h> +#include <stdio.h> +#include <stdlib.h> +#include <stdarg.h> +#include <unistd.h> +#include <inttypes.h> +#include <pthread.h> +#include <string.h> +#include <errno.h> + +#include <rte_byteorder.h> +#include <rte_memory.h> +#include <rte_malloc.h> +#include <rte_hexdump.h> +#include <rte_log.h> +#include <rte_debug.h> +#include <rte_branch_prediction.h> +#include <rte_eal.h> +#include <rte_memzone.h> +#include <rte_ether.h> +#include <rte_ethdev.h> +#include <rte_dev.h> +#include <rte_errno.h> +#include <rte_ethdev_pci.h> +#include <rte_common.h> +#include <rte_cycles.h> +#include <rte_io.h> + +#define BIT(nr) (1 << (nr)) +#ifndef ARRAY_SIZE +#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) +#endif + +#define AXGBE_HZ 250 + +/* DMA register offsets */ +#define DMA_MR 0x3000 +#define DMA_SBMR 0x3004 +#define DMA_ISR 0x3008 +#define DMA_AXIARCR 0x3010 +#define DMA_AXIAWCR 0x3018 +#define DMA_AXIAWRCR 0x301c +#define DMA_DSR0 0x3020 +#define DMA_DSR1 0x3024 +#define EDMA_TX_CONTROL 0x3040 +#define EDMA_RX_CONTROL 0x3044 + +/* DMA register entry bit positions and sizes */ +#define DMA_AXIARCR_DRC_INDEX 0 +#define DMA_AXIARCR_DRC_WIDTH 4 +#define DMA_AXIARCR_DRD_INDEX 4 +#define DMA_AXIARCR_DRD_WIDTH 2 +#define DMA_AXIARCR_TEC_INDEX 8 +#define DMA_AXIARCR_TEC_WIDTH 4 +#define DMA_AXIARCR_TED_INDEX 12 +#define DMA_AXIARCR_TED_WIDTH 2 +#define DMA_AXIARCR_THC_INDEX 16 +#define DMA_AXIARCR_THC_WIDTH 4 +#define DMA_AXIARCR_THD_INDEX 20 +#define DMA_AXIARCR_THD_WIDTH 2 +#define DMA_AXIAWCR_DWC_INDEX 0 +#define DMA_AXIAWCR_DWC_WIDTH 4 +#define DMA_AXIAWCR_DWD_INDEX 4 +#define DMA_AXIAWCR_DWD_WIDTH 2 +#define DMA_AXIAWCR_RPC_INDEX 8 +#define DMA_AXIAWCR_RPC_WIDTH 4 +#define DMA_AXIAWCR_RPD_INDEX 12 +#define DMA_AXIAWCR_RPD_WIDTH 2 +#define DMA_AXIAWCR_RHC_INDEX 16 +#define DMA_AXIAWCR_RHC_WIDTH 4 +#define DMA_AXIAWCR_RHD_INDEX 20 +#define DMA_AXIAWCR_RHD_WIDTH 2 +#define DMA_AXIAWCR_RDC_INDEX 24 +#define DMA_AXIAWCR_RDC_WIDTH 4 +#define DMA_AXIAWCR_RDD_INDEX 28 +#define DMA_AXIAWCR_RDD_WIDTH 2 +#define DMA_AXIAWRCR_TDWC_INDEX 0 +#define DMA_AXIAWRCR_TDWC_WIDTH 4 +#define DMA_AXIAWRCR_TDWD_INDEX 4 +#define DMA_AXIAWRCR_TDWD_WIDTH 4 +#define DMA_AXIAWRCR_RDRC_INDEX 8 +#define DMA_AXIAWRCR_RDRC_WIDTH 4 +#define DMA_ISR_MACIS_INDEX 17 +#define DMA_ISR_MACIS_WIDTH 1 +#define DMA_ISR_MTLIS_INDEX 16 +#define DMA_ISR_MTLIS_WIDTH 1 +#define DMA_MR_INTM_INDEX 12 +#define DMA_MR_INTM_WIDTH 2 +#define DMA_MR_SWR_INDEX 0 +#define DMA_MR_SWR_WIDTH 1 +#define DMA_SBMR_WR_OSR_INDEX 24 +#define DMA_SBMR_WR_OSR_WIDTH 6 +#define DMA_SBMR_RD_OSR_INDEX 16 +#define DMA_SBMR_RD_OSR_WIDTH 6 +#define DMA_SBMR_AAL_INDEX 12 +#define DMA_SBMR_AAL_WIDTH 1 +#define DMA_SBMR_EAME_INDEX 11 +#define DMA_SBMR_EAME_WIDTH 1 +#define DMA_SBMR_BLEN_256_INDEX 7 +#define DMA_SBMR_BLEN_256_WIDTH 1 +#define DMA_SBMR_BLEN_32_INDEX 4 +#define DMA_SBMR_BLEN_32_WIDTH 1 +#define DMA_SBMR_UNDEF_INDEX 0 +#define DMA_SBMR_UNDEF_WIDTH 1 + +/* DMA register values */ +#define DMA_DSR_RPS_WIDTH 4 +#define DMA_DSR_TPS_WIDTH 4 +#define DMA_DSR_Q_WIDTH (DMA_DSR_RPS_WIDTH + DMA_DSR_TPS_WIDTH) +#define DMA_DSR0_RPS_START 8 +#define DMA_DSR0_TPS_START 12 +#define DMA_DSRX_FIRST_QUEUE 3 +#define DMA_DSRX_INC 4 +#define DMA_DSRX_QPR 4 +#define DMA_DSRX_RPS_START 0 +#define DMA_DSRX_TPS_START 4 +#define DMA_TPS_STOPPED 0x00 +#define DMA_TPS_SUSPENDED 0x06 + +/* DMA channel register offsets + * Multiple channels can be active. The first channel has registers + * that begin at 0x3100. Each subsequent channel has registers that + * are accessed using an offset of 0x80 from the previous channel.
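+ * + * For example, with DMA_CH_BASE 0x3100 and DMA_CH_INC 0x80, the channel 2 + * copy of the status register DMA_CH_SR (0x60) is located at + * 0x3100 + (2 * 0x80) + 0x60 = 0x3260.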
+ */ +#define DMA_CH_BASE 0x3100 +#define DMA_CH_INC 0x80 + +#define DMA_CH_CR 0x00 +#define DMA_CH_TCR 0x04 +#define DMA_CH_RCR 0x08 +#define DMA_CH_TDLR_HI 0x10 +#define DMA_CH_TDLR_LO 0x14 +#define DMA_CH_RDLR_HI 0x18 +#define DMA_CH_RDLR_LO 0x1c +#define DMA_CH_TDTR_LO 0x24 +#define DMA_CH_RDTR_LO 0x2c +#define DMA_CH_TDRLR 0x30 +#define DMA_CH_RDRLR 0x34 +#define DMA_CH_IER 0x38 +#define DMA_CH_RIWT 0x3c +#define DMA_CH_CATDR_LO 0x44 +#define DMA_CH_CARDR_LO 0x4c +#define DMA_CH_CATBR_HI 0x50 +#define DMA_CH_CATBR_LO 0x54 +#define DMA_CH_CARBR_HI 0x58 +#define DMA_CH_CARBR_LO 0x5c +#define DMA_CH_SR 0x60 + +/* DMA channel register entry bit positions and sizes */ +#define DMA_CH_CR_PBLX8_INDEX 16 +#define DMA_CH_CR_PBLX8_WIDTH 1 +#define DMA_CH_CR_SPH_INDEX 24 +#define DMA_CH_CR_SPH_WIDTH 1 +#define DMA_CH_IER_AIE_INDEX 14 +#define DMA_CH_IER_AIE_WIDTH 1 +#define DMA_CH_IER_FBEE_INDEX 12 +#define DMA_CH_IER_FBEE_WIDTH 1 +#define DMA_CH_IER_NIE_INDEX 15 +#define DMA_CH_IER_NIE_WIDTH 1 +#define DMA_CH_IER_RBUE_INDEX 7 +#define DMA_CH_IER_RBUE_WIDTH 1 +#define DMA_CH_IER_RIE_INDEX 6 +#define DMA_CH_IER_RIE_WIDTH 1 +#define DMA_CH_IER_RSE_INDEX 8 +#define DMA_CH_IER_RSE_WIDTH 1 +#define DMA_CH_IER_TBUE_INDEX 2 +#define DMA_CH_IER_TBUE_WIDTH 1 +#define DMA_CH_IER_TIE_INDEX 0 +#define DMA_CH_IER_TIE_WIDTH 1 +#define DMA_CH_IER_TXSE_INDEX 1 +#define DMA_CH_IER_TXSE_WIDTH 1 +#define DMA_CH_RCR_PBL_INDEX 16 +#define DMA_CH_RCR_PBL_WIDTH 6 +#define DMA_CH_RCR_RBSZ_INDEX 1 +#define DMA_CH_RCR_RBSZ_WIDTH 14 +#define DMA_CH_RCR_SR_INDEX 0 +#define DMA_CH_RCR_SR_WIDTH 1 +#define DMA_CH_RIWT_RWT_INDEX 0 +#define DMA_CH_RIWT_RWT_WIDTH 8 +#define DMA_CH_SR_FBE_INDEX 12 +#define DMA_CH_SR_FBE_WIDTH 1 +#define DMA_CH_SR_RBU_INDEX 7 +#define DMA_CH_SR_RBU_WIDTH 1 +#define DMA_CH_SR_RI_INDEX 6 +#define DMA_CH_SR_RI_WIDTH 1 +#define DMA_CH_SR_RPS_INDEX 8 +#define DMA_CH_SR_RPS_WIDTH 1 +#define DMA_CH_SR_TBU_INDEX 2 +#define DMA_CH_SR_TBU_WIDTH 1 +#define DMA_CH_SR_TI_INDEX 0 +#define DMA_CH_SR_TI_WIDTH 1 +#define DMA_CH_SR_TPS_INDEX 1 +#define DMA_CH_SR_TPS_WIDTH 1 +#define DMA_CH_TCR_OSP_INDEX 4 +#define DMA_CH_TCR_OSP_WIDTH 1 +#define DMA_CH_TCR_PBL_INDEX 16 +#define DMA_CH_TCR_PBL_WIDTH 6 +#define DMA_CH_TCR_ST_INDEX 0 +#define DMA_CH_TCR_ST_WIDTH 1 +#define DMA_CH_TCR_TSE_INDEX 12 +#define DMA_CH_TCR_TSE_WIDTH 1 + +/* DMA channel register values */ +#define DMA_OSP_DISABLE 0x00 +#define DMA_OSP_ENABLE 0x01 +#define DMA_PBL_1 1 +#define DMA_PBL_2 2 +#define DMA_PBL_4 4 +#define DMA_PBL_8 8 +#define DMA_PBL_16 16 +#define DMA_PBL_32 32 +#define DMA_PBL_64 64 /* 8 x 8 */ +#define DMA_PBL_128 128 /* 8 x 16 */ +#define DMA_PBL_256 256 /* 8 x 32 */ +#define DMA_PBL_X8_DISABLE 0x00 +#define DMA_PBL_X8_ENABLE 0x01 + +/* MAC register offsets */ +#define MAC_TCR 0x0000 +#define MAC_RCR 0x0004 +#define MAC_PFR 0x0008 +#define MAC_WTR 0x000c +#define MAC_HTR0 0x0010 +#define MAC_VLANTR 0x0050 +#define MAC_VLANHTR 0x0058 +#define MAC_VLANIR 0x0060 +#define MAC_IVLANIR 0x0064 +#define MAC_RETMR 0x006c +#define MAC_Q0TFCR 0x0070 +#define MAC_RFCR 0x0090 +#define MAC_RQC0R 0x00a0 +#define MAC_RQC1R 0x00a4 +#define MAC_RQC2R 0x00a8 +#define MAC_RQC3R 0x00ac +#define MAC_ISR 0x00b0 +#define MAC_IER 0x00b4 +#define MAC_RTSR 0x00b8 +#define MAC_PMTCSR 0x00c0 +#define MAC_RWKPFR 0x00c4 +#define MAC_LPICSR 0x00d0 +#define MAC_LPITCR 0x00d4 +#define MAC_VR 0x0110 +#define MAC_DR 0x0114 +#define MAC_HWF0R 0x011c +#define MAC_HWF1R 0x0120 +#define MAC_HWF2R 0x0124 +#define MAC_MDIOSCAR 0x0200 +#define MAC_MDIOSCCDR 0x0204 
+#define MAC_MDIOISR 0x0214 +#define MAC_MDIOIER 0x0218 +#define MAC_MDIOCL22R 0x0220 +#define MAC_GPIOCR 0x0278 +#define MAC_GPIOSR 0x027c +#define MAC_MACA0HR 0x0300 +#define MAC_MACA0LR 0x0304 +#define MAC_MACA1HR 0x0308 +#define MAC_MACA1LR 0x030c +#define MAC_RSSCR 0x0c80 +#define MAC_RSSAR 0x0c88 +#define MAC_RSSDR 0x0c8c +#define MAC_TSCR 0x0d00 +#define MAC_SSIR 0x0d04 +#define MAC_STSR 0x0d08 +#define MAC_STNR 0x0d0c +#define MAC_STSUR 0x0d10 +#define MAC_STNUR 0x0d14 +#define MAC_TSAR 0x0d18 +#define MAC_TSSR 0x0d20 +#define MAC_TXSNR 0x0d30 +#define MAC_TXSSR 0x0d34 + +#define MAC_QTFCR_INC 4 +#define MAC_MACA_INC 4 +#define MAC_HTR_INC 4 + +#define MAC_RQC2_INC 4 +#define MAC_RQC2_Q_PER_REG 4 + +/* MAC register entry bit positions and sizes */ +#define MAC_HWF0R_ADDMACADRSEL_INDEX 18 +#define MAC_HWF0R_ADDMACADRSEL_WIDTH 5 +#define MAC_HWF0R_ARPOFFSEL_INDEX 9 +#define MAC_HWF0R_ARPOFFSEL_WIDTH 1 +#define MAC_HWF0R_EEESEL_INDEX 13 +#define MAC_HWF0R_EEESEL_WIDTH 1 +#define MAC_HWF0R_GMIISEL_INDEX 1 +#define MAC_HWF0R_GMIISEL_WIDTH 1 +#define MAC_HWF0R_MGKSEL_INDEX 7 +#define MAC_HWF0R_MGKSEL_WIDTH 1 +#define MAC_HWF0R_MMCSEL_INDEX 8 +#define MAC_HWF0R_MMCSEL_WIDTH 1 +#define MAC_HWF0R_RWKSEL_INDEX 6 +#define MAC_HWF0R_RWKSEL_WIDTH 1 +#define MAC_HWF0R_RXCOESEL_INDEX 16 +#define MAC_HWF0R_RXCOESEL_WIDTH 1 +#define MAC_HWF0R_SAVLANINS_INDEX 27 +#define MAC_HWF0R_SAVLANINS_WIDTH 1 +#define MAC_HWF0R_SMASEL_INDEX 5 +#define MAC_HWF0R_SMASEL_WIDTH 1 +#define MAC_HWF0R_TSSEL_INDEX 12 +#define MAC_HWF0R_TSSEL_WIDTH 1 +#define MAC_HWF0R_TSSTSSEL_INDEX 25 +#define MAC_HWF0R_TSSTSSEL_WIDTH 2 +#define MAC_HWF0R_TXCOESEL_INDEX 14 +#define MAC_HWF0R_TXCOESEL_WIDTH 1 +#define MAC_HWF0R_VLHASH_INDEX 4 +#define MAC_HWF0R_VLHASH_WIDTH 1 +#define MAC_HWF1R_ADDR64_INDEX 14 +#define MAC_HWF1R_ADDR64_WIDTH 2 +#define MAC_HWF1R_ADVTHWORD_INDEX 13 +#define MAC_HWF1R_ADVTHWORD_WIDTH 1 +#define MAC_HWF1R_DBGMEMA_INDEX 19 +#define MAC_HWF1R_DBGMEMA_WIDTH 1 +#define MAC_HWF1R_DCBEN_INDEX 16 +#define MAC_HWF1R_DCBEN_WIDTH 1 +#define MAC_HWF1R_HASHTBLSZ_INDEX 24 +#define MAC_HWF1R_HASHTBLSZ_WIDTH 3 +#define MAC_HWF1R_L3L4FNUM_INDEX 27 +#define MAC_HWF1R_L3L4FNUM_WIDTH 4 +#define MAC_HWF1R_NUMTC_INDEX 21 +#define MAC_HWF1R_NUMTC_WIDTH 3 +#define MAC_HWF1R_RSSEN_INDEX 20 +#define MAC_HWF1R_RSSEN_WIDTH 1 +#define MAC_HWF1R_RXFIFOSIZE_INDEX 0 +#define MAC_HWF1R_RXFIFOSIZE_WIDTH 5 +#define MAC_HWF1R_SPHEN_INDEX 17 +#define MAC_HWF1R_SPHEN_WIDTH 1 +#define MAC_HWF1R_TSOEN_INDEX 18 +#define MAC_HWF1R_TSOEN_WIDTH 1 +#define MAC_HWF1R_TXFIFOSIZE_INDEX 6 +#define MAC_HWF1R_TXFIFOSIZE_WIDTH 5 +#define MAC_HWF2R_AUXSNAPNUM_INDEX 28 +#define MAC_HWF2R_AUXSNAPNUM_WIDTH 3 +#define MAC_HWF2R_PPSOUTNUM_INDEX 24 +#define MAC_HWF2R_PPSOUTNUM_WIDTH 3 +#define MAC_HWF2R_RXCHCNT_INDEX 12 +#define MAC_HWF2R_RXCHCNT_WIDTH 4 +#define MAC_HWF2R_RXQCNT_INDEX 0 +#define MAC_HWF2R_RXQCNT_WIDTH 4 +#define MAC_HWF2R_TXCHCNT_INDEX 18 +#define MAC_HWF2R_TXCHCNT_WIDTH 4 +#define MAC_HWF2R_TXQCNT_INDEX 6 +#define MAC_HWF2R_TXQCNT_WIDTH 4 +#define MAC_IER_TSIE_INDEX 12 +#define MAC_IER_TSIE_WIDTH 1 +#define MAC_ISR_MMCRXIS_INDEX 9 +#define MAC_ISR_MMCRXIS_WIDTH 1 +#define MAC_ISR_MMCTXIS_INDEX 10 +#define MAC_ISR_MMCTXIS_WIDTH 1 +#define MAC_ISR_PMTIS_INDEX 4 +#define MAC_ISR_PMTIS_WIDTH 1 +#define MAC_ISR_SMI_INDEX 1 +#define MAC_ISR_SMI_WIDTH 1 +#define MAC_ISR_LSI_INDEX 0 +#define MAC_ISR_LSI_WIDTH 1 +#define MAC_ISR_LS_INDEX 24 +#define MAC_ISR_LS_WIDTH 2 +#define MAC_ISR_TSIS_INDEX 12 +#define MAC_ISR_TSIS_WIDTH 1 +#define 
MAC_MACA1HR_AE_INDEX 31 +#define MAC_MACA1HR_AE_WIDTH 1 +#define MAC_MDIOIER_SNGLCOMPIE_INDEX 12 +#define MAC_MDIOIER_SNGLCOMPIE_WIDTH 1 +#define MAC_MDIOISR_SNGLCOMPINT_INDEX 12 +#define MAC_MDIOISR_SNGLCOMPINT_WIDTH 1 +#define MAC_MDIOSCAR_DA_INDEX 21 +#define MAC_MDIOSCAR_DA_WIDTH 5 +#define MAC_MDIOSCAR_PA_INDEX 16 +#define MAC_MDIOSCAR_PA_WIDTH 5 +#define MAC_MDIOSCAR_RA_INDEX 0 +#define MAC_MDIOSCAR_RA_WIDTH 16 +#define MAC_MDIOSCAR_REG_INDEX 0 +#define MAC_MDIOSCAR_REG_WIDTH 21 +#define MAC_MDIOSCCDR_BUSY_INDEX 22 +#define MAC_MDIOSCCDR_BUSY_WIDTH 1 +#define MAC_MDIOSCCDR_CMD_INDEX 16 +#define MAC_MDIOSCCDR_CMD_WIDTH 2 +#define MAC_MDIOSCCDR_CR_INDEX 19 +#define MAC_MDIOSCCDR_CR_WIDTH 3 +#define MAC_MDIOSCCDR_DATA_INDEX 0 +#define MAC_MDIOSCCDR_DATA_WIDTH 16 +#define MAC_MDIOSCCDR_SADDR_INDEX 18 +#define MAC_MDIOSCCDR_SADDR_WIDTH 1 +#define MAC_PFR_HMC_INDEX 2 +#define MAC_PFR_HMC_WIDTH 1 +#define MAC_PFR_HPF_INDEX 10 +#define MAC_PFR_HPF_WIDTH 1 +#define MAC_PFR_HUC_INDEX 1 +#define MAC_PFR_HUC_WIDTH 1 +#define MAC_PFR_PM_INDEX 4 +#define MAC_PFR_PM_WIDTH 1 +#define MAC_PFR_PR_INDEX 0 +#define MAC_PFR_PR_WIDTH 1 +#define MAC_PFR_VTFE_INDEX 16 +#define MAC_PFR_VTFE_WIDTH 1 +#define MAC_PMTCSR_MGKPKTEN_INDEX 1 +#define MAC_PMTCSR_MGKPKTEN_WIDTH 1 +#define MAC_PMTCSR_PWRDWN_INDEX 0 +#define MAC_PMTCSR_PWRDWN_WIDTH 1 +#define MAC_PMTCSR_RWKFILTRST_INDEX 31 +#define MAC_PMTCSR_RWKFILTRST_WIDTH 1 +#define MAC_PMTCSR_RWKPKTEN_INDEX 2 +#define MAC_PMTCSR_RWKPKTEN_WIDTH 1 +#define MAC_Q0TFCR_PT_INDEX 16 +#define MAC_Q0TFCR_PT_WIDTH 16 +#define MAC_Q0TFCR_TFE_INDEX 1 +#define MAC_Q0TFCR_TFE_WIDTH 1 +#define MAC_RCR_ACS_INDEX 1 +#define MAC_RCR_ACS_WIDTH 1 +#define MAC_RCR_CST_INDEX 2 +#define MAC_RCR_CST_WIDTH 1 +#define MAC_RCR_DCRCC_INDEX 3 +#define MAC_RCR_DCRCC_WIDTH 1 +#define MAC_RCR_HDSMS_INDEX 12 +#define MAC_RCR_HDSMS_WIDTH 3 +#define MAC_RCR_IPC_INDEX 9 +#define MAC_RCR_IPC_WIDTH 1 +#define MAC_RCR_JE_INDEX 8 +#define MAC_RCR_JE_WIDTH 1 +#define MAC_RCR_LM_INDEX 10 +#define MAC_RCR_LM_WIDTH 1 +#define MAC_RCR_RE_INDEX 0 +#define MAC_RCR_RE_WIDTH 1 +#define MAC_RFCR_PFCE_INDEX 8 +#define MAC_RFCR_PFCE_WIDTH 1 +#define MAC_RFCR_RFE_INDEX 0 +#define MAC_RFCR_RFE_WIDTH 1 +#define MAC_RFCR_UP_INDEX 1 +#define MAC_RFCR_UP_WIDTH 1 +#define MAC_RQC0R_RXQ0EN_INDEX 0 +#define MAC_RQC0R_RXQ0EN_WIDTH 2 +#define MAC_RSSAR_ADDRT_INDEX 2 +#define MAC_RSSAR_ADDRT_WIDTH 1 +#define MAC_RSSAR_CT_INDEX 1 +#define MAC_RSSAR_CT_WIDTH 1 +#define MAC_RSSAR_OB_INDEX 0 +#define MAC_RSSAR_OB_WIDTH 1 +#define MAC_RSSAR_RSSIA_INDEX 8 +#define MAC_RSSAR_RSSIA_WIDTH 8 +#define MAC_RSSCR_IP2TE_INDEX 1 +#define MAC_RSSCR_IP2TE_WIDTH 1 +#define MAC_RSSCR_RSSE_INDEX 0 +#define MAC_RSSCR_RSSE_WIDTH 1 +#define MAC_RSSCR_TCP4TE_INDEX 2 +#define MAC_RSSCR_TCP4TE_WIDTH 1 +#define MAC_RSSCR_UDP4TE_INDEX 3 +#define MAC_RSSCR_UDP4TE_WIDTH 1 +#define MAC_RSSDR_DMCH_INDEX 0 +#define MAC_RSSDR_DMCH_WIDTH 4 +#define MAC_SSIR_SNSINC_INDEX 8 +#define MAC_SSIR_SNSINC_WIDTH 8 +#define MAC_SSIR_SSINC_INDEX 16 +#define MAC_SSIR_SSINC_WIDTH 8 +#define MAC_TCR_SS_INDEX 29 +#define MAC_TCR_SS_WIDTH 2 +#define MAC_TCR_TE_INDEX 0 +#define MAC_TCR_TE_WIDTH 1 +#define MAC_TSCR_AV8021ASMEN_INDEX 28 +#define MAC_TSCR_AV8021ASMEN_WIDTH 1 +#define MAC_TSCR_SNAPTYPSEL_INDEX 16 +#define MAC_TSCR_SNAPTYPSEL_WIDTH 2 +#define MAC_TSCR_TSADDREG_INDEX 5 +#define MAC_TSCR_TSADDREG_WIDTH 1 +#define MAC_TSCR_TSCFUPDT_INDEX 1 +#define MAC_TSCR_TSCFUPDT_WIDTH 1 +#define MAC_TSCR_TSCTRLSSR_INDEX 9 +#define MAC_TSCR_TSCTRLSSR_WIDTH 1 +#define 
MAC_TSCR_TSENA_INDEX 0 +#define MAC_TSCR_TSENA_WIDTH 1 +#define MAC_TSCR_TSENALL_INDEX 8 +#define MAC_TSCR_TSENALL_WIDTH 1 +#define MAC_TSCR_TSEVNTENA_INDEX 14 +#define MAC_TSCR_TSEVNTENA_WIDTH 1 +#define MAC_TSCR_TSINIT_INDEX 2 +#define MAC_TSCR_TSINIT_WIDTH 1 +#define MAC_TSCR_TSIPENA_INDEX 11 +#define MAC_TSCR_TSIPENA_WIDTH 1 +#define MAC_TSCR_TSIPV4ENA_INDEX 13 +#define MAC_TSCR_TSIPV4ENA_WIDTH 1 +#define MAC_TSCR_TSIPV6ENA_INDEX 12 +#define MAC_TSCR_TSIPV6ENA_WIDTH 1 +#define MAC_TSCR_TSMSTRENA_INDEX 15 +#define MAC_TSCR_TSMSTRENA_WIDTH 1 +#define MAC_TSCR_TSVER2ENA_INDEX 10 +#define MAC_TSCR_TSVER2ENA_WIDTH 1 +#define MAC_TSCR_TXTSSTSM_INDEX 24 +#define MAC_TSCR_TXTSSTSM_WIDTH 1 +#define MAC_TSSR_TXTSC_INDEX 15 +#define MAC_TSSR_TXTSC_WIDTH 1 +#define MAC_TXSNR_TXTSSTSMIS_INDEX 31 +#define MAC_TXSNR_TXTSSTSMIS_WIDTH 1 +#define MAC_VLANHTR_VLHT_INDEX 0 +#define MAC_VLANHTR_VLHT_WIDTH 16 +#define MAC_VLANIR_VLTI_INDEX 20 +#define MAC_VLANIR_VLTI_WIDTH 1 +#define MAC_VLANIR_CSVL_INDEX 19 +#define MAC_VLANIR_CSVL_WIDTH 1 +#define MAC_VLANTR_DOVLTC_INDEX 20 +#define MAC_VLANTR_DOVLTC_WIDTH 1 +#define MAC_VLANTR_ERSVLM_INDEX 19 +#define MAC_VLANTR_ERSVLM_WIDTH 1 +#define MAC_VLANTR_ESVL_INDEX 18 +#define MAC_VLANTR_ESVL_WIDTH 1 +#define MAC_VLANTR_ETV_INDEX 16 +#define MAC_VLANTR_ETV_WIDTH 1 +#define MAC_VLANTR_EVLS_INDEX 21 +#define MAC_VLANTR_EVLS_WIDTH 2 +#define MAC_VLANTR_EVLRXS_INDEX 24 +#define MAC_VLANTR_EVLRXS_WIDTH 1 +#define MAC_VLANTR_VL_INDEX 0 +#define MAC_VLANTR_VL_WIDTH 16 +#define MAC_VLANTR_VTHM_INDEX 25 +#define MAC_VLANTR_VTHM_WIDTH 1 +#define MAC_VLANTR_VTIM_INDEX 17 +#define MAC_VLANTR_VTIM_WIDTH 1 +#define MAC_VR_DEVID_INDEX 8 +#define MAC_VR_DEVID_WIDTH 8 +#define MAC_VR_SNPSVER_INDEX 0 +#define MAC_VR_SNPSVER_WIDTH 8 +#define MAC_VR_USERVER_INDEX 16 +#define MAC_VR_USERVER_WIDTH 8 + +/* MMC register offsets */ +#define MMC_CR 0x0800 +#define MMC_RISR 0x0804 +#define MMC_TISR 0x0808 +#define MMC_RIER 0x080c +#define MMC_TIER 0x0810 +#define MMC_TXOCTETCOUNT_GB_LO 0x0814 +#define MMC_TXOCTETCOUNT_GB_HI 0x0818 +#define MMC_TXFRAMECOUNT_GB_LO 0x081c +#define MMC_TXFRAMECOUNT_GB_HI 0x0820 +#define MMC_TXBROADCASTFRAMES_G_LO 0x0824 +#define MMC_TXBROADCASTFRAMES_G_HI 0x0828 +#define MMC_TXMULTICASTFRAMES_G_LO 0x082c +#define MMC_TXMULTICASTFRAMES_G_HI 0x0830 +#define MMC_TX64OCTETS_GB_LO 0x0834 +#define MMC_TX64OCTETS_GB_HI 0x0838 +#define MMC_TX65TO127OCTETS_GB_LO 0x083c +#define MMC_TX65TO127OCTETS_GB_HI 0x0840 +#define MMC_TX128TO255OCTETS_GB_LO 0x0844 +#define MMC_TX128TO255OCTETS_GB_HI 0x0848 +#define MMC_TX256TO511OCTETS_GB_LO 0x084c +#define MMC_TX256TO511OCTETS_GB_HI 0x0850 +#define MMC_TX512TO1023OCTETS_GB_LO 0x0854 +#define MMC_TX512TO1023OCTETS_GB_HI 0x0858 +#define MMC_TX1024TOMAXOCTETS_GB_LO 0x085c +#define MMC_TX1024TOMAXOCTETS_GB_HI 0x0860 +#define MMC_TXUNICASTFRAMES_GB_LO 0x0864 +#define MMC_TXUNICASTFRAMES_GB_HI 0x0868 +#define MMC_TXMULTICASTFRAMES_GB_LO 0x086c +#define MMC_TXMULTICASTFRAMES_GB_HI 0x0870 +#define MMC_TXBROADCASTFRAMES_GB_LO 0x0874 +#define MMC_TXBROADCASTFRAMES_GB_HI 0x0878 +#define MMC_TXUNDERFLOWERROR_LO 0x087c +#define MMC_TXUNDERFLOWERROR_HI 0x0880 +#define MMC_TXOCTETCOUNT_G_LO 0x0884 +#define MMC_TXOCTETCOUNT_G_HI 0x0888 +#define MMC_TXFRAMECOUNT_G_LO 0x088c +#define MMC_TXFRAMECOUNT_G_HI 0x0890 +#define MMC_TXPAUSEFRAMES_LO 0x0894 +#define MMC_TXPAUSEFRAMES_HI 0x0898 +#define MMC_TXVLANFRAMES_G_LO 0x089c +#define MMC_TXVLANFRAMES_G_HI 0x08a0 +#define MMC_RXFRAMECOUNT_GB_LO 0x0900 +#define MMC_RXFRAMECOUNT_GB_HI 0x0904 +#define 
MMC_RXOCTETCOUNT_GB_LO 0x0908 +#define MMC_RXOCTETCOUNT_GB_HI 0x090c +#define MMC_RXOCTETCOUNT_G_LO 0x0910 +#define MMC_RXOCTETCOUNT_G_HI 0x0914 +#define MMC_RXBROADCASTFRAMES_G_LO 0x0918 +#define MMC_RXBROADCASTFRAMES_G_HI 0x091c +#define MMC_RXMULTICASTFRAMES_G_LO 0x0920 +#define MMC_RXMULTICASTFRAMES_G_HI 0x0924 +#define MMC_RXCRCERROR_LO 0x0928 +#define MMC_RXCRCERROR_HI 0x092c +#define MMC_RXRUNTERROR 0x0930 +#define MMC_RXJABBERERROR 0x0934 +#define MMC_RXUNDERSIZE_G 0x0938 +#define MMC_RXOVERSIZE_G 0x093c +#define MMC_RX64OCTETS_GB_LO 0x0940 +#define MMC_RX64OCTETS_GB_HI 0x0944 +#define MMC_RX65TO127OCTETS_GB_LO 0x0948 +#define MMC_RX65TO127OCTETS_GB_HI 0x094c +#define MMC_RX128TO255OCTETS_GB_LO 0x0950 +#define MMC_RX128TO255OCTETS_GB_HI 0x0954 +#define MMC_RX256TO511OCTETS_GB_LO 0x0958 +#define MMC_RX256TO511OCTETS_GB_HI 0x095c +#define MMC_RX512TO1023OCTETS_GB_LO 0x0960 +#define MMC_RX512TO1023OCTETS_GB_HI 0x0964 +#define MMC_RX1024TOMAXOCTETS_GB_LO 0x0968 +#define MMC_RX1024TOMAXOCTETS_GB_HI 0x096c +#define MMC_RXUNICASTFRAMES_G_LO 0x0970 +#define MMC_RXUNICASTFRAMES_G_HI 0x0974 +#define MMC_RXLENGTHERROR_LO 0x0978 +#define MMC_RXLENGTHERROR_HI 0x097c +#define MMC_RXOUTOFRANGETYPE_LO 0x0980 +#define MMC_RXOUTOFRANGETYPE_HI 0x0984 +#define MMC_RXPAUSEFRAMES_LO 0x0988 +#define MMC_RXPAUSEFRAMES_HI 0x098c +#define MMC_RXFIFOOVERFLOW_LO 0x0990 +#define MMC_RXFIFOOVERFLOW_HI 0x0994 +#define MMC_RXVLANFRAMES_GB_LO 0x0998 +#define MMC_RXVLANFRAMES_GB_HI 0x099c +#define MMC_RXWATCHDOGERROR 0x09a0 + +/* MMC register entry bit positions and sizes */ +#define MMC_CR_CR_INDEX 0 +#define MMC_CR_CR_WIDTH 1 +#define MMC_CR_CSR_INDEX 1 +#define MMC_CR_CSR_WIDTH 1 +#define MMC_CR_ROR_INDEX 2 +#define MMC_CR_ROR_WIDTH 1 +#define MMC_CR_MCF_INDEX 3 +#define MMC_CR_MCF_WIDTH 1 +#define MMC_CR_MCT_INDEX 4 +#define MMC_CR_MCT_WIDTH 2 +#define MMC_RIER_ALL_INTERRUPTS_INDEX 0 +#define MMC_RIER_ALL_INTERRUPTS_WIDTH 23 +#define MMC_RISR_RXFRAMECOUNT_GB_INDEX 0 +#define MMC_RISR_RXFRAMECOUNT_GB_WIDTH 1 +#define MMC_RISR_RXOCTETCOUNT_GB_INDEX 1 +#define MMC_RISR_RXOCTETCOUNT_GB_WIDTH 1 +#define MMC_RISR_RXOCTETCOUNT_G_INDEX 2 +#define MMC_RISR_RXOCTETCOUNT_G_WIDTH 1 +#define MMC_RISR_RXBROADCASTFRAMES_G_INDEX 3 +#define MMC_RISR_RXBROADCASTFRAMES_G_WIDTH 1 +#define MMC_RISR_RXMULTICASTFRAMES_G_INDEX 4 +#define MMC_RISR_RXMULTICASTFRAMES_G_WIDTH 1 +#define MMC_RISR_RXCRCERROR_INDEX 5 +#define MMC_RISR_RXCRCERROR_WIDTH 1 +#define MMC_RISR_RXRUNTERROR_INDEX 6 +#define MMC_RISR_RXRUNTERROR_WIDTH 1 +#define MMC_RISR_RXJABBERERROR_INDEX 7 +#define MMC_RISR_RXJABBERERROR_WIDTH 1 +#define MMC_RISR_RXUNDERSIZE_G_INDEX 8 +#define MMC_RISR_RXUNDERSIZE_G_WIDTH 1 +#define MMC_RISR_RXOVERSIZE_G_INDEX 9 +#define MMC_RISR_RXOVERSIZE_G_WIDTH 1 +#define MMC_RISR_RX64OCTETS_GB_INDEX 10 +#define MMC_RISR_RX64OCTETS_GB_WIDTH 1 +#define MMC_RISR_RX65TO127OCTETS_GB_INDEX 11 +#define MMC_RISR_RX65TO127OCTETS_GB_WIDTH 1 +#define MMC_RISR_RX128TO255OCTETS_GB_INDEX 12 +#define MMC_RISR_RX128TO255OCTETS_GB_WIDTH 1 +#define MMC_RISR_RX256TO511OCTETS_GB_INDEX 13 +#define MMC_RISR_RX256TO511OCTETS_GB_WIDTH 1 +#define MMC_RISR_RX512TO1023OCTETS_GB_INDEX 14 +#define MMC_RISR_RX512TO1023OCTETS_GB_WIDTH 1 +#define MMC_RISR_RX1024TOMAXOCTETS_GB_INDEX 15 +#define MMC_RISR_RX1024TOMAXOCTETS_GB_WIDTH 1 +#define MMC_RISR_RXUNICASTFRAMES_G_INDEX 16 +#define MMC_RISR_RXUNICASTFRAMES_G_WIDTH 1 +#define MMC_RISR_RXLENGTHERROR_INDEX 17 +#define MMC_RISR_RXLENGTHERROR_WIDTH 1 +#define MMC_RISR_RXOUTOFRANGETYPE_INDEX 18 +#define 
MMC_RISR_RXOUTOFRANGETYPE_WIDTH 1 +#define MMC_RISR_RXPAUSEFRAMES_INDEX 19 +#define MMC_RISR_RXPAUSEFRAMES_WIDTH 1 +#define MMC_RISR_RXFIFOOVERFLOW_INDEX 20 +#define MMC_RISR_RXFIFOOVERFLOW_WIDTH 1 +#define MMC_RISR_RXVLANFRAMES_GB_INDEX 21 +#define MMC_RISR_RXVLANFRAMES_GB_WIDTH 1 +#define MMC_RISR_RXWATCHDOGERROR_INDEX 22 +#define MMC_RISR_RXWATCHDOGERROR_WIDTH 1 +#define MMC_TIER_ALL_INTERRUPTS_INDEX 0 +#define MMC_TIER_ALL_INTERRUPTS_WIDTH 18 +#define MMC_TISR_TXOCTETCOUNT_GB_INDEX 0 +#define MMC_TISR_TXOCTETCOUNT_GB_WIDTH 1 +#define MMC_TISR_TXFRAMECOUNT_GB_INDEX 1 +#define MMC_TISR_TXFRAMECOUNT_GB_WIDTH 1 +#define MMC_TISR_TXBROADCASTFRAMES_G_INDEX 2 +#define MMC_TISR_TXBROADCASTFRAMES_G_WIDTH 1 +#define MMC_TISR_TXMULTICASTFRAMES_G_INDEX 3 +#define MMC_TISR_TXMULTICASTFRAMES_G_WIDTH 1 +#define MMC_TISR_TX64OCTETS_GB_INDEX 4 +#define MMC_TISR_TX64OCTETS_GB_WIDTH 1 +#define MMC_TISR_TX65TO127OCTETS_GB_INDEX 5 +#define MMC_TISR_TX65TO127OCTETS_GB_WIDTH 1 +#define MMC_TISR_TX128TO255OCTETS_GB_INDEX 6 +#define MMC_TISR_TX128TO255OCTETS_GB_WIDTH 1 +#define MMC_TISR_TX256TO511OCTETS_GB_INDEX 7 +#define MMC_TISR_TX256TO511OCTETS_GB_WIDTH 1 +#define MMC_TISR_TX512TO1023OCTETS_GB_INDEX 8 +#define MMC_TISR_TX512TO1023OCTETS_GB_WIDTH 1 +#define MMC_TISR_TX1024TOMAXOCTETS_GB_INDEX 9 +#define MMC_TISR_TX1024TOMAXOCTETS_GB_WIDTH 1 +#define MMC_TISR_TXUNICASTFRAMES_GB_INDEX 10 +#define MMC_TISR_TXUNICASTFRAMES_GB_WIDTH 1 +#define MMC_TISR_TXMULTICASTFRAMES_GB_INDEX 11 +#define MMC_TISR_TXMULTICASTFRAMES_GB_WIDTH 1 +#define MMC_TISR_TXBROADCASTFRAMES_GB_INDEX 12 +#define MMC_TISR_TXBROADCASTFRAMES_GB_WIDTH 1 +#define MMC_TISR_TXUNDERFLOWERROR_INDEX 13 +#define MMC_TISR_TXUNDERFLOWERROR_WIDTH 1 +#define MMC_TISR_TXOCTETCOUNT_G_INDEX 14 +#define MMC_TISR_TXOCTETCOUNT_G_WIDTH 1 +#define MMC_TISR_TXFRAMECOUNT_G_INDEX 15 +#define MMC_TISR_TXFRAMECOUNT_G_WIDTH 1 +#define MMC_TISR_TXPAUSEFRAMES_INDEX 16 +#define MMC_TISR_TXPAUSEFRAMES_WIDTH 1 +#define MMC_TISR_TXVLANFRAMES_G_INDEX 17 +#define MMC_TISR_TXVLANFRAMES_G_WIDTH 1 + +/* MTL register offsets */ +#define MTL_OMR 0x1000 +#define MTL_FDCR 0x1008 +#define MTL_FDSR 0x100c +#define MTL_FDDR 0x1010 +#define MTL_ISR 0x1020 +#define MTL_RQDCM0R 0x1030 +#define MTL_TCPM0R 0x1040 +#define MTL_TCPM1R 0x1044 + +#define MTL_RQDCM_INC 4 +#define MTL_RQDCM_Q_PER_REG 4 +#define MTL_TCPM_INC 4 +#define MTL_TCPM_TC_PER_REG 4 + +/* MTL register entry bit positions and sizes */ +#define MTL_OMR_ETSALG_INDEX 5 +#define MTL_OMR_ETSALG_WIDTH 2 +#define MTL_OMR_RAA_INDEX 2 +#define MTL_OMR_RAA_WIDTH 1 + +/* MTL queue register offsets + * Multiple queues can be active. The first queue has registers + * that begin at 0x1100. Each subsequent queue has registers that + * are accessed using an offset of 0x80 from the previous queue. 
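+ * + * For example, the queue 1 copy of the receive queue operation mode + * register MTL_Q_RQOMR (0x40) is accessed at 0x1100 + (1 * 0x80) + 0x40 = 0x11c0.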
+ */ +#define MTL_Q_BASE 0x1100 +#define MTL_Q_INC 0x80 + +#define MTL_Q_TQOMR 0x00 +#define MTL_Q_TQUR 0x04 +#define MTL_Q_TQDR 0x08 +#define MTL_Q_RQOMR 0x40 +#define MTL_Q_RQMPOCR 0x44 +#define MTL_Q_RQDR 0x48 +#define MTL_Q_RQFCR 0x50 +#define MTL_Q_IER 0x70 +#define MTL_Q_ISR 0x74 + +/* MTL queue register entry bit positions and sizes */ +#define MTL_Q_RQDR_PRXQ_INDEX 16 +#define MTL_Q_RQDR_PRXQ_WIDTH 14 +#define MTL_Q_RQDR_RXQSTS_INDEX 4 +#define MTL_Q_RQDR_RXQSTS_WIDTH 2 +#define MTL_Q_RQFCR_RFA_INDEX 1 +#define MTL_Q_RQFCR_RFA_WIDTH 6 +#define MTL_Q_RQFCR_RFD_INDEX 17 +#define MTL_Q_RQFCR_RFD_WIDTH 6 +#define MTL_Q_RQOMR_EHFC_INDEX 7 +#define MTL_Q_RQOMR_EHFC_WIDTH 1 +#define MTL_Q_RQOMR_RQS_INDEX 16 +#define MTL_Q_RQOMR_RQS_WIDTH 9 +#define MTL_Q_RQOMR_RSF_INDEX 5 +#define MTL_Q_RQOMR_RSF_WIDTH 1 +#define MTL_Q_RQOMR_RTC_INDEX 0 +#define MTL_Q_RQOMR_RTC_WIDTH 2 +#define MTL_Q_TQDR_TRCSTS_INDEX 1 +#define MTL_Q_TQDR_TRCSTS_WIDTH 2 +#define MTL_Q_TQDR_TXQSTS_INDEX 4 +#define MTL_Q_TQDR_TXQSTS_WIDTH 1 +#define MTL_Q_TQOMR_FTQ_INDEX 0 +#define MTL_Q_TQOMR_FTQ_WIDTH 1 +#define MTL_Q_TQOMR_Q2TCMAP_INDEX 8 +#define MTL_Q_TQOMR_Q2TCMAP_WIDTH 3 +#define MTL_Q_TQOMR_TQS_INDEX 16 +#define MTL_Q_TQOMR_TQS_WIDTH 10 +#define MTL_Q_TQOMR_TSF_INDEX 1 +#define MTL_Q_TQOMR_TSF_WIDTH 1 +#define MTL_Q_TQOMR_TTC_INDEX 4 +#define MTL_Q_TQOMR_TTC_WIDTH 3 +#define MTL_Q_TQOMR_TXQEN_INDEX 2 +#define MTL_Q_TQOMR_TXQEN_WIDTH 2 + +/* MTL queue register value */ +#define MTL_RSF_DISABLE 0x00 +#define MTL_RSF_ENABLE 0x01 +#define MTL_TSF_DISABLE 0x00 +#define MTL_TSF_ENABLE 0x01 + +#define MTL_RX_THRESHOLD_64 0x00 +#define MTL_RX_THRESHOLD_96 0x02 +#define MTL_RX_THRESHOLD_128 0x03 +#define MTL_TX_THRESHOLD_32 0x01 +#define MTL_TX_THRESHOLD_64 0x00 +#define MTL_TX_THRESHOLD_96 0x02 +#define MTL_TX_THRESHOLD_128 0x03 +#define MTL_TX_THRESHOLD_192 0x04 +#define MTL_TX_THRESHOLD_256 0x05 +#define MTL_TX_THRESHOLD_384 0x06 +#define MTL_TX_THRESHOLD_512 0x07 + +#define MTL_ETSALG_WRR 0x00 +#define MTL_ETSALG_WFQ 0x01 +#define MTL_ETSALG_DWRR 0x02 +#define MTL_RAA_SP 0x00 +#define MTL_RAA_WSP 0x01 + +#define MTL_Q_DISABLED 0x00 +#define MTL_Q_ENABLED 0x02 + +/* MTL traffic class register offsets + * Multiple traffic classes can be active. The first class has registers + * that begin at 0x1100. Each subsequent traffic class has registers that + * are accessed using an offset of 0x80 from the previous traffic class.
+ */ +#define MTL_TC_BASE MTL_Q_BASE +#define MTL_TC_INC MTL_Q_INC + +#define MTL_TC_ETSCR 0x10 +#define MTL_TC_ETSSR 0x14 +#define MTL_TC_QWR 0x18 + +/* MTL traffic class register entry bit positions and sizes */ +#define MTL_TC_ETSCR_TSA_INDEX 0 +#define MTL_TC_ETSCR_TSA_WIDTH 2 +#define MTL_TC_QWR_QW_INDEX 0 +#define MTL_TC_QWR_QW_WIDTH 21 + +/* MTL traffic class register value */ +#define MTL_TSA_SP 0x00 +#define MTL_TSA_ETS 0x02 + +/* PCS register offsets */ +#define PCS_V1_WINDOW_SELECT 0x03fc +#define PCS_V2_WINDOW_DEF 0x9060 +#define PCS_V2_WINDOW_SELECT 0x9064 + +/* PCS register entry bit positions and sizes */ +#define PCS_V2_WINDOW_DEF_OFFSET_INDEX 6 +#define PCS_V2_WINDOW_DEF_OFFSET_WIDTH 14 +#define PCS_V2_WINDOW_DEF_SIZE_INDEX 2 +#define PCS_V2_WINDOW_DEF_SIZE_WIDTH 4 + +/* SerDes integration register offsets */ +#define SIR0_KR_RT_1 0x002c +#define SIR0_STATUS 0x0040 +#define SIR1_SPEED 0x0000 + +/* SerDes integration register entry bit positions and sizes */ +#define SIR0_KR_RT_1_RESET_INDEX 11 +#define SIR0_KR_RT_1_RESET_WIDTH 1 +#define SIR0_STATUS_RX_READY_INDEX 0 +#define SIR0_STATUS_RX_READY_WIDTH 1 +#define SIR0_STATUS_TX_READY_INDEX 8 +#define SIR0_STATUS_TX_READY_WIDTH 1 +#define SIR1_SPEED_CDR_RATE_INDEX 12 +#define SIR1_SPEED_CDR_RATE_WIDTH 4 +#define SIR1_SPEED_DATARATE_INDEX 4 +#define SIR1_SPEED_DATARATE_WIDTH 2 +#define SIR1_SPEED_PLLSEL_INDEX 3 +#define SIR1_SPEED_PLLSEL_WIDTH 1 +#define SIR1_SPEED_RATECHANGE_INDEX 6 +#define SIR1_SPEED_RATECHANGE_WIDTH 1 +#define SIR1_SPEED_TXAMP_INDEX 8 +#define SIR1_SPEED_TXAMP_WIDTH 4 +#define SIR1_SPEED_WORDMODE_INDEX 0 +#define SIR1_SPEED_WORDMODE_WIDTH 3 + +/* SerDes RxTx register offsets */ +#define RXTX_REG6 0x0018 +#define RXTX_REG20 0x0050 +#define RXTX_REG22 0x0058 +#define RXTX_REG114 0x01c8 +#define RXTX_REG129 0x0204 + +/* SerDes RxTx register entry bit positions and sizes */ +#define RXTX_REG6_RESETB_RXD_INDEX 8 +#define RXTX_REG6_RESETB_RXD_WIDTH 1 +#define RXTX_REG20_BLWC_ENA_INDEX 2 +#define RXTX_REG20_BLWC_ENA_WIDTH 1 +#define RXTX_REG114_PQ_REG_INDEX 9 +#define RXTX_REG114_PQ_REG_WIDTH 7 +#define RXTX_REG129_RXDFE_CONFIG_INDEX 14 +#define RXTX_REG129_RXDFE_CONFIG_WIDTH 2 + +/* MAC Control register offsets */ +#define XP_PROP_0 0x0000 +#define XP_PROP_1 0x0004 +#define XP_PROP_2 0x0008 +#define XP_PROP_3 0x000c +#define XP_PROP_4 0x0010 +#define XP_PROP_5 0x0014 +#define XP_MAC_ADDR_LO 0x0020 +#define XP_MAC_ADDR_HI 0x0024 +#define XP_ECC_ISR 0x0030 +#define XP_ECC_IER 0x0034 +#define XP_ECC_CNT0 0x003c +#define XP_ECC_CNT1 0x0040 +#define XP_DRIVER_INT_REQ 0x0060 +#define XP_DRIVER_INT_RO 0x0064 +#define XP_DRIVER_SCRATCH_0 0x0068 +#define XP_DRIVER_SCRATCH_1 0x006c +#define XP_INT_EN 0x0078 +#define XP_I2C_MUTEX 0x0080 +#define XP_MDIO_MUTEX 0x0084 + +/* MAC Control register entry bit positions and sizes */ +#define XP_DRIVER_INT_REQ_REQUEST_INDEX 0 +#define XP_DRIVER_INT_REQ_REQUEST_WIDTH 1 +#define XP_DRIVER_INT_RO_STATUS_INDEX 0 +#define XP_DRIVER_INT_RO_STATUS_WIDTH 1 +#define XP_DRIVER_SCRATCH_0_COMMAND_INDEX 0 +#define XP_DRIVER_SCRATCH_0_COMMAND_WIDTH 8 +#define XP_DRIVER_SCRATCH_0_SUB_COMMAND_INDEX 8 +#define XP_DRIVER_SCRATCH_0_SUB_COMMAND_WIDTH 8 +#define XP_ECC_CNT0_RX_DED_INDEX 24 +#define XP_ECC_CNT0_RX_DED_WIDTH 8 +#define XP_ECC_CNT0_RX_SEC_INDEX 16 +#define XP_ECC_CNT0_RX_SEC_WIDTH 8 +#define XP_ECC_CNT0_TX_DED_INDEX 8 +#define XP_ECC_CNT0_TX_DED_WIDTH 8 +#define XP_ECC_CNT0_TX_SEC_INDEX 0 +#define XP_ECC_CNT0_TX_SEC_WIDTH 8 +#define XP_ECC_CNT1_DESC_DED_INDEX 8 +#define 
XP_ECC_CNT1_DESC_DED_WIDTH 8 +#define XP_ECC_CNT1_DESC_SEC_INDEX 0 +#define XP_ECC_CNT1_DESC_SEC_WIDTH 8 +#define XP_ECC_IER_DESC_DED_INDEX 0 +#define XP_ECC_IER_DESC_DED_WIDTH 1 +#define XP_ECC_IER_DESC_SEC_INDEX 1 +#define XP_ECC_IER_DESC_SEC_WIDTH 1 +#define XP_ECC_IER_RX_DED_INDEX 2 +#define XP_ECC_IER_RX_DED_WIDTH 1 +#define XP_ECC_IER_RX_SEC_INDEX 3 +#define XP_ECC_IER_RX_SEC_WIDTH 1 +#define XP_ECC_IER_TX_DED_INDEX 4 +#define XP_ECC_IER_TX_DED_WIDTH 1 +#define XP_ECC_IER_TX_SEC_INDEX 5 +#define XP_ECC_IER_TX_SEC_WIDTH 1 +#define XP_ECC_ISR_DESC_DED_INDEX 0 +#define XP_ECC_ISR_DESC_DED_WIDTH 1 +#define XP_ECC_ISR_DESC_SEC_INDEX 1 +#define XP_ECC_ISR_DESC_SEC_WIDTH 1 +#define XP_ECC_ISR_RX_DED_INDEX 2 +#define XP_ECC_ISR_RX_DED_WIDTH 1 +#define XP_ECC_ISR_RX_SEC_INDEX 3 +#define XP_ECC_ISR_RX_SEC_WIDTH 1 +#define XP_ECC_ISR_TX_DED_INDEX 4 +#define XP_ECC_ISR_TX_DED_WIDTH 1 +#define XP_ECC_ISR_TX_SEC_INDEX 5 +#define XP_ECC_ISR_TX_SEC_WIDTH 1 +#define XP_I2C_MUTEX_BUSY_INDEX 31 +#define XP_I2C_MUTEX_BUSY_WIDTH 1 +#define XP_I2C_MUTEX_ID_INDEX 29 +#define XP_I2C_MUTEX_ID_WIDTH 2 +#define XP_I2C_MUTEX_ACTIVE_INDEX 0 +#define XP_I2C_MUTEX_ACTIVE_WIDTH 1 +#define XP_MAC_ADDR_HI_VALID_INDEX 31 +#define XP_MAC_ADDR_HI_VALID_WIDTH 1 +#define XP_PROP_0_CONN_TYPE_INDEX 28 +#define XP_PROP_0_CONN_TYPE_WIDTH 3 +#define XP_PROP_0_MDIO_ADDR_INDEX 16 +#define XP_PROP_0_MDIO_ADDR_WIDTH 5 +#define XP_PROP_0_PORT_ID_INDEX 0 +#define XP_PROP_0_PORT_ID_WIDTH 8 +#define XP_PROP_0_PORT_MODE_INDEX 8 +#define XP_PROP_0_PORT_MODE_WIDTH 4 +#define XP_PROP_0_PORT_SPEEDS_INDEX 23 +#define XP_PROP_0_PORT_SPEEDS_WIDTH 4 +#define XP_PROP_1_MAX_RX_DMA_INDEX 24 +#define XP_PROP_1_MAX_RX_DMA_WIDTH 5 +#define XP_PROP_1_MAX_RX_QUEUES_INDEX 8 +#define XP_PROP_1_MAX_RX_QUEUES_WIDTH 5 +#define XP_PROP_1_MAX_TX_DMA_INDEX 16 +#define XP_PROP_1_MAX_TX_DMA_WIDTH 5 +#define XP_PROP_1_MAX_TX_QUEUES_INDEX 0 +#define XP_PROP_1_MAX_TX_QUEUES_WIDTH 5 +#define XP_PROP_2_RX_FIFO_SIZE_INDEX 16 +#define XP_PROP_2_RX_FIFO_SIZE_WIDTH 16 +#define XP_PROP_2_TX_FIFO_SIZE_INDEX 0 +#define XP_PROP_2_TX_FIFO_SIZE_WIDTH 16 +#define XP_PROP_3_GPIO_MASK_INDEX 28 +#define XP_PROP_3_GPIO_MASK_WIDTH 4 +#define XP_PROP_3_GPIO_MOD_ABS_INDEX 20 +#define XP_PROP_3_GPIO_MOD_ABS_WIDTH 4 +#define XP_PROP_3_GPIO_RATE_SELECT_INDEX 16 +#define XP_PROP_3_GPIO_RATE_SELECT_WIDTH 4 +#define XP_PROP_3_GPIO_RX_LOS_INDEX 24 +#define XP_PROP_3_GPIO_RX_LOS_WIDTH 4 +#define XP_PROP_3_GPIO_TX_FAULT_INDEX 12 +#define XP_PROP_3_GPIO_TX_FAULT_WIDTH 4 +#define XP_PROP_3_GPIO_ADDR_INDEX 8 +#define XP_PROP_3_GPIO_ADDR_WIDTH 3 +#define XP_PROP_3_MDIO_RESET_INDEX 0 +#define XP_PROP_3_MDIO_RESET_WIDTH 2 +#define XP_PROP_3_MDIO_RESET_I2C_ADDR_INDEX 8 +#define XP_PROP_3_MDIO_RESET_I2C_ADDR_WIDTH 3 +#define XP_PROP_3_MDIO_RESET_I2C_GPIO_INDEX 12 +#define XP_PROP_3_MDIO_RESET_I2C_GPIO_WIDTH 4 +#define XP_PROP_3_MDIO_RESET_INT_GPIO_INDEX 4 +#define XP_PROP_3_MDIO_RESET_INT_GPIO_WIDTH 2 +#define XP_PROP_4_MUX_ADDR_HI_INDEX 8 +#define XP_PROP_4_MUX_ADDR_HI_WIDTH 5 +#define XP_PROP_4_MUX_ADDR_LO_INDEX 0 +#define XP_PROP_4_MUX_ADDR_LO_WIDTH 3 +#define XP_PROP_4_MUX_CHAN_INDEX 4 +#define XP_PROP_4_MUX_CHAN_WIDTH 3 +#define XP_PROP_4_REDRV_ADDR_INDEX 16 +#define XP_PROP_4_REDRV_ADDR_WIDTH 7 +#define XP_PROP_4_REDRV_IF_INDEX 23 +#define XP_PROP_4_REDRV_IF_WIDTH 1 +#define XP_PROP_4_REDRV_LANE_INDEX 24 +#define XP_PROP_4_REDRV_LANE_WIDTH 3 +#define XP_PROP_4_REDRV_MODEL_INDEX 28 +#define XP_PROP_4_REDRV_MODEL_WIDTH 3 +#define XP_PROP_4_REDRV_PRESENT_INDEX 31 +#define 
XP_PROP_4_REDRV_PRESENT_WIDTH 1 + +/* I2C Control register offsets */ +#define IC_CON 0x0000 +#define IC_TAR 0x0004 +#define IC_DATA_CMD 0x0010 +#define IC_INTR_STAT 0x002c +#define IC_INTR_MASK 0x0030 +#define IC_RAW_INTR_STAT 0x0034 +#define IC_CLR_INTR 0x0040 +#define IC_CLR_TX_ABRT 0x0054 +#define IC_CLR_STOP_DET 0x0060 +#define IC_ENABLE 0x006c +#define IC_TXFLR 0x0074 +#define IC_RXFLR 0x0078 +#define IC_TX_ABRT_SOURCE 0x0080 +#define IC_ENABLE_STATUS 0x009c +#define IC_COMP_PARAM_1 0x00f4 + +/* I2C Control register entry bit positions and sizes */ +#define IC_COMP_PARAM_1_MAX_SPEED_MODE_INDEX 2 +#define IC_COMP_PARAM_1_MAX_SPEED_MODE_WIDTH 2 +#define IC_COMP_PARAM_1_RX_BUFFER_DEPTH_INDEX 8 +#define IC_COMP_PARAM_1_RX_BUFFER_DEPTH_WIDTH 8 +#define IC_COMP_PARAM_1_TX_BUFFER_DEPTH_INDEX 16 +#define IC_COMP_PARAM_1_TX_BUFFER_DEPTH_WIDTH 8 +#define IC_CON_MASTER_MODE_INDEX 0 +#define IC_CON_MASTER_MODE_WIDTH 1 +#define IC_CON_RESTART_EN_INDEX 5 +#define IC_CON_RESTART_EN_WIDTH 1 +#define IC_CON_RX_FIFO_FULL_HOLD_INDEX 9 +#define IC_CON_RX_FIFO_FULL_HOLD_WIDTH 1 +#define IC_CON_SLAVE_DISABLE_INDEX 6 +#define IC_CON_SLAVE_DISABLE_WIDTH 1 +#define IC_CON_SPEED_INDEX 1 +#define IC_CON_SPEED_WIDTH 2 +#define IC_DATA_CMD_CMD_INDEX 8 +#define IC_DATA_CMD_CMD_WIDTH 1 +#define IC_DATA_CMD_STOP_INDEX 9 +#define IC_DATA_CMD_STOP_WIDTH 1 +#define IC_ENABLE_ABORT_INDEX 1 +#define IC_ENABLE_ABORT_WIDTH 1 +#define IC_ENABLE_EN_INDEX 0 +#define IC_ENABLE_EN_WIDTH 1 +#define IC_ENABLE_STATUS_EN_INDEX 0 +#define IC_ENABLE_STATUS_EN_WIDTH 1 +#define IC_INTR_MASK_TX_EMPTY_INDEX 4 +#define IC_INTR_MASK_TX_EMPTY_WIDTH 1 +#define IC_RAW_INTR_STAT_RX_FULL_INDEX 2 +#define IC_RAW_INTR_STAT_RX_FULL_WIDTH 1 +#define IC_RAW_INTR_STAT_STOP_DET_INDEX 9 +#define IC_RAW_INTR_STAT_STOP_DET_WIDTH 1 +#define IC_RAW_INTR_STAT_TX_ABRT_INDEX 6 +#define IC_RAW_INTR_STAT_TX_ABRT_WIDTH 1 +#define IC_RAW_INTR_STAT_TX_EMPTY_INDEX 4 +#define IC_RAW_INTR_STAT_TX_EMPTY_WIDTH 1 + +/* I2C Control register value */ +#define IC_TX_ABRT_7B_ADDR_NOACK 0x0001 +#define IC_TX_ABRT_ARB_LOST 0x1000 + +/* Descriptor/Packet entry bit positions and sizes */ +#define RX_PACKET_ERRORS_CRC_INDEX 2 +#define RX_PACKET_ERRORS_CRC_WIDTH 1 +#define RX_PACKET_ERRORS_FRAME_INDEX 3 +#define RX_PACKET_ERRORS_FRAME_WIDTH 1 +#define RX_PACKET_ERRORS_LENGTH_INDEX 0 +#define RX_PACKET_ERRORS_LENGTH_WIDTH 1 +#define RX_PACKET_ERRORS_OVERRUN_INDEX 1 +#define RX_PACKET_ERRORS_OVERRUN_WIDTH 1 + +#define RX_PACKET_ATTRIBUTES_CSUM_DONE_INDEX 0 +#define RX_PACKET_ATTRIBUTES_CSUM_DONE_WIDTH 1 +#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_INDEX 1 +#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH 1 +#define RX_PACKET_ATTRIBUTES_INCOMPLETE_INDEX 2 +#define RX_PACKET_ATTRIBUTES_INCOMPLETE_WIDTH 1 +#define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_INDEX 3 +#define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_WIDTH 1 +#define RX_PACKET_ATTRIBUTES_CONTEXT_INDEX 4 +#define RX_PACKET_ATTRIBUTES_CONTEXT_WIDTH 1 +#define RX_PACKET_ATTRIBUTES_RX_TSTAMP_INDEX 5 +#define RX_PACKET_ATTRIBUTES_RX_TSTAMP_WIDTH 1 +#define RX_PACKET_ATTRIBUTES_RSS_HASH_INDEX 6 +#define RX_PACKET_ATTRIBUTES_RSS_HASH_WIDTH 1 + +#define RX_NORMAL_DESC0_OVT_INDEX 0 +#define RX_NORMAL_DESC0_OVT_WIDTH 16 +#define RX_NORMAL_DESC2_HL_INDEX 0 +#define RX_NORMAL_DESC2_HL_WIDTH 10 +#define RX_NORMAL_DESC3_CDA_INDEX 27 +#define RX_NORMAL_DESC3_CDA_WIDTH 1 +#define RX_NORMAL_DESC3_CTXT_INDEX 30 +#define RX_NORMAL_DESC3_CTXT_WIDTH 1 +#define RX_NORMAL_DESC3_ES_INDEX 15 +#define RX_NORMAL_DESC3_ES_WIDTH 1 +#define RX_NORMAL_DESC3_ETLT_INDEX 16 
+#define RX_NORMAL_DESC3_ETLT_WIDTH 4 +#define RX_NORMAL_DESC3_FD_INDEX 29 +#define RX_NORMAL_DESC3_FD_WIDTH 1 +#define RX_NORMAL_DESC3_INTE_INDEX 30 +#define RX_NORMAL_DESC3_INTE_WIDTH 1 +#define RX_NORMAL_DESC3_L34T_INDEX 20 +#define RX_NORMAL_DESC3_L34T_WIDTH 4 +#define RX_NORMAL_DESC3_LD_INDEX 28 +#define RX_NORMAL_DESC3_LD_WIDTH 1 +#define RX_NORMAL_DESC3_OWN_INDEX 31 +#define RX_NORMAL_DESC3_OWN_WIDTH 1 +#define RX_NORMAL_DESC3_PL_INDEX 0 +#define RX_NORMAL_DESC3_PL_WIDTH 14 +#define RX_NORMAL_DESC3_RSV_INDEX 26 +#define RX_NORMAL_DESC3_RSV_WIDTH 1 + +#define RX_DESC3_L34T_IPV4_TCP 1 +#define RX_DESC3_L34T_IPV4_UDP 2 +#define RX_DESC3_L34T_IPV4_ICMP 3 +#define RX_DESC3_L34T_IPV6_TCP 9 +#define RX_DESC3_L34T_IPV6_UDP 10 +#define RX_DESC3_L34T_IPV6_ICMP 11 + +#define RX_CONTEXT_DESC3_TSA_INDEX 4 +#define RX_CONTEXT_DESC3_TSA_WIDTH 1 +#define RX_CONTEXT_DESC3_TSD_INDEX 6 +#define RX_CONTEXT_DESC3_TSD_WIDTH 1 + +#define TX_PACKET_ATTRIBUTES_CSUM_ENABLE_INDEX 0 +#define TX_PACKET_ATTRIBUTES_CSUM_ENABLE_WIDTH 1 +#define TX_PACKET_ATTRIBUTES_TSO_ENABLE_INDEX 1 +#define TX_PACKET_ATTRIBUTES_TSO_ENABLE_WIDTH 1 +#define TX_PACKET_ATTRIBUTES_VLAN_CTAG_INDEX 2 +#define TX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH 1 +#define TX_PACKET_ATTRIBUTES_PTP_INDEX 3 +#define TX_PACKET_ATTRIBUTES_PTP_WIDTH 1 + +#define TX_CONTEXT_DESC2_MSS_INDEX 0 +#define TX_CONTEXT_DESC2_MSS_WIDTH 15 +#define TX_CONTEXT_DESC3_CTXT_INDEX 30 +#define TX_CONTEXT_DESC3_CTXT_WIDTH 1 +#define TX_CONTEXT_DESC3_TCMSSV_INDEX 26 +#define TX_CONTEXT_DESC3_TCMSSV_WIDTH 1 +#define TX_CONTEXT_DESC3_VLTV_INDEX 16 +#define TX_CONTEXT_DESC3_VLTV_WIDTH 1 +#define TX_CONTEXT_DESC3_VT_INDEX 0 +#define TX_CONTEXT_DESC3_VT_WIDTH 16 + +#define TX_NORMAL_DESC2_HL_B1L_INDEX 0 +#define TX_NORMAL_DESC2_HL_B1L_WIDTH 14 +#define TX_NORMAL_DESC2_IC_INDEX 31 +#define TX_NORMAL_DESC2_IC_WIDTH 1 +#define TX_NORMAL_DESC2_TTSE_INDEX 30 +#define TX_NORMAL_DESC2_TTSE_WIDTH 1 +#define TX_NORMAL_DESC2_VTIR_INDEX 14 +#define TX_NORMAL_DESC2_VTIR_WIDTH 2 +#define TX_NORMAL_DESC3_CIC_INDEX 16 +#define TX_NORMAL_DESC3_CIC_WIDTH 2 +#define TX_NORMAL_DESC3_CPC_INDEX 26 +#define TX_NORMAL_DESC3_CPC_WIDTH 2 +#define TX_NORMAL_DESC3_CTXT_INDEX 30 +#define TX_NORMAL_DESC3_CTXT_WIDTH 1 +#define TX_NORMAL_DESC3_FD_INDEX 29 +#define TX_NORMAL_DESC3_FD_WIDTH 1 +#define TX_NORMAL_DESC3_FL_INDEX 0 +#define TX_NORMAL_DESC3_FL_WIDTH 15 +#define TX_NORMAL_DESC3_LD_INDEX 28 +#define TX_NORMAL_DESC3_LD_WIDTH 1 +#define TX_NORMAL_DESC3_OWN_INDEX 31 +#define TX_NORMAL_DESC3_OWN_WIDTH 1 +#define TX_NORMAL_DESC3_TCPHDRLEN_INDEX 19 +#define TX_NORMAL_DESC3_TCPHDRLEN_WIDTH 4 +#define TX_NORMAL_DESC3_TCPPL_INDEX 0 +#define TX_NORMAL_DESC3_TCPPL_WIDTH 18 +#define TX_NORMAL_DESC3_TSE_INDEX 18 +#define TX_NORMAL_DESC3_TSE_WIDTH 1 + +#define TX_NORMAL_DESC2_VLAN_INSERT 0x2 + +/* MDIO undefined or vendor specific registers */ +#ifndef MDIO_PMA_10GBR_PMD_CTRL +#define MDIO_PMA_10GBR_PMD_CTRL 0x0096 +#endif + +#ifndef MDIO_PMA_10GBR_FECCTRL +#define MDIO_PMA_10GBR_FECCTRL 0x00ab +#endif + +#ifndef MDIO_PCS_DIG_CTRL +#define MDIO_PCS_DIG_CTRL 0x8000 +#endif + +#ifndef MDIO_AN_XNP +#define MDIO_AN_XNP 0x0016 +#endif + +#ifndef MDIO_AN_LPX +#define MDIO_AN_LPX 0x0019 +#endif + +#ifndef MDIO_AN_COMP_STAT +#define MDIO_AN_COMP_STAT 0x0030 +#endif + +#ifndef MDIO_AN_INTMASK +#define MDIO_AN_INTMASK 0x8001 +#endif + +#ifndef MDIO_AN_INT +#define MDIO_AN_INT 0x8002 +#endif + +#ifndef MDIO_VEND2_AN_ADVERTISE +#define MDIO_VEND2_AN_ADVERTISE 0x0004 +#endif + +#ifndef MDIO_VEND2_AN_LP_ABILITY +#define 
MDIO_VEND2_AN_LP_ABILITY 0x0005 +#endif + +#ifndef MDIO_VEND2_AN_CTRL +#define MDIO_VEND2_AN_CTRL 0x8001 +#endif + +#ifndef MDIO_VEND2_AN_STAT +#define MDIO_VEND2_AN_STAT 0x8002 +#endif + +#ifndef MDIO_VEND2_PMA_CDR_CONTROL +#define MDIO_VEND2_PMA_CDR_CONTROL 0x8056 +#endif + +#ifndef MDIO_CTRL1_SPEED1G +#define MDIO_CTRL1_SPEED1G (MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100) +#endif + +#ifndef MDIO_VEND2_CTRL1_AN_ENABLE +#define MDIO_VEND2_CTRL1_AN_ENABLE BIT(12) +#endif + +#ifndef MDIO_VEND2_CTRL1_AN_RESTART +#define MDIO_VEND2_CTRL1_AN_RESTART BIT(9) +#endif + +#ifndef MDIO_VEND2_CTRL1_SS6 +#define MDIO_VEND2_CTRL1_SS6 BIT(6) +#endif + +#ifndef MDIO_VEND2_CTRL1_SS13 +#define MDIO_VEND2_CTRL1_SS13 BIT(13) +#endif + +/* MDIO mask values */ +#define AXGBE_AN_CL73_INT_CMPLT BIT(0) +#define AXGBE_AN_CL73_INC_LINK BIT(1) +#define AXGBE_AN_CL73_PG_RCV BIT(2) +#define AXGBE_AN_CL73_INT_MASK 0x07 + +#define AXGBE_XNP_MCF_NULL_MESSAGE 0x001 +#define AXGBE_XNP_ACK_PROCESSED BIT(12) +#define AXGBE_XNP_MP_FORMATTED BIT(13) +#define AXGBE_XNP_NP_EXCHANGE BIT(15) + +#define AXGBE_KR_TRAINING_START BIT(0) +#define AXGBE_KR_TRAINING_ENABLE BIT(1) + +#define AXGBE_PCS_CL37_BP BIT(12) + +#define AXGBE_AN_CL37_INT_CMPLT BIT(0) +#define AXGBE_AN_CL37_INT_MASK 0x01 + +#define AXGBE_AN_CL37_HD_MASK 0x40 +#define AXGBE_AN_CL37_FD_MASK 0x20 + +#define AXGBE_AN_CL37_PCS_MODE_MASK 0x06 +#define AXGBE_AN_CL37_PCS_MODE_BASEX 0x00 +#define AXGBE_AN_CL37_PCS_MODE_SGMII 0x04 +#define AXGBE_AN_CL37_TX_CONFIG_MASK 0x08 + +#define AXGBE_PMA_CDR_TRACK_EN_MASK 0x01 +#define AXGBE_PMA_CDR_TRACK_EN_OFF 0x00 +#define AXGBE_PMA_CDR_TRACK_EN_ON 0x01 + +/*generic*/ +#define __iomem + +#define rmb() rte_rmb() /* dpdk rte provided rmb */ +#define wmb() rte_wmb() /* dpdk rte provided wmb */ + +#define __le16 u16 +#define __le32 u32 +#define __le64 u64 + +typedef unsigned char u8; +typedef unsigned short u16; +typedef unsigned int u32; +typedef unsigned long long u64; +typedef unsigned long long dma_addr_t; + +static inline uint32_t low32_value(uint64_t addr) +{ + return (addr) & 0x0ffffffff; +} + +static inline uint32_t high32_value(uint64_t addr) +{ + return (addr >> 32) & 0x0ffffffff; +} + +/*END*/ + +/* Bit setting and getting macros + * The get macro will extract the current bit field value from within + * the variable + * + * The set macro will clear the current bit field value within the + * variable and then set the bit field of the variable to the + * specified value + */ +#define GET_BITS(_var, _index, _width) \ + (((_var) >> (_index)) & ((0x1 << (_width)) - 1)) + +#define SET_BITS(_var, _index, _width, _val) \ +do { \ + (_var) &= ~(((0x1 << (_width)) - 1) << (_index)); \ + (_var) |= (((_val) & ((0x1 << (_width)) - 1)) << (_index)); \ +} while (0) + +#define GET_BITS_LE(_var, _index, _width) \ + ((rte_le_to_cpu_32((_var)) >> (_index)) & ((0x1 << (_width)) - 1)) + +#define SET_BITS_LE(_var, _index, _width, _val) \ +do { \ + (_var) &= rte_cpu_to_le_32(~(((0x1 << (_width)) - 1) << (_index)));\ + (_var) |= rte_cpu_to_le_32((((_val) & \ + ((0x1 << (_width)) - 1)) << (_index))); \ +} while (0) + +/* Bit setting and getting macros based on register fields + * The get macro uses the bit field definitions formed using the input + * names to extract the current bit field value from within the + * variable + * + * The set macro uses the bit field definitions formed using the input + * names to set the bit field of the variable to the specified value + */ +#define AXGMAC_GET_BITS(_var, _prefix, _field) \ + GET_BITS((_var), \ + 
_prefix##_##_field##_INDEX, \ + _prefix##_##_field##_WIDTH) + +#define AXGMAC_SET_BITS(_var, _prefix, _field, _val) \ + SET_BITS((_var), \ + _prefix##_##_field##_INDEX, \ + _prefix##_##_field##_WIDTH, (_val)) + +#define AXGMAC_GET_BITS_LE(_var, _prefix, _field) \ + GET_BITS_LE((_var), \ + _prefix##_##_field##_INDEX, \ + _prefix##_##_field##_WIDTH) + +#define AXGMAC_SET_BITS_LE(_var, _prefix, _field, _val) \ + SET_BITS_LE((_var), \ + _prefix##_##_field##_INDEX, \ + _prefix##_##_field##_WIDTH, (_val)) + +/* Macros for reading or writing registers + * The ioread macros will get bit fields or full values using the + * register definitions formed using the input names + * + * The iowrite macros will set bit fields or full values using the + * register definitions formed using the input names + */ +#define AXGMAC_IOREAD(_pdata, _reg) \ + rte_read32((uint8_t *)((_pdata)->xgmac_regs) + (_reg)) + +#define AXGMAC_IOREAD_BITS(_pdata, _reg, _field) \ + GET_BITS(AXGMAC_IOREAD((_pdata), _reg), \ + _reg##_##_field##_INDEX, \ + _reg##_##_field##_WIDTH) + +#define AXGMAC_IOWRITE(_pdata, _reg, _val) \ + rte_write32((_val), \ + (uint8_t *)((_pdata)->xgmac_regs) + (_reg)) + +#define AXGMAC_IOWRITE_BITS(_pdata, _reg, _field, _val) \ +do { \ + u32 reg_val = AXGMAC_IOREAD((_pdata), _reg); \ + SET_BITS(reg_val, \ + _reg##_##_field##_INDEX, \ + _reg##_##_field##_WIDTH, (_val)); \ + AXGMAC_IOWRITE((_pdata), _reg, reg_val); \ +} while (0) + +/* Macros for reading or writing MTL queue or traffic class registers + * Similar to the standard read and write macros except that the + * base register value is calculated by the queue or traffic class number + */ +#define AXGMAC_MTL_IOREAD(_pdata, _n, _reg) \ + rte_read32((uint8_t *)((_pdata)->xgmac_regs) + \ + MTL_Q_BASE + ((_n) * MTL_Q_INC) + (_reg)) + +#define AXGMAC_MTL_IOREAD_BITS(_pdata, _n, _reg, _field) \ + GET_BITS(AXGMAC_MTL_IOREAD((_pdata), (_n), (_reg)), \ + _reg##_##_field##_INDEX, \ + _reg##_##_field##_WIDTH) + +#define AXGMAC_MTL_IOWRITE(_pdata, _n, _reg, _val) \ + rte_write32((_val), (uint8_t *)((_pdata)->xgmac_regs) +\ + MTL_Q_BASE + ((_n) * MTL_Q_INC) + (_reg)) + +#define AXGMAC_MTL_IOWRITE_BITS(_pdata, _n, _reg, _field, _val) \ +do { \ + u32 reg_val = AXGMAC_MTL_IOREAD((_pdata), (_n), _reg); \ + SET_BITS(reg_val, \ + _reg##_##_field##_INDEX, \ + _reg##_##_field##_WIDTH, (_val)); \ + AXGMAC_MTL_IOWRITE((_pdata), (_n), _reg, reg_val); \ +} while (0) + +/* Macros for reading or writing DMA channel registers + * Similar to the standard read and write macros except that the + * base register value is obtained from the ring + */ +#define AXGMAC_DMA_IOREAD(_channel, _reg) \ + rte_read32((uint8_t *)((_channel)->dma_regs) + (_reg)) + +#define AXGMAC_DMA_IOREAD_BITS(_channel, _reg, _field) \ + GET_BITS(AXGMAC_DMA_IOREAD((_channel), _reg), \ + _reg##_##_field##_INDEX, \ + _reg##_##_field##_WIDTH) + +#define AXGMAC_DMA_IOWRITE(_channel, _reg, _val) \ + rte_write32((_val), \ + (uint8_t *)((_channel)->dma_regs) + (_reg)) + +#define AXGMAC_DMA_IOWRITE_BITS(_channel, _reg, _field, _val) \ +do { \ + u32 reg_val = AXGMAC_DMA_IOREAD((_channel), _reg); \ + SET_BITS(reg_val, \ + _reg##_##_field##_INDEX, \ + _reg##_##_field##_WIDTH, (_val)); \ + AXGMAC_DMA_IOWRITE((_channel), _reg, reg_val); \ +} while (0) + +/* Macros for building, reading or writing register values or bits + * within the register values of XPCS registers. 
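+ * + * PCS access is windowed: a 32-bit write to the window-select register + * picks the active window and the XPCS16 macros then read or write 16-bit + * registers within it; the MMD access routines in axgbe_dev.c pair + * XPCS32_IOWRITE with XPCS16_IOREAD/XPCS16_IOWRITE in exactly this way.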
+ */ +#define XPCS_GET_BITS(_var, _prefix, _field) \ + GET_BITS((_var), \ + _prefix##_##_field##_INDEX, \ + _prefix##_##_field##_WIDTH) + +#define XPCS_SET_BITS(_var, _prefix, _field, _val) \ + SET_BITS((_var), \ + _prefix##_##_field##_INDEX, \ + _prefix##_##_field##_WIDTH, (_val)) + +#define XPCS32_IOWRITE(_pdata, _off, _val) \ + rte_write32(_val, \ + (uint8_t *)((_pdata)->xpcs_regs) + (_off)) + +#define XPCS32_IOREAD(_pdata, _off) \ + rte_read32((uint8_t *)((_pdata)->xpcs_regs) + (_off)) + +#define XPCS16_IOWRITE(_pdata, _off, _val) \ + rte_write16(_val, \ + (uint8_t *)((_pdata)->xpcs_regs) + (_off)) + +#define XPCS16_IOREAD(_pdata, _off) \ + rte_read16((uint8_t *)((_pdata)->xpcs_regs) + (_off)) + +/* Macros for building, reading or writing register values or bits + * within the register values of SerDes integration registers. + */ +#define XSIR_GET_BITS(_var, _prefix, _field) \ + GET_BITS((_var), \ + _prefix##_##_field##_INDEX, \ + _prefix##_##_field##_WIDTH) + +#define XSIR_SET_BITS(_var, _prefix, _field, _val) \ + SET_BITS((_var), \ + _prefix##_##_field##_INDEX, \ + _prefix##_##_field##_WIDTH, (_val)) + +#define XSIR0_IOREAD(_pdata, _reg) \ + rte_read16((uint8_t *)((_pdata)->sir0_regs) + (_reg)) + +#define XSIR0_IOREAD_BITS(_pdata, _reg, _field) \ + GET_BITS(XSIR0_IOREAD((_pdata), _reg), \ + _reg##_##_field##_INDEX, \ + _reg##_##_field##_WIDTH) + +#define XSIR0_IOWRITE(_pdata, _reg, _val) \ + rte_write16((_val), \ + (uint8_t *)((_pdata)->sir0_regs) + (_reg)) + +#define XSIR0_IOWRITE_BITS(_pdata, _reg, _field, _val) \ +do { \ + u16 reg_val = XSIR0_IOREAD((_pdata), _reg); \ + SET_BITS(reg_val, \ + _reg##_##_field##_INDEX, \ + _reg##_##_field##_WIDTH, (_val)); \ + XSIR0_IOWRITE((_pdata), _reg, reg_val); \ +} while (0) + +#define XSIR1_IOREAD(_pdata, _reg) \ + rte_read16((uint8_t *)((_pdata)->sir1_regs) + _reg) + +#define XSIR1_IOREAD_BITS(_pdata, _reg, _field) \ + GET_BITS(XSIR1_IOREAD((_pdata), _reg), \ + _reg##_##_field##_INDEX, \ + _reg##_##_field##_WIDTH) + +#define XSIR1_IOWRITE(_pdata, _reg, _val) \ + rte_write16((_val), \ + (uint8_t *)((_pdata)->sir1_regs) + (_reg)) + +#define XSIR1_IOWRITE_BITS(_pdata, _reg, _field, _val) \ +do { \ + u16 reg_val = XSIR1_IOREAD((_pdata), _reg); \ + SET_BITS(reg_val, \ + _reg##_##_field##_INDEX, \ + _reg##_##_field##_WIDTH, (_val)); \ + XSIR1_IOWRITE((_pdata), _reg, reg_val); \ +} while (0) + +/* Macros for building, reading or writing register values or bits + * within the register values of SerDes RxTx registers. + */ +#define XRXTX_IOREAD(_pdata, _reg) \ + rte_read16((uint8_t *)((_pdata)->rxtx_regs) + (_reg)) + +#define XRXTX_IOREAD_BITS(_pdata, _reg, _field) \ + GET_BITS(XRXTX_IOREAD((_pdata), _reg), \ + _reg##_##_field##_INDEX, \ + _reg##_##_field##_WIDTH) + +#define XRXTX_IOWRITE(_pdata, _reg, _val) \ + rte_write16((_val), \ + (uint8_t *)((_pdata)->rxtx_regs) + (_reg)) + +#define XRXTX_IOWRITE_BITS(_pdata, _reg, _field, _val) \ +do { \ + u16 reg_val = XRXTX_IOREAD((_pdata), _reg); \ + SET_BITS(reg_val, \ + _reg##_##_field##_INDEX, \ + _reg##_##_field##_WIDTH, (_val)); \ + XRXTX_IOWRITE((_pdata), _reg, reg_val); \ +} while (0) + +/* Macros for building, reading or writing register values or bits + * within the register values of MAC Control registers. 
+ */ +#define XP_GET_BITS(_var, _prefix, _field) \ + GET_BITS((_var), \ + _prefix##_##_field##_INDEX, \ + _prefix##_##_field##_WIDTH) + +#define XP_SET_BITS(_var, _prefix, _field, _val) \ + SET_BITS((_var), \ + _prefix##_##_field##_INDEX, \ + _prefix##_##_field##_WIDTH, (_val)) + +#define XP_IOREAD(_pdata, _reg) \ + rte_read32((uint8_t *)((_pdata)->xprop_regs) + (_reg)) + +#define XP_IOREAD_BITS(_pdata, _reg, _field) \ + GET_BITS(XP_IOREAD((_pdata), (_reg)), \ + _reg##_##_field##_INDEX, \ + _reg##_##_field##_WIDTH) + +#define XP_IOWRITE(_pdata, _reg, _val) \ + rte_write32((_val), \ + (uint8_t *)((_pdata)->xprop_regs) + (_reg)) + +#define XP_IOWRITE_BITS(_pdata, _reg, _field, _val) \ +do { \ + u32 reg_val = XP_IOREAD((_pdata), (_reg)); \ + SET_BITS(reg_val, \ + _reg##_##_field##_INDEX, \ + _reg##_##_field##_WIDTH, (_val)); \ + XP_IOWRITE((_pdata), (_reg), reg_val); \ +} while (0) + +/* Macros for building, reading or writing register values or bits + * within the register values of I2C Control registers. + */ +#define XI2C_GET_BITS(_var, _prefix, _field) \ + GET_BITS((_var), \ + _prefix##_##_field##_INDEX, \ + _prefix##_##_field##_WIDTH) + +#define XI2C_SET_BITS(_var, _prefix, _field, _val) \ + SET_BITS((_var), \ + _prefix##_##_field##_INDEX, \ + _prefix##_##_field##_WIDTH, (_val)) + +#define XI2C_IOREAD(_pdata, _reg) \ + rte_read32((uint8_t *)((_pdata)->xi2c_regs) + (_reg)) + +#define XI2C_IOREAD_BITS(_pdata, _reg, _field) \ + GET_BITS(XI2C_IOREAD((_pdata), (_reg)), \ + _reg##_##_field##_INDEX, \ + _reg##_##_field##_WIDTH) + +#define XI2C_IOWRITE(_pdata, _reg, _val) \ + rte_write32((_val), \ + (uint8_t *)((_pdata)->xi2c_regs) + (_reg)) + +#define XI2C_IOWRITE_BITS(_pdata, _reg, _field, _val) \ +do { \ + u32 reg_val = XI2C_IOREAD((_pdata), (_reg)); \ + SET_BITS(reg_val, \ + _reg##_##_field##_INDEX, \ + _reg##_##_field##_WIDTH, (_val)); \ + XI2C_IOWRITE((_pdata), (_reg), reg_val); \ +} while (0) + +/* Macros for building, reading or writing register values or bits + * using MDIO. Different from above because of the use of standardized + * Linux include values. No shifting is performed with the bit + * operations, everything works on mask values. + */ +#define XMDIO_READ(_pdata, _mmd, _reg) \ + ((_pdata)->hw_if.read_mmd_regs((_pdata), 0, \ + MII_ADDR_C45 | ((_mmd) << 16) | ((_reg) & 0xffff))) + +#define XMDIO_READ_BITS(_pdata, _mmd, _reg, _mask) \ + (XMDIO_READ((_pdata), _mmd, _reg) & _mask) + +#define XMDIO_WRITE(_pdata, _mmd, _reg, _val) \ + ((_pdata)->hw_if.write_mmd_regs((_pdata), 0, \ + MII_ADDR_C45 | ((_mmd) << 16) | ((_reg) & 0xffff), (_val))) + +#define XMDIO_WRITE_BITS(_pdata, _mmd, _reg, _mask, _val) \ +do { \ + u32 mmd_val = XMDIO_READ((_pdata), (_mmd), (_reg)); \ + mmd_val &= ~(_mask); \ + mmd_val |= (_val); \ + XMDIO_WRITE((_pdata), (_mmd), (_reg), (mmd_val)); \ +} while (0) + +/* + * time_after(a,b) returns true if the time a is after time b. + * + * Do this with "<0" and ">=0" to only test the sign of the result. A + * good compiler would generate better code (and a really good compiler + * wouldn't care). Gcc is currently neither. 
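+ * + * For example, once the counter wraps, a = 2 and b = ULONG_MAX - 1 gives + * (long)((b) - (a)) == (long)(ULONG_MAX - 3), which is negative, so + * time_after(a, b) still correctly reports that a is after b.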
+ */
+#define time_after(a, b)	((long)((b) - (a)) < 0)
+#define time_before(a, b)	time_after(b, a)
+
+#define time_after_eq(a, b)	((long)((a) - (b)) >= 0)
+#define time_before_eq(a, b)	time_after_eq(b, a)
+
+/*---bitmap support APIs---*/
+static inline int axgbe_test_bit(int nr, volatile unsigned long *addr)
+{
+	int res;
+
+	rte_mb();
+	res = ((*addr) & (1UL << nr)) != 0;
+	rte_mb();
+	return res;
+}
+
+static inline void axgbe_set_bit(unsigned int nr, volatile unsigned long *addr)
+{
+	__sync_fetch_and_or(addr, (1UL << nr));
+}
+
+static inline void axgbe_clear_bit(int nr, volatile unsigned long *addr)
+{
+	__sync_fetch_and_and(addr, ~(1UL << nr));
+}
+
+static inline int axgbe_test_and_clear_bit(int nr, volatile unsigned long *addr)
+{
+	unsigned long mask = (1UL << nr);
+
+	return __sync_fetch_and_and(addr, ~mask) & mask;
+}
+
+static inline unsigned long msecs_to_timer_cycles(unsigned int m)
+{
+	/* Multiply before dividing so sub-second values do not truncate
+	 * to zero.
+	 */
+	return (rte_get_timer_hz() * m) / 1000;
+}
+
+#endif /* __AXGBE_COMMON_H__ */
diff --git a/drivers/net/axgbe/axgbe_dev.c b/drivers/net/axgbe/axgbe_dev.c
new file mode 100644
index 00000000..707f1ee9
--- /dev/null
+++ b/drivers/net/axgbe/axgbe_dev.c
@@ -0,0 +1,1103 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
+ */
+
+#include "axgbe_ethdev.h"
+#include "axgbe_common.h"
+#include "axgbe_phy.h"
+#include "axgbe_rxtx.h"
+
+static inline unsigned int axgbe_get_max_frame(struct axgbe_port *pdata)
+{
+	return pdata->eth_dev->data->mtu + ETHER_HDR_LEN +
+		ETHER_CRC_LEN + VLAN_HLEN;
+}
+
+/* Query the MDIO busy bit */
+static int mdio_complete(struct axgbe_port *pdata)
+{
+	if (!AXGMAC_IOREAD_BITS(pdata, MAC_MDIOSCCDR, BUSY))
+		return 1;
+
+	return 0;
+}
+
+static int axgbe_write_ext_mii_regs(struct axgbe_port *pdata, int addr,
+				    int reg, u16 val)
+{
+	unsigned int mdio_sca, mdio_sccd;
+	uint64_t timeout;
+
+	mdio_sca = 0;
+	AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
+	AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
+	AXGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
+
+	mdio_sccd = 0;
+	AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, DATA, val);
+	AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 1);
+	AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1);
+	AXGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd);
+
+	timeout = rte_get_timer_cycles() + rte_get_timer_hz();
+	while (time_before(rte_get_timer_cycles(), timeout)) {
+		rte_delay_us(100);
+		if (mdio_complete(pdata))
+			return 0;
+	}
+
+	PMD_DRV_LOG(ERR, "MDIO write operation timed out\n");
+	return -ETIMEDOUT;
+}
+
+static int axgbe_read_ext_mii_regs(struct axgbe_port *pdata, int addr,
+				   int reg)
+{
+	unsigned int mdio_sca, mdio_sccd;
+	uint64_t timeout;
+
+	mdio_sca = 0;
+	AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
+	AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
+	AXGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
+
+	mdio_sccd = 0;
+	AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 3);
+	AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1);
+	AXGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd);
+
+	timeout = rte_get_timer_cycles() + rte_get_timer_hz();
+
+	while (time_before(rte_get_timer_cycles(), timeout)) {
+		rte_delay_us(100);
+		if (mdio_complete(pdata))
+			goto success;
+	}
+
+	PMD_DRV_LOG(ERR, "MDIO read operation timed out\n");
+	return -ETIMEDOUT;
+
+success:
+	return AXGMAC_IOREAD_BITS(pdata, MAC_MDIOSCCDR, DATA);
+}
+
+static int axgbe_set_ext_mii_mode(struct axgbe_port *pdata, unsigned int port,
+				  enum axgbe_mdio_mode
mode) +{ + unsigned int reg_val = 0; + + switch (mode) { + case AXGBE_MDIO_MODE_CL22: + if (port > AXGMAC_MAX_C22_PORT) + return -EINVAL; + reg_val |= (1 << port); + break; + case AXGBE_MDIO_MODE_CL45: + break; + default: + return -EINVAL; + } + AXGMAC_IOWRITE(pdata, MAC_MDIOCL22R, reg_val); + + return 0; +} + +static int axgbe_read_mmd_regs_v2(struct axgbe_port *pdata, + int prtad __rte_unused, int mmd_reg) +{ + unsigned int mmd_address, index, offset; + int mmd_data; + + if (mmd_reg & MII_ADDR_C45) + mmd_address = mmd_reg & ~MII_ADDR_C45; + else + mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff); + + /* The PCS registers are accessed using mmio. The underlying + * management interface uses indirect addressing to access the MMD + * register sets. This requires accessing of the PCS register in two + * phases, an address phase and a data phase. + * + * The mmio interface is based on 16-bit offsets and values. All + * register offsets must therefore be adjusted by left shifting the + * offset 1 bit and reading 16 bits of data. + */ + mmd_address <<= 1; + index = mmd_address & ~pdata->xpcs_window_mask; + offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask); + + pthread_mutex_lock(&pdata->xpcs_mutex); + + XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index); + mmd_data = XPCS16_IOREAD(pdata, offset); + + pthread_mutex_unlock(&pdata->xpcs_mutex); + + return mmd_data; +} + +static void axgbe_write_mmd_regs_v2(struct axgbe_port *pdata, + int prtad __rte_unused, + int mmd_reg, int mmd_data) +{ + unsigned int mmd_address, index, offset; + + if (mmd_reg & MII_ADDR_C45) + mmd_address = mmd_reg & ~MII_ADDR_C45; + else + mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff); + + /* The PCS registers are accessed using mmio. The underlying + * management interface uses indirect addressing to access the MMD + * register sets. This requires accessing of the PCS register in two + * phases, an address phase and a data phase. + * + * The mmio interface is based on 16-bit offsets and values. All + * register offsets must therefore be adjusted by left shifting the + * offset 1 bit and writing 16 bits of data. 
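+	 *
+	 * After the shift, the bits above xpcs_window_mask select the
+	 * window index (programmed via xpcs_window_sel_reg) and the
+	 * masked low bits give the offset of the register within the
+	 * currently mapped window.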
+ */ + mmd_address <<= 1; + index = mmd_address & ~pdata->xpcs_window_mask; + offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask); + + pthread_mutex_lock(&pdata->xpcs_mutex); + + XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index); + XPCS16_IOWRITE(pdata, offset, mmd_data); + + pthread_mutex_unlock(&pdata->xpcs_mutex); +} + +static int axgbe_read_mmd_regs(struct axgbe_port *pdata, int prtad, + int mmd_reg) +{ + switch (pdata->vdata->xpcs_access) { + case AXGBE_XPCS_ACCESS_V1: + PMD_DRV_LOG(ERR, "PHY_Version 1 is not supported\n"); + return -1; + case AXGBE_XPCS_ACCESS_V2: + default: + return axgbe_read_mmd_regs_v2(pdata, prtad, mmd_reg); + } +} + +static void axgbe_write_mmd_regs(struct axgbe_port *pdata, int prtad, + int mmd_reg, int mmd_data) +{ + switch (pdata->vdata->xpcs_access) { + case AXGBE_XPCS_ACCESS_V1: + PMD_DRV_LOG(ERR, "PHY_Version 1 is not supported\n"); + return; + case AXGBE_XPCS_ACCESS_V2: + default: + return axgbe_write_mmd_regs_v2(pdata, prtad, mmd_reg, mmd_data); + } +} + +static int axgbe_set_speed(struct axgbe_port *pdata, int speed) +{ + unsigned int ss; + + switch (speed) { + case SPEED_1000: + ss = 0x03; + break; + case SPEED_2500: + ss = 0x02; + break; + case SPEED_10000: + ss = 0x00; + break; + default: + return -EINVAL; + } + + if (AXGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) != ss) + AXGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, ss); + + return 0; +} + +static int axgbe_disable_tx_flow_control(struct axgbe_port *pdata) +{ + unsigned int max_q_count, q_count; + unsigned int reg, reg_val; + unsigned int i; + + /* Clear MTL flow control */ + for (i = 0; i < pdata->rx_q_count; i++) + AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0); + + /* Clear MAC flow control */ + max_q_count = AXGMAC_MAX_FLOW_CONTROL_QUEUES; + q_count = RTE_MIN(pdata->tx_q_count, + max_q_count); + reg = MAC_Q0TFCR; + for (i = 0; i < q_count; i++) { + reg_val = AXGMAC_IOREAD(pdata, reg); + AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 0); + AXGMAC_IOWRITE(pdata, reg, reg_val); + + reg += MAC_QTFCR_INC; + } + + return 0; +} + +static int axgbe_enable_tx_flow_control(struct axgbe_port *pdata) +{ + unsigned int max_q_count, q_count; + unsigned int reg, reg_val; + unsigned int i; + + /* Set MTL flow control */ + for (i = 0; i < pdata->rx_q_count; i++) { + unsigned int ehfc = 0; + + /* Flow control thresholds are established */ + if (pdata->rx_rfd[i]) + ehfc = 1; + + AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, ehfc); + } + + /* Set MAC flow control */ + max_q_count = AXGMAC_MAX_FLOW_CONTROL_QUEUES; + q_count = RTE_MIN(pdata->tx_q_count, + max_q_count); + reg = MAC_Q0TFCR; + for (i = 0; i < q_count; i++) { + reg_val = AXGMAC_IOREAD(pdata, reg); + + /* Enable transmit flow control */ + AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 1); + /* Set pause time */ + AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, 0xffff); + + AXGMAC_IOWRITE(pdata, reg, reg_val); + + reg += MAC_QTFCR_INC; + } + + return 0; +} + +static int axgbe_disable_rx_flow_control(struct axgbe_port *pdata) +{ + AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 0); + + return 0; +} + +static int axgbe_enable_rx_flow_control(struct axgbe_port *pdata) +{ + AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 1); + + return 0; +} + +static int axgbe_config_tx_flow_control(struct axgbe_port *pdata) +{ + if (pdata->tx_pause) + axgbe_enable_tx_flow_control(pdata); + else + axgbe_disable_tx_flow_control(pdata); + + return 0; +} + +static int axgbe_config_rx_flow_control(struct axgbe_port *pdata) +{ + if (pdata->rx_pause) + 
axgbe_enable_rx_flow_control(pdata); + else + axgbe_disable_rx_flow_control(pdata); + + return 0; +} + +static void axgbe_config_flow_control(struct axgbe_port *pdata) +{ + axgbe_config_tx_flow_control(pdata); + axgbe_config_rx_flow_control(pdata); + + AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0); +} + +static void axgbe_queue_flow_control_threshold(struct axgbe_port *pdata, + unsigned int queue, + unsigned int q_fifo_size) +{ + unsigned int frame_fifo_size; + unsigned int rfa, rfd; + + frame_fifo_size = AXGMAC_FLOW_CONTROL_ALIGN(axgbe_get_max_frame(pdata)); + + /* This path deals with just maximum frame sizes which are + * limited to a jumbo frame of 9,000 (plus headers, etc.) + * so we can never exceed the maximum allowable RFA/RFD + * values. + */ + if (q_fifo_size <= 2048) { + /* rx_rfd to zero to signal no flow control */ + pdata->rx_rfa[queue] = 0; + pdata->rx_rfd[queue] = 0; + return; + } + + if (q_fifo_size <= 4096) { + /* Between 2048 and 4096 */ + pdata->rx_rfa[queue] = 0; /* Full - 1024 bytes */ + pdata->rx_rfd[queue] = 1; /* Full - 1536 bytes */ + return; + } + + if (q_fifo_size <= frame_fifo_size) { + /* Between 4096 and max-frame */ + pdata->rx_rfa[queue] = 2; /* Full - 2048 bytes */ + pdata->rx_rfd[queue] = 5; /* Full - 3584 bytes */ + return; + } + + if (q_fifo_size <= (frame_fifo_size * 3)) { + /* Between max-frame and 3 max-frames, + * trigger if we get just over a frame of data and + * resume when we have just under half a frame left. + */ + rfa = q_fifo_size - frame_fifo_size; + rfd = rfa + (frame_fifo_size / 2); + } else { + /* Above 3 max-frames - trigger when just over + * 2 frames of space available + */ + rfa = frame_fifo_size * 2; + rfa += AXGMAC_FLOW_CONTROL_UNIT; + rfd = rfa + frame_fifo_size; + } + + pdata->rx_rfa[queue] = AXGMAC_FLOW_CONTROL_VALUE(rfa); + pdata->rx_rfd[queue] = AXGMAC_FLOW_CONTROL_VALUE(rfd); +} + +static void axgbe_calculate_flow_control_threshold(struct axgbe_port *pdata) +{ + unsigned int q_fifo_size; + unsigned int i; + + for (i = 0; i < pdata->rx_q_count; i++) { + q_fifo_size = (pdata->fifo + 1) * AXGMAC_FIFO_UNIT; + + axgbe_queue_flow_control_threshold(pdata, i, q_fifo_size); + } +} + +static void axgbe_config_flow_control_threshold(struct axgbe_port *pdata) +{ + unsigned int i; + + for (i = 0; i < pdata->rx_q_count; i++) { + AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA, + pdata->rx_rfa[i]); + AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD, + pdata->rx_rfd[i]); + } +} + +static int __axgbe_exit(struct axgbe_port *pdata) +{ + unsigned int count = 2000; + + /* Issue a software reset */ + AXGMAC_IOWRITE_BITS(pdata, DMA_MR, SWR, 1); + rte_delay_us(10); + + /* Poll Until Poll Condition */ + while (--count && AXGMAC_IOREAD_BITS(pdata, DMA_MR, SWR)) + rte_delay_us(500); + + if (!count) + return -EBUSY; + + return 0; +} + +static int axgbe_exit(struct axgbe_port *pdata) +{ + int ret; + + /* To guard against possible incorrectly generated interrupts, + * issue the software reset twice. 
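+	 * If either reset times out, -EBUSY is returned so the caller
+	 * can abort hardware initialization.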
+ */
+	ret = __axgbe_exit(pdata);
+	if (ret)
+		return ret;
+
+	return __axgbe_exit(pdata);
+}
+
+static int axgbe_flush_tx_queues(struct axgbe_port *pdata)
+{
+	unsigned int i, count;
+
+	if (AXGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) < 0x21)
+		return 0;
+
+	for (i = 0; i < pdata->tx_q_count; i++)
+		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1);
+
+	/* Poll until the flush completes */
+	for (i = 0; i < pdata->tx_q_count; i++) {
+		count = 2000;
+		while (--count && AXGMAC_MTL_IOREAD_BITS(pdata, i,
+							 MTL_Q_TQOMR, FTQ))
+			rte_delay_us(500);
+
+		if (!count)
+			return -EBUSY;
+	}
+
+	return 0;
+}
+
+static void axgbe_config_dma_bus(struct axgbe_port *pdata)
+{
+	/* Set enhanced addressing mode */
+	AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, EAME, 1);
+
+	/* Outstanding read/write requests */
+	AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, RD_OSR, 0x3f);
+	AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, WR_OSR, 0x3f);
+
+	/* Set the System Bus mode */
+	AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, UNDEF, 1);
+	AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, BLEN_32, 1);
+	AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, AAL, 1);
+}
+
+static void axgbe_config_dma_cache(struct axgbe_port *pdata)
+{
+	unsigned int arcache, awcache, arwcache;
+
+	arcache = 0;
+	AXGMAC_SET_BITS(arcache, DMA_AXIARCR, DRC, 0x3);
+	AXGMAC_IOWRITE(pdata, DMA_AXIARCR, arcache);
+
+	awcache = 0;
+	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWC, 0x3);
+	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPC, 0x3);
+	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPD, 0x1);
+	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHC, 0x3);
+	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHD, 0x1);
+	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RDC, 0x3);
+	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RDD, 0x1);
+	AXGMAC_IOWRITE(pdata, DMA_AXIAWCR, awcache);
+
+	arwcache = 0;
+	AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, TDWD, 0x1);
+	AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, TDWC, 0x3);
+	AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, RDRC, 0x3);
+	AXGMAC_IOWRITE(pdata, DMA_AXIAWRCR, arwcache);
+}
+
+static void axgbe_config_edma_control(struct axgbe_port *pdata)
+{
+	AXGMAC_IOWRITE(pdata, EDMA_TX_CONTROL, 0x5);
+	AXGMAC_IOWRITE(pdata, EDMA_RX_CONTROL, 0x5);
+}
+
+static int axgbe_config_osp_mode(struct axgbe_port *pdata)
+{
+	/* Force DMA to operate on second packet before closing descriptors
+	 * of first packet
+	 */
+	struct axgbe_tx_queue *txq;
+	unsigned int i;
+
+	for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
+		txq = pdata->eth_dev->data->tx_queues[i];
+		AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, OSP,
+					pdata->tx_osp_mode);
+	}
+
+	return 0;
+}
+
+static int axgbe_config_pblx8(struct axgbe_port *pdata)
+{
+	struct axgbe_tx_queue *txq;
+	unsigned int i;
+
+	for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
+		txq = pdata->eth_dev->data->tx_queues[i];
+		AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_CR, PBLX8,
+					pdata->pblx8);
+	}
+	return 0;
+}
+
+static int axgbe_config_tx_pbl_val(struct axgbe_port *pdata)
+{
+	struct axgbe_tx_queue *txq;
+	unsigned int i;
+
+	for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
+		txq = pdata->eth_dev->data->tx_queues[i];
+		AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, PBL,
+					pdata->tx_pbl);
+	}
+
+	return 0;
+}
+
+static int axgbe_config_rx_pbl_val(struct axgbe_port *pdata)
+{
+	struct axgbe_rx_queue *rxq;
+	unsigned int i;
+
+	for (i = 0; i < pdata->eth_dev->data->nb_rx_queues; i++) {
+		rxq = pdata->eth_dev->data->rx_queues[i];
+		AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, PBL,
+					pdata->rx_pbl);
+	}
+
+	return 0;
+}
+
+static void axgbe_config_rx_buffer_size(struct axgbe_port *pdata)
+{
+	struct axgbe_rx_queue *rxq;
+	unsigned int i;
+
+	for (i = 0; i < pdata->eth_dev->data->nb_rx_queues; i++) {
+		rxq = pdata->eth_dev->data->rx_queues[i];
+
+		rxq->buf_size = rte_pktmbuf_data_room_size(rxq->mb_pool) -
+			RTE_PKTMBUF_HEADROOM;
+		rxq->buf_size = (rxq->buf_size + AXGBE_RX_BUF_ALIGN - 1) &
+			~(AXGBE_RX_BUF_ALIGN - 1);
+
+		if (rxq->buf_size > pdata->rx_buf_size)
+			pdata->rx_buf_size = rxq->buf_size;
+
+		AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, RBSZ,
+					rxq->buf_size);
+	}
+}
+
+static int axgbe_write_rss_reg(struct axgbe_port *pdata, unsigned int type,
+			       unsigned int index, unsigned int val)
+{
+	unsigned int wait;
+
+	if (AXGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB))
+		return -EBUSY;
+
+	AXGMAC_IOWRITE(pdata, MAC_RSSDR, val);
+
+	AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, RSSIA, index);
+	AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, ADDRT, type);
+	AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, CT, 0);
+	AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, OB, 1);
+
+	wait = 1000;
+	while (wait--) {
+		if (!AXGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB))
+			return 0;
+
+		rte_delay_us(1500);
+	}
+
+	return -EBUSY;
+}
+
+static int axgbe_write_rss_hash_key(struct axgbe_port *pdata)
+{
+	struct rte_eth_rss_conf *rss_conf;
+	unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32);
+	unsigned int *key;
+	int ret;
+
+	rss_conf = &pdata->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
+
+	if (!rss_conf->rss_key)
+		key = (unsigned int *)&pdata->rss_key;
+	else
+		key = (unsigned int *)rss_conf->rss_key;
+
+	while (key_regs--) {
+		ret = axgbe_write_rss_reg(pdata, AXGBE_RSS_HASH_KEY_TYPE,
+					  key_regs, *key++);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int axgbe_write_rss_lookup_table(struct axgbe_port *pdata)
+{
+	unsigned int i;
+	int ret;
+
+	for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) {
+		ret = axgbe_write_rss_reg(pdata,
+					  AXGBE_RSS_LOOKUP_TABLE_TYPE, i,
+					  pdata->rss_table[i]);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int axgbe_enable_rss(struct axgbe_port *pdata)
+{
+	int ret;
+
+	/* Program the hash key */
+	ret = axgbe_write_rss_hash_key(pdata);
+	if (ret)
+		return ret;
+
+	/* Program the lookup table */
+	ret = axgbe_write_rss_lookup_table(pdata);
+	if (ret)
+		return ret;
+
+	/* Set the RSS options */
+	AXGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options);
+
+	/* Enable RSS */
+	AXGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 1);
+
+	return 0;
+}
+
+static void axgbe_rss_options(struct axgbe_port *pdata)
+{
+	struct rte_eth_rss_conf *rss_conf;
+	uint64_t rss_hf;
+
+	rss_conf = &pdata->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
+	rss_hf = rss_conf->rss_hf;
+
+	if (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_IPV6))
+		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
+	if (rss_hf & (ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV6_TCP))
+		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
+	if (rss_hf & (ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP))
+		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
+}
+
+static int axgbe_config_rss(struct axgbe_port *pdata)
+{
+	uint32_t i;
+
+	if (pdata->rss_enable) {
+		/* Initialize RSS hash key and lookup table */
+		uint32_t *key = (uint32_t *)pdata->rss_key;
+
+		for (i = 0; i < sizeof(pdata->rss_key) / 4; i++)
+			*key++ = (uint32_t)rte_rand();
+		for (i = 0; i < AXGBE_RSS_MAX_TABLE_SIZE; i++)
+			AXGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH,
+					i % pdata->eth_dev->data->nb_rx_queues);
+		axgbe_rss_options(pdata);
+		if (axgbe_enable_rss(pdata)) {
+			PMD_DRV_LOG(ERR, "Error in enabling RSS support\n");
+			return -1;
+		}
+	} else {
+		AXGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 0);
+	}
+
+	return 0;
+}
+
+static void axgbe_enable_dma_interrupts(struct axgbe_port *pdata)
+{
+	struct axgbe_tx_queue *txq;
+	unsigned int dma_ch_isr, dma_ch_ier;
+	unsigned int i;
+
+	for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
+		txq = pdata->eth_dev->data->tx_queues[i];
+
+		/* Clear all the interrupts which are set */
+		dma_ch_isr = AXGMAC_DMA_IOREAD(txq, DMA_CH_SR);
+		AXGMAC_DMA_IOWRITE(txq, DMA_CH_SR, dma_ch_isr);
+
+		/* Clear all interrupt enable bits */
+		dma_ch_ier = 0;
+
+		/* Enable the following interrupts
+		 *   AIE  - Abnormal Interrupt Summary Enable
+		 *   FBEE - Fatal Bus Error Enable
+		 * NIE (Normal Interrupt Summary Enable) is left disabled
+		 * since normal Rx/Tx completions are handled by polling.
+		 */
+		AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, NIE, 0);
+		AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, AIE, 1);
+		AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1);
+
+		/* RBUE (Receive Buffer Unavailable Enable) and RIE (Receive
+		 * Interrupt Enable) are likewise left disabled; the receive
+		 * path polls the descriptor rings instead.
+		 */
+		AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 0);
+
+		AXGMAC_DMA_IOWRITE(txq, DMA_CH_IER, dma_ch_ier);
+	}
+}
+
+static void wrapper_tx_desc_init(struct axgbe_port *pdata)
+{
+	struct axgbe_tx_queue *txq;
+	unsigned int i;
+
+	for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
+		txq = pdata->eth_dev->data->tx_queues[i];
+		txq->cur = 0;
+		txq->dirty = 0;
+		/* Update the total number of Tx descriptors */
+		AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDRLR, txq->nb_desc - 1);
+		/* Update the starting address of descriptor ring */
+		AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDLR_HI,
+				   high32_value(txq->ring_phys_addr));
+		AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDLR_LO,
+				   low32_value(txq->ring_phys_addr));
+	}
+}
+
+static int wrapper_rx_desc_init(struct axgbe_port *pdata)
+{
+	struct axgbe_rx_queue *rxq;
+	struct rte_mbuf *mbuf;
+	volatile union axgbe_rx_desc *desc;
+	unsigned int i, j;
+
+	for (i = 0; i < pdata->eth_dev->data->nb_rx_queues; i++) {
+		rxq = pdata->eth_dev->data->rx_queues[i];
+
+		/* Initialize software ring entries */
+		rxq->mbuf_alloc = 0;
+		rxq->cur = 0;
+		rxq->dirty = 0;
+		desc = AXGBE_GET_DESC_PT(rxq, 0);
+
+		for (j = 0; j < rxq->nb_desc; j++) {
+			mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
+			if (mbuf == NULL) {
+				PMD_DRV_LOG(ERR, "RX mbuf alloc failed queue_id = %u, idx = %d\n",
+					    (unsigned int)rxq->queue_id, j);
+				axgbe_dev_rx_queue_release(rxq);
+				return -ENOMEM;
+			}
+			rxq->sw_ring[j] = mbuf;
+			/* Populate the mbuf fields */
+			mbuf->next = NULL;
+			mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+			mbuf->nb_segs = 1;
+			mbuf->port = rxq->port_id;
+			desc->read.baddr =
+				rte_cpu_to_le_64(
+					rte_mbuf_data_iova_default(mbuf));
+			rte_wmb();
+			AXGMAC_SET_BITS_LE(desc->read.desc3,
+					   RX_NORMAL_DESC3, OWN, 1);
+			rte_wmb();
+			rxq->mbuf_alloc++;
+			desc++;
+		}
+		/* Update the total number of Rx descriptors */
+		AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDRLR,
+				   rxq->nb_desc - 1);
+		/* Update the starting address of descriptor ring */
+		AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDLR_HI,
+				   high32_value(rxq->ring_phys_addr));
+		AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDLR_LO,
+				   low32_value(rxq->ring_phys_addr));
+		/* Update the Rx Descriptor Tail Pointer */
+		AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDTR_LO,
+				   low32_value(rxq->ring_phys_addr +
+					       (rxq->nb_desc - 1) *
+					       sizeof(union axgbe_rx_desc)));
+	}
+	return 0;
+}
+
+static void axgbe_config_mtl_mode(struct axgbe_port *pdata)
+{
+	unsigned int i;
+
+	/* Set Tx to weighted round robin scheduling algorithm */
+	AXGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_WRR);
+
+	/* Set Tx traffic classes to use WRR algorithm with equal weights */
+	for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
+		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
+					MTL_TSA_ETS);
+		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW, 1);
+	}
+
+	/* Set Rx to strict priority algorithm */
+	AXGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP);
+}
+
+static int axgbe_config_tsf_mode(struct axgbe_port *pdata, unsigned int val)
+{
+	unsigned int i;
+
+	for (i = 0; i < pdata->tx_q_count; i++)
+		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TSF, val);
+
+	return 0;
+}
+
+static int axgbe_config_rsf_mode(struct axgbe_port *pdata, unsigned int val)
+{
+	unsigned int i;
+
+	for (i = 0; i < pdata->rx_q_count; i++)
+		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RSF, val);
+
+	return 0;
+}
+
+static int axgbe_config_tx_threshold(struct axgbe_port *pdata,
+				     unsigned int val)
+{
+	unsigned int i;
+
+	for (i = 0; i < pdata->tx_q_count; i++)
+		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TTC, val);
+
+	return 0;
+}
+
+static int axgbe_config_rx_threshold(struct axgbe_port *pdata,
+				     unsigned int val)
+{
+	unsigned int i;
+
+	for (i = 0; i < pdata->rx_q_count; i++)
+		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RTC, val);
+
+	return 0;
+}
+
+/* Distribute the FIFO size among the Rx queues */
+static void axgbe_config_rx_fifo_size(struct axgbe_port *pdata)
+{
+	unsigned int fifo_size;
+	unsigned int q_fifo_size;
+	unsigned int p_fifo, i;
+
+	fifo_size = RTE_MIN(pdata->rx_max_fifo_size,
+			    pdata->hw_feat.rx_fifo_size);
+	q_fifo_size = fifo_size / pdata->rx_q_count;
+
+	/* Calculate the fifo setting by dividing the queue's fifo size
+	 * by the fifo allocation increment (with 0 representing the
+	 * base allocation increment so decrement the result
+	 * by 1).
+	 */
+	p_fifo = q_fifo_size / AXGMAC_FIFO_UNIT;
+	if (p_fifo)
+		p_fifo--;
+
+	for (i = 0; i < pdata->rx_q_count; i++)
+		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, p_fifo);
+	pdata->fifo = p_fifo;
+
+	/* Calculate and configure the flow control thresholds */
+	axgbe_calculate_flow_control_threshold(pdata);
+	axgbe_config_flow_control_threshold(pdata);
+}
+
+static void axgbe_config_tx_fifo_size(struct axgbe_port *pdata)
+{
+	unsigned int fifo_size;
+	unsigned int q_fifo_size;
+	unsigned int p_fifo, i;
+
+	fifo_size = RTE_MIN(pdata->tx_max_fifo_size,
+			    pdata->hw_feat.tx_fifo_size);
+	q_fifo_size = fifo_size / pdata->tx_q_count;
+
+	/* Calculate the fifo setting by dividing the queue's fifo size
+	 * by the fifo allocation increment (with 0 representing the
+	 * base allocation increment so decrement the result
+	 * by 1).
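+	 * E.g. assuming a 64 KB Tx FIFO shared by four queues:
+	 * q_fifo_size is 16384 bytes and TQS is programmed as
+	 * 16384 / 256 (AXGMAC_FIFO_UNIT) - 1 = 63.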
+	 */
+	p_fifo = q_fifo_size / AXGMAC_FIFO_UNIT;
+	if (p_fifo)
+		p_fifo--;
+
+	for (i = 0; i < pdata->tx_q_count; i++)
+		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, p_fifo);
+}
+
+static void axgbe_config_queue_mapping(struct axgbe_port *pdata)
+{
+	unsigned int qptc, qptc_extra, queue;
+	unsigned int i, j, reg, reg_val;
+
+	/* Map the MTL Tx Queues to Traffic Classes
+	 * Note: Tx Queues >= Traffic Classes
+	 */
+	qptc = pdata->tx_q_count / pdata->hw_feat.tc_cnt;
+	qptc_extra = pdata->tx_q_count % pdata->hw_feat.tc_cnt;
+
+	for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {
+		for (j = 0; j < qptc; j++) {
+			AXGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
+						Q2TCMAP, i);
+			queue++;
+		}
+		if (i < qptc_extra) {
+			AXGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
+						Q2TCMAP, i);
+			queue++;
+		}
+	}
+
+	if (pdata->rss_enable) {
+		/* Select dynamic mapping of MTL Rx queue to DMA Rx channel */
+		reg = MTL_RQDCM0R;
+		reg_val = 0;
+		for (i = 0; i < pdata->rx_q_count;) {
+			reg_val |= (0x80 << ((i++ % MTL_RQDCM_Q_PER_REG) << 3));
+
+			if ((i % MTL_RQDCM_Q_PER_REG) &&
+			    (i != pdata->rx_q_count))
+				continue;
+
+			AXGMAC_IOWRITE(pdata, reg, reg_val);
+
+			reg += MTL_RQDCM_INC;
+			reg_val = 0;
+		}
+	}
+}
+
+static void axgbe_enable_mtl_interrupts(struct axgbe_port *pdata)
+{
+	unsigned int mtl_q_isr;
+	unsigned int q_count, i;
+
+	q_count = RTE_MAX(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt);
+	for (i = 0; i < q_count; i++) {
+		/* Clear all the interrupts which are set */
+		mtl_q_isr = AXGMAC_MTL_IOREAD(pdata, i, MTL_Q_ISR);
+		AXGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, mtl_q_isr);
+
+		/* No MTL interrupts to be enabled */
+		AXGMAC_MTL_IOWRITE(pdata, i, MTL_Q_IER, 0);
+	}
+}
+
+static int axgbe_set_mac_address(struct axgbe_port *pdata, u8 *addr)
+{
+	unsigned int mac_addr_hi, mac_addr_lo;
+
+	mac_addr_hi = (addr[5] << 8) | (addr[4] << 0);
+	mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) |
+		      (addr[1] << 8) | (addr[0] << 0);
+
+	AXGMAC_IOWRITE(pdata, MAC_MACA0HR, mac_addr_hi);
+	AXGMAC_IOWRITE(pdata, MAC_MACA0LR, mac_addr_lo);
+
+	return 0;
+}
+
+static void axgbe_config_mac_address(struct axgbe_port *pdata)
+{
+	axgbe_set_mac_address(pdata, pdata->mac_addr.addr_bytes);
+}
+
+static void axgbe_config_jumbo_enable(struct axgbe_port *pdata)
+{
+	unsigned int val;
+
+	val = (pdata->rx_buf_size > AXGMAC_STD_PACKET_MTU) ?
1 : 0; + + AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val); +} + +static void axgbe_config_mac_speed(struct axgbe_port *pdata) +{ + axgbe_set_speed(pdata, pdata->phy_speed); +} + +static void axgbe_config_checksum_offload(struct axgbe_port *pdata) +{ + if (pdata->rx_csum_enable) + AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 1); + else + AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 0); +} + +static int axgbe_init(struct axgbe_port *pdata) +{ + int ret; + + /* Flush Tx queues */ + ret = axgbe_flush_tx_queues(pdata); + if (ret) + return ret; + /* Initialize DMA related features */ + axgbe_config_dma_bus(pdata); + axgbe_config_dma_cache(pdata); + axgbe_config_edma_control(pdata); + axgbe_config_osp_mode(pdata); + axgbe_config_pblx8(pdata); + axgbe_config_tx_pbl_val(pdata); + axgbe_config_rx_pbl_val(pdata); + axgbe_config_rx_buffer_size(pdata); + axgbe_config_rss(pdata); + wrapper_tx_desc_init(pdata); + ret = wrapper_rx_desc_init(pdata); + if (ret) + return ret; + axgbe_enable_dma_interrupts(pdata); + + /* Initialize MTL related features */ + axgbe_config_mtl_mode(pdata); + axgbe_config_queue_mapping(pdata); + axgbe_config_tsf_mode(pdata, pdata->tx_sf_mode); + axgbe_config_rsf_mode(pdata, pdata->rx_sf_mode); + axgbe_config_tx_threshold(pdata, pdata->tx_threshold); + axgbe_config_rx_threshold(pdata, pdata->rx_threshold); + axgbe_config_tx_fifo_size(pdata); + axgbe_config_rx_fifo_size(pdata); + + axgbe_enable_mtl_interrupts(pdata); + + /* Initialize MAC related features */ + axgbe_config_mac_address(pdata); + axgbe_config_jumbo_enable(pdata); + axgbe_config_flow_control(pdata); + axgbe_config_mac_speed(pdata); + axgbe_config_checksum_offload(pdata); + + return 0; +} + +void axgbe_init_function_ptrs_dev(struct axgbe_hw_if *hw_if) +{ + hw_if->exit = axgbe_exit; + hw_if->config_flow_control = axgbe_config_flow_control; + + hw_if->init = axgbe_init; + + hw_if->read_mmd_regs = axgbe_read_mmd_regs; + hw_if->write_mmd_regs = axgbe_write_mmd_regs; + + hw_if->set_speed = axgbe_set_speed; + + hw_if->set_ext_mii_mode = axgbe_set_ext_mii_mode; + hw_if->read_ext_mii_regs = axgbe_read_ext_mii_regs; + hw_if->write_ext_mii_regs = axgbe_write_ext_mii_regs; + /* For FLOW ctrl */ + hw_if->config_tx_flow_control = axgbe_config_tx_flow_control; + hw_if->config_rx_flow_control = axgbe_config_rx_flow_control; +} diff --git a/drivers/net/axgbe/axgbe_ethdev.c b/drivers/net/axgbe/axgbe_ethdev.c new file mode 100644 index 00000000..7a3ba2e7 --- /dev/null +++ b/drivers/net/axgbe/axgbe_ethdev.c @@ -0,0 +1,772 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved. + * Copyright(c) 2018 Synopsys, Inc. All rights reserved. 
+ */ + +#include "axgbe_rxtx.h" +#include "axgbe_ethdev.h" +#include "axgbe_common.h" +#include "axgbe_phy.h" + +static int eth_axgbe_dev_init(struct rte_eth_dev *eth_dev); +static int eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev); +static int axgbe_dev_configure(struct rte_eth_dev *dev); +static int axgbe_dev_start(struct rte_eth_dev *dev); +static void axgbe_dev_stop(struct rte_eth_dev *dev); +static void axgbe_dev_interrupt_handler(void *param); +static void axgbe_dev_close(struct rte_eth_dev *dev); +static void axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev); +static void axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev); +static void axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev); +static void axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev); +static int axgbe_dev_link_update(struct rte_eth_dev *dev, + int wait_to_complete); +static int axgbe_dev_stats_get(struct rte_eth_dev *dev, + struct rte_eth_stats *stats); +static void axgbe_dev_stats_reset(struct rte_eth_dev *dev); +static void axgbe_dev_info_get(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info); + +/* The set of PCI devices this driver supports */ +#define AMD_PCI_VENDOR_ID 0x1022 +#define AMD_PCI_AXGBE_DEVICE_V2A 0x1458 +#define AMD_PCI_AXGBE_DEVICE_V2B 0x1459 + +int axgbe_logtype_init; +int axgbe_logtype_driver; + +static const struct rte_pci_id pci_id_axgbe_map[] = { + {RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2A)}, + {RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2B)}, + { .vendor_id = 0, }, +}; + +static struct axgbe_version_data axgbe_v2a = { + .init_function_ptrs_phy_impl = axgbe_init_function_ptrs_phy_v2, + .xpcs_access = AXGBE_XPCS_ACCESS_V2, + .mmc_64bit = 1, + .tx_max_fifo_size = 229376, + .rx_max_fifo_size = 229376, + .tx_tstamp_workaround = 1, + .ecc_support = 1, + .i2c_support = 1, + .an_cdr_workaround = 1, +}; + +static struct axgbe_version_data axgbe_v2b = { + .init_function_ptrs_phy_impl = axgbe_init_function_ptrs_phy_v2, + .xpcs_access = AXGBE_XPCS_ACCESS_V2, + .mmc_64bit = 1, + .tx_max_fifo_size = 65536, + .rx_max_fifo_size = 65536, + .tx_tstamp_workaround = 1, + .ecc_support = 1, + .i2c_support = 1, + .an_cdr_workaround = 1, +}; + +static const struct rte_eth_desc_lim rx_desc_lim = { + .nb_max = AXGBE_MAX_RING_DESC, + .nb_min = AXGBE_MIN_RING_DESC, + .nb_align = 8, +}; + +static const struct rte_eth_desc_lim tx_desc_lim = { + .nb_max = AXGBE_MAX_RING_DESC, + .nb_min = AXGBE_MIN_RING_DESC, + .nb_align = 8, +}; + +static const struct eth_dev_ops axgbe_eth_dev_ops = { + .dev_configure = axgbe_dev_configure, + .dev_start = axgbe_dev_start, + .dev_stop = axgbe_dev_stop, + .dev_close = axgbe_dev_close, + .promiscuous_enable = axgbe_dev_promiscuous_enable, + .promiscuous_disable = axgbe_dev_promiscuous_disable, + .allmulticast_enable = axgbe_dev_allmulticast_enable, + .allmulticast_disable = axgbe_dev_allmulticast_disable, + .link_update = axgbe_dev_link_update, + .stats_get = axgbe_dev_stats_get, + .stats_reset = axgbe_dev_stats_reset, + .dev_infos_get = axgbe_dev_info_get, + .rx_queue_setup = axgbe_dev_rx_queue_setup, + .rx_queue_release = axgbe_dev_rx_queue_release, + .tx_queue_setup = axgbe_dev_tx_queue_setup, + .tx_queue_release = axgbe_dev_tx_queue_release, +}; + +static int axgbe_phy_reset(struct axgbe_port *pdata) +{ + pdata->phy_link = -1; + pdata->phy_speed = SPEED_UNKNOWN; + return pdata->phy_if.phy_reset(pdata); +} + +/* + * Interrupt handler triggered by NIC for handling + * specific interrupt. 
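+ * It services PHY auto-negotiation events and the DMA channel
+ * interrupts, then re-enables the interrupt line, which is disabled
+ * after an interrupt is generated.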
+ *
+ * @param handle
+ *  Pointer to interrupt handle.
+ * @param param
+ *  The address of parameter (struct rte_eth_dev *) registered before.
+ *
+ * @return
+ *  void
+ */
+static void
+axgbe_dev_interrupt_handler(void *param)
+{
+	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+	struct axgbe_port *pdata = dev->data->dev_private;
+	unsigned int dma_isr, dma_ch_isr;
+
+	pdata->phy_if.an_isr(pdata);
+	/* DMA related interrupts */
+	dma_isr = AXGMAC_IOREAD(pdata, DMA_ISR);
+	if (dma_isr) {
+		if (dma_isr & 1) {
+			dma_ch_isr =
+				AXGMAC_DMA_IOREAD((struct axgbe_rx_queue *)
+						  pdata->rx_queues[0],
+						  DMA_CH_SR);
+			AXGMAC_DMA_IOWRITE((struct axgbe_rx_queue *)
+					   pdata->rx_queues[0],
+					   DMA_CH_SR, dma_ch_isr);
+		}
+	}
+	/* Re-enable the interrupt, since it is disabled after generation */
+	rte_intr_enable(&pdata->pci_dev->intr_handle);
+}
+
+/*
+ * Configure device link speed and setup link.
+ * It returns 0 on success.
+ */
+static int
+axgbe_dev_configure(struct rte_eth_dev *dev)
+{
+	struct axgbe_port *pdata = dev->data->dev_private;
+	/* Checksum offload to hardware */
+	pdata->rx_csum_enable = dev->data->dev_conf.rxmode.offloads &
+				DEV_RX_OFFLOAD_CHECKSUM;
+	return 0;
+}
+
+static int
+axgbe_dev_rx_mq_config(struct rte_eth_dev *dev)
+{
+	struct axgbe_port *pdata = (struct axgbe_port *)dev->data->dev_private;
+
+	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
+		pdata->rss_enable = 1;
+	else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
+		pdata->rss_enable = 0;
+	else
+		return -1;
+	return 0;
+}
+
+static int
+axgbe_dev_start(struct rte_eth_dev *dev)
+{
+	PMD_INIT_FUNC_TRACE();
+	struct axgbe_port *pdata = (struct axgbe_port *)dev->data->dev_private;
+	int ret;
+
+	/* Multiqueue RSS */
+	ret = axgbe_dev_rx_mq_config(dev);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Unable to config RX MQ\n");
+		return ret;
+	}
+	ret = axgbe_phy_reset(pdata);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "phy reset failed\n");
+		return ret;
+	}
+	ret = pdata->hw_if.init(pdata);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "dev_init failed\n");
+		return ret;
+	}
+
+	/* Enable uio/vfio intr/eventfd mapping */
+	rte_intr_enable(&pdata->pci_dev->intr_handle);
+
+	/* Start the PHY */
+	pdata->phy_if.phy_start(pdata);
+	axgbe_dev_enable_tx(dev);
+	axgbe_dev_enable_rx(dev);
+
+	axgbe_clear_bit(AXGBE_STOPPED, &pdata->dev_state);
+	axgbe_clear_bit(AXGBE_DOWN, &pdata->dev_state);
+	return 0;
+}
+
+/* Stop device: disable rx and tx functions to allow for reconfiguring. */
+static void
+axgbe_dev_stop(struct rte_eth_dev *dev)
+{
+	PMD_INIT_FUNC_TRACE();
+	struct axgbe_port *pdata = dev->data->dev_private;
+
+	rte_intr_disable(&pdata->pci_dev->intr_handle);
+
+	if (axgbe_test_bit(AXGBE_STOPPED, &pdata->dev_state))
+		return;
+
+	axgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state);
+	axgbe_dev_disable_tx(dev);
+	axgbe_dev_disable_rx(dev);
+
+	pdata->phy_if.phy_stop(pdata);
+	pdata->hw_if.exit(pdata);
+	memset(&dev->data->dev_link, 0, sizeof(struct rte_eth_link));
+	axgbe_set_bit(AXGBE_DOWN, &pdata->dev_state);
+}
+
+/* Clear all resources like TX/RX queues.
*/ +static void +axgbe_dev_close(struct rte_eth_dev *dev) +{ + axgbe_dev_clear_queues(dev); +} + +static void +axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev) +{ + PMD_INIT_FUNC_TRACE(); + struct axgbe_port *pdata = dev->data->dev_private; + + AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 1); +} + +static void +axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev) +{ + PMD_INIT_FUNC_TRACE(); + struct axgbe_port *pdata = dev->data->dev_private; + + AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 0); +} + +static void +axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev) +{ + PMD_INIT_FUNC_TRACE(); + struct axgbe_port *pdata = dev->data->dev_private; + + if (AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM)) + return; + AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 1); +} + +static void +axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev) +{ + PMD_INIT_FUNC_TRACE(); + struct axgbe_port *pdata = dev->data->dev_private; + + if (!AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM)) + return; + AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 0); +} + +/* return 0 means link status changed, -1 means not changed */ +static int +axgbe_dev_link_update(struct rte_eth_dev *dev, + int wait_to_complete __rte_unused) +{ + struct axgbe_port *pdata = dev->data->dev_private; + struct rte_eth_link link; + int ret = 0; + + PMD_INIT_FUNC_TRACE(); + rte_delay_ms(800); + + pdata->phy_if.phy_status(pdata); + + memset(&link, 0, sizeof(struct rte_eth_link)); + link.link_duplex = pdata->phy.duplex; + link.link_status = pdata->phy_link; + link.link_speed = pdata->phy_speed; + link.link_autoneg = !(dev->data->dev_conf.link_speeds & + ETH_LINK_SPEED_FIXED); + ret = rte_eth_linkstatus_set(dev, &link); + if (ret == -1) + PMD_DRV_LOG(ERR, "No change in link status\n"); + + return ret; +} + +static int +axgbe_dev_stats_get(struct rte_eth_dev *dev, + struct rte_eth_stats *stats) +{ + struct axgbe_rx_queue *rxq; + struct axgbe_tx_queue *txq; + unsigned int i; + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + stats->q_ipackets[i] = rxq->pkts; + stats->ipackets += rxq->pkts; + stats->q_ibytes[i] = rxq->bytes; + stats->ibytes += rxq->bytes; + } + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + stats->q_opackets[i] = txq->pkts; + stats->opackets += txq->pkts; + stats->q_obytes[i] = txq->bytes; + stats->obytes += txq->bytes; + } + + return 0; +} + +static void +axgbe_dev_stats_reset(struct rte_eth_dev *dev) +{ + struct axgbe_rx_queue *rxq; + struct axgbe_tx_queue *txq; + unsigned int i; + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + rxq->pkts = 0; + rxq->bytes = 0; + rxq->errors = 0; + } + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + txq->pkts = 0; + txq->bytes = 0; + txq->errors = 0; + } +} + +static void +axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) +{ + struct axgbe_port *pdata = dev->data->dev_private; + + dev_info->max_rx_queues = pdata->rx_ring_count; + dev_info->max_tx_queues = pdata->tx_ring_count; + dev_info->min_rx_bufsize = AXGBE_RX_MIN_BUF_SIZE; + dev_info->max_rx_pktlen = AXGBE_RX_MAX_BUF_SIZE; + dev_info->max_mac_addrs = AXGBE_MAX_MAC_ADDRS; + dev_info->speed_capa = ETH_LINK_SPEED_10G; + + dev_info->rx_offload_capa = + DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM; + + dev_info->tx_offload_capa = + DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM; + + if (pdata->hw_feat.rss) { + 
dev_info->flow_type_rss_offloads = AXGBE_RSS_OFFLOAD; + dev_info->reta_size = pdata->hw_feat.hash_table_size; + dev_info->hash_key_size = AXGBE_RSS_HASH_KEY_SIZE; + } + + dev_info->rx_desc_lim = rx_desc_lim; + dev_info->tx_desc_lim = tx_desc_lim; + + dev_info->default_rxconf = (struct rte_eth_rxconf) { + .rx_free_thresh = AXGBE_RX_FREE_THRESH, + }; + + dev_info->default_txconf = (struct rte_eth_txconf) { + .tx_free_thresh = AXGBE_TX_FREE_THRESH, + .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | + ETH_TXQ_FLAGS_NOOFFLOADS, + }; +} + +static void axgbe_get_all_hw_features(struct axgbe_port *pdata) +{ + unsigned int mac_hfr0, mac_hfr1, mac_hfr2; + struct axgbe_hw_features *hw_feat = &pdata->hw_feat; + + mac_hfr0 = AXGMAC_IOREAD(pdata, MAC_HWF0R); + mac_hfr1 = AXGMAC_IOREAD(pdata, MAC_HWF1R); + mac_hfr2 = AXGMAC_IOREAD(pdata, MAC_HWF2R); + + memset(hw_feat, 0, sizeof(*hw_feat)); + + hw_feat->version = AXGMAC_IOREAD(pdata, MAC_VR); + + /* Hardware feature register 0 */ + hw_feat->gmii = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL); + hw_feat->vlhash = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH); + hw_feat->sma = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL); + hw_feat->rwk = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL); + hw_feat->mgk = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL); + hw_feat->mmc = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL); + hw_feat->aoe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL); + hw_feat->ts = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL); + hw_feat->eee = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL); + hw_feat->tx_coe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL); + hw_feat->rx_coe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL); + hw_feat->addn_mac = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, + ADDMACADRSEL); + hw_feat->ts_src = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL); + hw_feat->sa_vlan_ins = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS); + + /* Hardware feature register 1 */ + hw_feat->rx_fifo_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, + RXFIFOSIZE); + hw_feat->tx_fifo_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, + TXFIFOSIZE); + hw_feat->adv_ts_hi = AXGMAC_GET_BITS(mac_hfr1, + MAC_HWF1R, ADVTHWORD); + hw_feat->dma_width = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64); + hw_feat->dcb = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN); + hw_feat->sph = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN); + hw_feat->tso = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN); + hw_feat->dma_debug = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA); + hw_feat->rss = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN); + hw_feat->tc_cnt = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC); + hw_feat->hash_table_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, + HASHTBLSZ); + hw_feat->l3l4_filter_num = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, + L3L4FNUM); + + /* Hardware feature register 2 */ + hw_feat->rx_q_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT); + hw_feat->tx_q_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT); + hw_feat->rx_ch_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT); + hw_feat->tx_ch_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT); + hw_feat->pps_out_num = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM); + hw_feat->aux_snap_num = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, + AUXSNAPNUM); + + /* Translate the Hash Table size into actual number */ + switch (hw_feat->hash_table_size) { + case 0: + break; + case 1: + hw_feat->hash_table_size = 64; + break; + case 2: + hw_feat->hash_table_size = 128; + break; + case 3: + hw_feat->hash_table_size = 256; + break; + } + + /* Translate the address 
width setting into actual number */ + switch (hw_feat->dma_width) { + case 0: + hw_feat->dma_width = 32; + break; + case 1: + hw_feat->dma_width = 40; + break; + case 2: + hw_feat->dma_width = 48; + break; + default: + hw_feat->dma_width = 32; + } + + /* The Queue, Channel and TC counts are zero based so increment them + * to get the actual number + */ + hw_feat->rx_q_cnt++; + hw_feat->tx_q_cnt++; + hw_feat->rx_ch_cnt++; + hw_feat->tx_ch_cnt++; + hw_feat->tc_cnt++; + + /* Translate the fifo sizes into actual numbers */ + hw_feat->rx_fifo_size = 1 << (hw_feat->rx_fifo_size + 7); + hw_feat->tx_fifo_size = 1 << (hw_feat->tx_fifo_size + 7); +} + +static void axgbe_init_all_fptrs(struct axgbe_port *pdata) +{ + axgbe_init_function_ptrs_dev(&pdata->hw_if); + axgbe_init_function_ptrs_phy(&pdata->phy_if); + axgbe_init_function_ptrs_i2c(&pdata->i2c_if); + pdata->vdata->init_function_ptrs_phy_impl(&pdata->phy_if); +} + +static void axgbe_set_counts(struct axgbe_port *pdata) +{ + /* Set all the function pointers */ + axgbe_init_all_fptrs(pdata); + + /* Populate the hardware features */ + axgbe_get_all_hw_features(pdata); + + /* Set default max values if not provided */ + if (!pdata->tx_max_channel_count) + pdata->tx_max_channel_count = pdata->hw_feat.tx_ch_cnt; + if (!pdata->rx_max_channel_count) + pdata->rx_max_channel_count = pdata->hw_feat.rx_ch_cnt; + + if (!pdata->tx_max_q_count) + pdata->tx_max_q_count = pdata->hw_feat.tx_q_cnt; + if (!pdata->rx_max_q_count) + pdata->rx_max_q_count = pdata->hw_feat.rx_q_cnt; + + /* Calculate the number of Tx and Rx rings to be created + * -Tx (DMA) Channels map 1-to-1 to Tx Queues so set + * the number of Tx queues to the number of Tx channels + * enabled + * -Rx (DMA) Channels do not map 1-to-1 so use the actual + * number of Rx queues or maximum allowed + */ + pdata->tx_ring_count = RTE_MIN(pdata->hw_feat.tx_ch_cnt, + pdata->tx_max_channel_count); + pdata->tx_ring_count = RTE_MIN(pdata->tx_ring_count, + pdata->tx_max_q_count); + + pdata->tx_q_count = pdata->tx_ring_count; + + pdata->rx_ring_count = RTE_MIN(pdata->hw_feat.rx_ch_cnt, + pdata->rx_max_channel_count); + + pdata->rx_q_count = RTE_MIN(pdata->hw_feat.rx_q_cnt, + pdata->rx_max_q_count); +} + +static void axgbe_default_config(struct axgbe_port *pdata) +{ + pdata->pblx8 = DMA_PBL_X8_ENABLE; + pdata->tx_sf_mode = MTL_TSF_ENABLE; + pdata->tx_threshold = MTL_TX_THRESHOLD_64; + pdata->tx_pbl = DMA_PBL_32; + pdata->tx_osp_mode = DMA_OSP_ENABLE; + pdata->rx_sf_mode = MTL_RSF_ENABLE; + pdata->rx_threshold = MTL_RX_THRESHOLD_64; + pdata->rx_pbl = DMA_PBL_32; + pdata->pause_autoneg = 1; + pdata->tx_pause = 0; + pdata->rx_pause = 0; + pdata->phy_speed = SPEED_UNKNOWN; + pdata->power_down = 0; +} + +/* + * It returns 0 on success. + */ +static int +eth_axgbe_dev_init(struct rte_eth_dev *eth_dev) +{ + PMD_INIT_FUNC_TRACE(); + struct axgbe_port *pdata; + struct rte_pci_device *pci_dev; + uint32_t reg, mac_lo, mac_hi; + int ret; + + eth_dev->dev_ops = &axgbe_eth_dev_ops; + eth_dev->rx_pkt_burst = &axgbe_recv_pkts; + + /* + * For secondary processes, we don't initialise any further as primary + * has already done this work. 
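+	 * Only the dev_ops and rx_pkt_burst pointers assigned above are
+	 * needed in a secondary process.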
+ */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + pdata = (struct axgbe_port *)eth_dev->data->dev_private; + /* initial state */ + axgbe_set_bit(AXGBE_DOWN, &pdata->dev_state); + axgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state); + pdata->eth_dev = eth_dev; + + pci_dev = RTE_DEV_TO_PCI(eth_dev->device); + pdata->pci_dev = pci_dev; + + pdata->xgmac_regs = + (void *)pci_dev->mem_resource[AXGBE_AXGMAC_BAR].addr; + pdata->xprop_regs = (void *)((uint8_t *)pdata->xgmac_regs + + AXGBE_MAC_PROP_OFFSET); + pdata->xi2c_regs = (void *)((uint8_t *)pdata->xgmac_regs + + AXGBE_I2C_CTRL_OFFSET); + pdata->xpcs_regs = (void *)pci_dev->mem_resource[AXGBE_XPCS_BAR].addr; + + /* version specific driver data*/ + if (pci_dev->id.device_id == AMD_PCI_AXGBE_DEVICE_V2A) + pdata->vdata = &axgbe_v2a; + else + pdata->vdata = &axgbe_v2b; + + /* Configure the PCS indirect addressing support */ + reg = XPCS32_IOREAD(pdata, PCS_V2_WINDOW_DEF); + pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET); + pdata->xpcs_window <<= 6; + pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE); + pdata->xpcs_window_size = 1 << (pdata->xpcs_window_size + 7); + pdata->xpcs_window_mask = pdata->xpcs_window_size - 1; + pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF; + pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT; + PMD_INIT_LOG(DEBUG, + "xpcs window :%x, size :%x, mask :%x ", pdata->xpcs_window, + pdata->xpcs_window_size, pdata->xpcs_window_mask); + XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff); + + /* Retrieve the MAC address */ + mac_lo = XP_IOREAD(pdata, XP_MAC_ADDR_LO); + mac_hi = XP_IOREAD(pdata, XP_MAC_ADDR_HI); + pdata->mac_addr.addr_bytes[0] = mac_lo & 0xff; + pdata->mac_addr.addr_bytes[1] = (mac_lo >> 8) & 0xff; + pdata->mac_addr.addr_bytes[2] = (mac_lo >> 16) & 0xff; + pdata->mac_addr.addr_bytes[3] = (mac_lo >> 24) & 0xff; + pdata->mac_addr.addr_bytes[4] = mac_hi & 0xff; + pdata->mac_addr.addr_bytes[5] = (mac_hi >> 8) & 0xff; + + eth_dev->data->mac_addrs = rte_zmalloc("axgbe_mac_addr", + ETHER_ADDR_LEN, 0); + if (!eth_dev->data->mac_addrs) { + PMD_INIT_LOG(ERR, + "Failed to alloc %u bytes needed to store MAC addr tbl", + ETHER_ADDR_LEN); + return -ENOMEM; + } + + if (!is_valid_assigned_ether_addr(&pdata->mac_addr)) + eth_random_addr(pdata->mac_addr.addr_bytes); + + /* Copy the permanent MAC address */ + ether_addr_copy(&pdata->mac_addr, ð_dev->data->mac_addrs[0]); + + /* Clock settings */ + pdata->sysclk_rate = AXGBE_V2_DMA_CLOCK_FREQ; + pdata->ptpclk_rate = AXGBE_V2_PTP_CLOCK_FREQ; + + /* Set the DMA coherency values */ + pdata->coherent = 1; + pdata->axdomain = AXGBE_DMA_OS_AXDOMAIN; + pdata->arcache = AXGBE_DMA_OS_ARCACHE; + pdata->awcache = AXGBE_DMA_OS_AWCACHE; + + /* Set the maximum channels and queues */ + reg = XP_IOREAD(pdata, XP_PROP_1); + pdata->tx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_DMA); + pdata->rx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_DMA); + pdata->tx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_QUEUES); + pdata->rx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_QUEUES); + + /* Set the hardware channel and queue counts */ + axgbe_set_counts(pdata); + + /* Set the maximum fifo amounts */ + reg = XP_IOREAD(pdata, XP_PROP_2); + pdata->tx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, TX_FIFO_SIZE); + pdata->tx_max_fifo_size *= 16384; + pdata->tx_max_fifo_size = RTE_MIN(pdata->tx_max_fifo_size, + pdata->vdata->tx_max_fifo_size); + pdata->rx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, RX_FIFO_SIZE); + pdata->rx_max_fifo_size *= 
16384;
+	pdata->rx_max_fifo_size = RTE_MIN(pdata->rx_max_fifo_size,
+					  pdata->vdata->rx_max_fifo_size);
+	/* Issue software reset to DMA */
+	ret = pdata->hw_if.exit(pdata);
+	if (ret)
+		PMD_DRV_LOG(ERR, "hw_if->exit EBUSY error\n");
+
+	/* Set default configuration data */
+	axgbe_default_config(pdata);
+
+	/* Set default max values if not provided */
+	if (!pdata->tx_max_fifo_size)
+		pdata->tx_max_fifo_size = pdata->hw_feat.tx_fifo_size;
+	if (!pdata->rx_max_fifo_size)
+		pdata->rx_max_fifo_size = pdata->hw_feat.rx_fifo_size;
+
+	pdata->tx_desc_count = AXGBE_MAX_RING_DESC;
+	pdata->rx_desc_count = AXGBE_MAX_RING_DESC;
+	pthread_mutex_init(&pdata->xpcs_mutex, NULL);
+	pthread_mutex_init(&pdata->i2c_mutex, NULL);
+	pthread_mutex_init(&pdata->an_mutex, NULL);
+	pthread_mutex_init(&pdata->phy_mutex, NULL);
+
+	ret = pdata->phy_if.phy_init(pdata);
+	if (ret) {
+		rte_free(eth_dev->data->mac_addrs);
+		return ret;
+	}
+
+	rte_intr_callback_register(&pci_dev->intr_handle,
+				   axgbe_dev_interrupt_handler,
+				   (void *)eth_dev);
+	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
+		     eth_dev->data->port_id, pci_dev->id.vendor_id,
+		     pci_dev->id.device_id);
+
+	return 0;
+}
+
+static int
+eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+	struct rte_pci_device *pci_dev;
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
+	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+	/* Free the MAC addresses */
+	rte_free(eth_dev->data->mac_addrs);
+	eth_dev->data->mac_addrs = NULL;
+	eth_dev->dev_ops = NULL;
+	eth_dev->rx_pkt_burst = NULL;
+	eth_dev->tx_pkt_burst = NULL;
+	axgbe_dev_clear_queues(eth_dev);
+
+	/* Disable uio intr before callback unregister */
+	rte_intr_disable(&pci_dev->intr_handle);
+	rte_intr_callback_unregister(&pci_dev->intr_handle,
+				     axgbe_dev_interrupt_handler,
+				     (void *)eth_dev);
+
+	return 0;
+}
+
+static int eth_axgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+			       struct rte_pci_device *pci_dev)
+{
+	return rte_eth_dev_pci_generic_probe(pci_dev,
+			sizeof(struct axgbe_port), eth_axgbe_dev_init);
+}
+
+static int eth_axgbe_pci_remove(struct rte_pci_device *pci_dev)
+{
+	return rte_eth_dev_pci_generic_remove(pci_dev, eth_axgbe_dev_uninit);
+}
+
+static struct rte_pci_driver rte_axgbe_pmd = {
+	.id_table = pci_id_axgbe_map,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+	.probe = eth_axgbe_pci_probe,
+	.remove = eth_axgbe_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_axgbe, rte_axgbe_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_axgbe, pci_id_axgbe_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_axgbe, "* igb_uio | uio_pci_generic | vfio-pci");
+
+RTE_INIT(axgbe_init_log);
+static void
+axgbe_init_log(void)
+{
+	axgbe_logtype_init = rte_log_register("pmd.net.axgbe.init");
+	if (axgbe_logtype_init >= 0)
+		rte_log_set_level(axgbe_logtype_init, RTE_LOG_NOTICE);
+	axgbe_logtype_driver = rte_log_register("pmd.net.axgbe.driver");
+	if (axgbe_logtype_driver >= 0)
+		rte_log_set_level(axgbe_logtype_driver, RTE_LOG_NOTICE);
+}
diff --git a/drivers/net/axgbe/axgbe_ethdev.h b/drivers/net/axgbe/axgbe_ethdev.h
new file mode 100644
index 00000000..b1cd2980
--- /dev/null
+++ b/drivers/net/axgbe/axgbe_ethdev.h
@@ -0,0 +1,586 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
+ */
+
+#ifndef RTE_ETH_AXGBE_H_
+#define RTE_ETH_AXGBE_H_
+
+#include <rte_mempool.h>
+#include <rte_lcore.h>
+#include "axgbe_common.h"
+
+#define IRQ				0xff
+#define VLAN_HLEN			4
+
+#define AXGBE_TX_MAX_BUF_SIZE		(0x3fff & ~(64 - 1))
+#define AXGBE_RX_MAX_BUF_SIZE		(0x3fff & ~(64 - 1))
+#define AXGBE_RX_MIN_BUF_SIZE		(ETHER_MAX_LEN + VLAN_HLEN)
+#define AXGBE_MAX_MAC_ADDRS		1
+
+#define AXGBE_RX_BUF_ALIGN		64
+
+#define AXGBE_MAX_DMA_CHANNELS		16
+#define AXGBE_MAX_QUEUES		16
+#define AXGBE_PRIORITY_QUEUES		8
+#define AXGBE_DMA_STOP_TIMEOUT		1
+
+/* DMA cache settings - Outer sharable, write-back, write-allocate */
+#define AXGBE_DMA_OS_AXDOMAIN		0x2
+#define AXGBE_DMA_OS_ARCACHE		0xb
+#define AXGBE_DMA_OS_AWCACHE		0xf
+
+/* DMA cache settings - System, no caches used */
+#define AXGBE_DMA_SYS_AXDOMAIN		0x3
+#define AXGBE_DMA_SYS_ARCACHE		0x0
+#define AXGBE_DMA_SYS_AWCACHE		0x0
+
+/* DMA channel interrupt modes */
+#define AXGBE_IRQ_MODE_EDGE		0
+#define AXGBE_IRQ_MODE_LEVEL		1
+
+#define AXGBE_DMA_INTERRUPT_MASK	0x31c7
+
+#define AXGMAC_MIN_PACKET		60
+#define AXGMAC_STD_PACKET_MTU		1500
+#define AXGMAC_MAX_STD_PACKET		1518
+#define AXGMAC_JUMBO_PACKET_MTU		9000
+#define AXGMAC_MAX_JUMBO_PACKET		9018
+/* Inter-frame gap + preamble */
+#define AXGMAC_ETH_PREAMBLE		(12 + 8)
+
+#define AXGMAC_PFC_DATA_LEN		46
+#define AXGMAC_PFC_DELAYS		14000
+
+/* PCI BAR mapping */
+#define AXGBE_AXGMAC_BAR		0
+#define AXGBE_XPCS_BAR			1
+#define AXGBE_MAC_PROP_OFFSET		0x1d000
+#define AXGBE_I2C_CTRL_OFFSET		0x1e000
+
+/* PCI clock frequencies */
+#define AXGBE_V2_DMA_CLOCK_FREQ		500000000
+#define AXGBE_V2_PTP_CLOCK_FREQ		125000000
+
+#define AXGMAC_FIFO_MIN_ALLOC		2048
+#define AXGMAC_FIFO_UNIT		256
+#define AXGMAC_FIFO_ALIGN(_x)				\
+	(((_x) + AXGMAC_FIFO_UNIT - 1) & ~(AXGMAC_FIFO_UNIT - 1))
+#define AXGMAC_FIFO_FC_OFF		2048
+#define AXGMAC_FIFO_FC_MIN		4096
+
+#define AXGBE_TC_MIN_QUANTUM		10
+
+/* Flow control queue count */
+#define AXGMAC_MAX_FLOW_CONTROL_QUEUES	8
+
+/* Flow control threshold units */
+#define AXGMAC_FLOW_CONTROL_UNIT	512
+#define AXGMAC_FLOW_CONTROL_ALIGN(_x)			\
+	(((_x) + AXGMAC_FLOW_CONTROL_UNIT - 1) &	\
+	 ~(AXGMAC_FLOW_CONTROL_UNIT - 1))
+#define AXGMAC_FLOW_CONTROL_VALUE(_x)			\
+	(((_x) < 1024) ? 0 : ((_x) / AXGMAC_FLOW_CONTROL_UNIT) - 2)
+#define AXGMAC_FLOW_CONTROL_MAX		33280
+
+/* Maximum MAC address hash table size (256 bits = 8 bytes) */
+#define AXGBE_MAC_HASH_TABLE_SIZE	8
+
+/* Receive Side Scaling */
+#define AXGBE_RSS_OFFLOAD  ( \
+	ETH_RSS_IPV4 | \
+	ETH_RSS_NONFRAG_IPV4_TCP | \
+	ETH_RSS_NONFRAG_IPV4_UDP | \
+	ETH_RSS_IPV6 | \
+	ETH_RSS_NONFRAG_IPV6_TCP | \
+	ETH_RSS_NONFRAG_IPV6_UDP)
+
+#define AXGBE_RSS_HASH_KEY_SIZE		40
+#define AXGBE_RSS_MAX_TABLE_SIZE	256
+#define AXGBE_RSS_LOOKUP_TABLE_TYPE	0
+#define AXGBE_RSS_HASH_KEY_TYPE		1
+
+/* Auto-negotiation */
+#define AXGBE_AN_MS_TIMEOUT		500
+#define AXGBE_LINK_TIMEOUT		5
+
+#define AXGBE_SGMII_AN_LINK_STATUS	BIT(1)
+#define AXGBE_SGMII_AN_LINK_SPEED	(BIT(2) | BIT(3))
+#define AXGBE_SGMII_AN_LINK_SPEED_100	0x04
+#define AXGBE_SGMII_AN_LINK_SPEED_1000	0x08
+#define AXGBE_SGMII_AN_LINK_DUPLEX	BIT(4)
+
+/* ECC correctable error notification window (seconds) */
+#define AXGBE_ECC_LIMIT			60
+
+/* MDIO port types */
+#define AXGMAC_MAX_C22_PORT		3
+
+/* Helper macro for descriptor handling
+ * Always use AXGBE_GET_DESC_DATA to access the descriptor data
+ * since the index is free-running and needs to be and-ed
+ * with the descriptor count value of the ring to index to
+ * the proper descriptor data.
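+ * Example: with rdesc_count = 512 (ring sizes are powers of two),
+ * AXGBE_GET_DESC_DATA(ring, 515) resolves to entry 3, since
+ * 515 & (512 - 1) == 3.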
+ */ +#define AXGBE_GET_DESC_DATA(_ring, _idx) \ + ((_ring)->rdata + \ + ((_idx) & ((_ring)->rdesc_count - 1))) + +struct axgbe_port; + +enum axgbe_state { + AXGBE_DOWN, + AXGBE_LINK_INIT, + AXGBE_LINK_ERR, + AXGBE_STOPPED, +}; + +enum axgbe_int { + AXGMAC_INT_DMA_CH_SR_TI, + AXGMAC_INT_DMA_CH_SR_TPS, + AXGMAC_INT_DMA_CH_SR_TBU, + AXGMAC_INT_DMA_CH_SR_RI, + AXGMAC_INT_DMA_CH_SR_RBU, + AXGMAC_INT_DMA_CH_SR_RPS, + AXGMAC_INT_DMA_CH_SR_TI_RI, + AXGMAC_INT_DMA_CH_SR_FBE, + AXGMAC_INT_DMA_ALL, +}; + +enum axgbe_int_state { + AXGMAC_INT_STATE_SAVE, + AXGMAC_INT_STATE_RESTORE, +}; + +enum axgbe_ecc_sec { + AXGBE_ECC_SEC_TX, + AXGBE_ECC_SEC_RX, + AXGBE_ECC_SEC_DESC, +}; + +enum axgbe_speed { + AXGBE_SPEED_1000 = 0, + AXGBE_SPEED_2500, + AXGBE_SPEED_10000, + AXGBE_SPEEDS, +}; + +enum axgbe_xpcs_access { + AXGBE_XPCS_ACCESS_V1 = 0, + AXGBE_XPCS_ACCESS_V2, +}; + +enum axgbe_an_mode { + AXGBE_AN_MODE_CL73 = 0, + AXGBE_AN_MODE_CL73_REDRV, + AXGBE_AN_MODE_CL37, + AXGBE_AN_MODE_CL37_SGMII, + AXGBE_AN_MODE_NONE, +}; + +enum axgbe_an { + AXGBE_AN_READY = 0, + AXGBE_AN_PAGE_RECEIVED, + AXGBE_AN_INCOMPAT_LINK, + AXGBE_AN_COMPLETE, + AXGBE_AN_NO_LINK, + AXGBE_AN_ERROR, +}; + +enum axgbe_rx { + AXGBE_RX_BPA = 0, + AXGBE_RX_XNP, + AXGBE_RX_COMPLETE, + AXGBE_RX_ERROR, +}; + +enum axgbe_mode { + AXGBE_MODE_KX_1000 = 0, + AXGBE_MODE_KX_2500, + AXGBE_MODE_KR, + AXGBE_MODE_X, + AXGBE_MODE_SGMII_100, + AXGBE_MODE_SGMII_1000, + AXGBE_MODE_SFI, + AXGBE_MODE_UNKNOWN, +}; + +enum axgbe_speedset { + AXGBE_SPEEDSET_1000_10000 = 0, + AXGBE_SPEEDSET_2500_10000, +}; + +enum axgbe_mdio_mode { + AXGBE_MDIO_MODE_NONE = 0, + AXGBE_MDIO_MODE_CL22, + AXGBE_MDIO_MODE_CL45, +}; + +struct axgbe_phy { + uint32_t supported; + uint32_t advertising; + uint32_t lp_advertising; + + int address; + + int autoneg; + int speed; + int duplex; + + int link; + + int pause_autoneg; + int tx_pause; + int rx_pause; +}; + +enum axgbe_i2c_cmd { + AXGBE_I2C_CMD_READ = 0, + AXGBE_I2C_CMD_WRITE, +}; + +struct axgbe_i2c_op { + enum axgbe_i2c_cmd cmd; + + unsigned int target; + + uint8_t *buf; + unsigned int len; +}; + +struct axgbe_i2c_op_state { + struct axgbe_i2c_op *op; + + unsigned int tx_len; + unsigned char *tx_buf; + + unsigned int rx_len; + unsigned char *rx_buf; + + unsigned int tx_abort_source; + + int ret; +}; + +struct axgbe_i2c { + unsigned int started; + unsigned int max_speed_mode; + unsigned int rx_fifo_size; + unsigned int tx_fifo_size; + + struct axgbe_i2c_op_state op_state; +}; + +struct axgbe_hw_if { + void (*config_flow_control)(struct axgbe_port *); + int (*config_rx_mode)(struct axgbe_port *); + + int (*init)(struct axgbe_port *); + + int (*read_mmd_regs)(struct axgbe_port *, int, int); + void (*write_mmd_regs)(struct axgbe_port *, int, int, int); + int (*set_speed)(struct axgbe_port *, int); + + int (*set_ext_mii_mode)(struct axgbe_port *, unsigned int, + enum axgbe_mdio_mode); + int (*read_ext_mii_regs)(struct axgbe_port *, int, int); + int (*write_ext_mii_regs)(struct axgbe_port *, int, int, uint16_t); + + /* For FLOW ctrl */ + int (*config_tx_flow_control)(struct axgbe_port *); + int (*config_rx_flow_control)(struct axgbe_port *); + + int (*exit)(struct axgbe_port *); +}; + +/* This structure represents implementation specific routines for an + * implementation of a PHY. All routines are required unless noted below. 
+ * Optional routines: + * kr_training_pre, kr_training_post + */ +struct axgbe_phy_impl_if { + /* Perform Setup/teardown actions */ + int (*init)(struct axgbe_port *); + void (*exit)(struct axgbe_port *); + + /* Perform start/stop specific actions */ + int (*reset)(struct axgbe_port *); + int (*start)(struct axgbe_port *); + void (*stop)(struct axgbe_port *); + + /* Return the link status */ + int (*link_status)(struct axgbe_port *, int *); + + /* Indicate if a particular speed is valid */ + int (*valid_speed)(struct axgbe_port *, int); + + /* Check if the specified mode can/should be used */ + bool (*use_mode)(struct axgbe_port *, enum axgbe_mode); + /* Switch the PHY into various modes */ + void (*set_mode)(struct axgbe_port *, enum axgbe_mode); + /* Retrieve mode needed for a specific speed */ + enum axgbe_mode (*get_mode)(struct axgbe_port *, int); + /* Retrieve new/next mode when trying to auto-negotiate */ + enum axgbe_mode (*switch_mode)(struct axgbe_port *); + /* Retrieve current mode */ + enum axgbe_mode (*cur_mode)(struct axgbe_port *); + + /* Retrieve current auto-negotiation mode */ + enum axgbe_an_mode (*an_mode)(struct axgbe_port *); + + /* Configure auto-negotiation settings */ + int (*an_config)(struct axgbe_port *); + + /* Set/override auto-negotiation advertisement settings */ + unsigned int (*an_advertising)(struct axgbe_port *port); + + /* Process results of auto-negotiation */ + enum axgbe_mode (*an_outcome)(struct axgbe_port *); + + /* Pre/Post auto-negotiation support */ + void (*an_pre)(struct axgbe_port *port); + void (*an_post)(struct axgbe_port *port); + + /* Pre/Post KR training enablement support */ + void (*kr_training_pre)(struct axgbe_port *); + void (*kr_training_post)(struct axgbe_port *); +}; + +struct axgbe_phy_if { + /* For PHY setup/teardown */ + int (*phy_init)(struct axgbe_port *); + void (*phy_exit)(struct axgbe_port *); + + /* For PHY support when setting device up/down */ + int (*phy_reset)(struct axgbe_port *); + int (*phy_start)(struct axgbe_port *); + void (*phy_stop)(struct axgbe_port *); + + /* For PHY support while device is up */ + void (*phy_status)(struct axgbe_port *); + int (*phy_config_aneg)(struct axgbe_port *); + + /* For PHY settings validation */ + int (*phy_valid_speed)(struct axgbe_port *, int); + /* For single interrupt support */ + void (*an_isr)(struct axgbe_port *); + /* PHY implementation specific services */ + struct axgbe_phy_impl_if phy_impl; +}; + +struct axgbe_i2c_if { + /* For initial I2C setup */ + int (*i2c_init)(struct axgbe_port *); + + /* For I2C support when setting device up/down */ + int (*i2c_start)(struct axgbe_port *); + void (*i2c_stop)(struct axgbe_port *); + + /* For performing I2C operations */ + int (*i2c_xfer)(struct axgbe_port *, struct axgbe_i2c_op *); +}; + +/* This structure contains flags that indicate what hardware features + * or configurations are present in the device. 
+ */ +struct axgbe_hw_features { + /* HW Version */ + unsigned int version; + + /* HW Feature Register0 */ + unsigned int gmii; /* 1000 Mbps support */ + unsigned int vlhash; /* VLAN Hash Filter */ + unsigned int sma; /* SMA(MDIO) Interface */ + unsigned int rwk; /* PMT remote wake-up packet */ + unsigned int mgk; /* PMT magic packet */ + unsigned int mmc; /* RMON module */ + unsigned int aoe; /* ARP Offload */ + unsigned int ts; /* IEEE 1588-2008 Advanced Timestamp */ + unsigned int eee; /* Energy Efficient Ethernet */ + unsigned int tx_coe; /* Tx Checksum Offload */ + unsigned int rx_coe; /* Rx Checksum Offload */ + unsigned int addn_mac; /* Additional MAC Addresses */ + unsigned int ts_src; /* Timestamp Source */ + unsigned int sa_vlan_ins; /* Source Address or VLAN Insertion */ + + /* HW Feature Register1 */ + unsigned int rx_fifo_size; /* MTL Receive FIFO Size */ + unsigned int tx_fifo_size; /* MTL Transmit FIFO Size */ + unsigned int adv_ts_hi; /* Advance Timestamping High Word */ + unsigned int dma_width; /* DMA width */ + unsigned int dcb; /* DCB Feature */ + unsigned int sph; /* Split Header Feature */ + unsigned int tso; /* TCP Segmentation Offload */ + unsigned int dma_debug; /* DMA Debug Registers */ + unsigned int rss; /* Receive Side Scaling */ + unsigned int tc_cnt; /* Number of Traffic Classes */ + unsigned int hash_table_size; /* Hash Table Size */ + unsigned int l3l4_filter_num; /* Number of L3-L4 Filters */ + + /* HW Feature Register2 */ + unsigned int rx_q_cnt; /* Number of MTL Receive Queues */ + unsigned int tx_q_cnt; /* Number of MTL Transmit Queues */ + unsigned int rx_ch_cnt; /* Number of DMA Receive Channels */ + unsigned int tx_ch_cnt; /* Number of DMA Transmit Channels */ + unsigned int pps_out_num; /* Number of PPS outputs */ + unsigned int aux_snap_num; /* Number of Aux snapshot inputs */ +}; + +struct axgbe_version_data { + void (*init_function_ptrs_phy_impl)(struct axgbe_phy_if *); + enum axgbe_xpcs_access xpcs_access; + unsigned int mmc_64bit; + unsigned int tx_max_fifo_size; + unsigned int rx_max_fifo_size; + unsigned int tx_tstamp_workaround; + unsigned int ecc_support; + unsigned int i2c_support; + unsigned int an_cdr_workaround; +}; + +/* + * Structure to store private data for each port. 
+ */ +struct axgbe_port { + /* Ethdev where port belongs */ + struct rte_eth_dev *eth_dev; + /* PCI device info */ + const struct rte_pci_device *pci_dev; + /* Version related data */ + struct axgbe_version_data *vdata; + + /* AXGMAC/XPCS related mmio registers */ + void *xgmac_regs; /* AXGMAC CSRs */ + void *xpcs_regs; /* XPCS MMD registers */ + void *xprop_regs; /* AXGBE property registers */ + void *xi2c_regs; /* AXGBE I2C CSRs */ + + bool cdr_track_early; + /* XPCS indirect addressing lock */ + unsigned int xpcs_window_def_reg; + unsigned int xpcs_window_sel_reg; + unsigned int xpcs_window; + unsigned int xpcs_window_size; + unsigned int xpcs_window_mask; + + /* Flags representing axgbe_state */ + unsigned long dev_state; + + struct axgbe_hw_if hw_if; + struct axgbe_phy_if phy_if; + struct axgbe_i2c_if i2c_if; + + /* AXI DMA settings */ + unsigned int coherent; + unsigned int axdomain; + unsigned int arcache; + unsigned int awcache; + + unsigned int tx_max_channel_count; + unsigned int rx_max_channel_count; + unsigned int channel_count; + unsigned int tx_ring_count; + unsigned int tx_desc_count; + unsigned int rx_ring_count; + unsigned int rx_desc_count; + + unsigned int tx_max_q_count; + unsigned int rx_max_q_count; + unsigned int tx_q_count; + unsigned int rx_q_count; + + /* Tx/Rx common settings */ + unsigned int pblx8; + + /* Tx settings */ + unsigned int tx_sf_mode; + unsigned int tx_threshold; + unsigned int tx_pbl; + unsigned int tx_osp_mode; + unsigned int tx_max_fifo_size; + + /* Rx settings */ + unsigned int rx_sf_mode; + unsigned int rx_threshold; + unsigned int rx_pbl; + unsigned int rx_max_fifo_size; + unsigned int rx_buf_size; + + /* Device clocks */ + unsigned long sysclk_rate; + unsigned long ptpclk_rate; + + /* Keeps track of power mode */ + unsigned int power_down; + + /* Current PHY settings */ + int phy_link; + int phy_speed; + + pthread_mutex_t xpcs_mutex; + pthread_mutex_t i2c_mutex; + pthread_mutex_t an_mutex; + pthread_mutex_t phy_mutex; + + /* Flow control settings */ + unsigned int pause_autoneg; + unsigned int tx_pause; + unsigned int rx_pause; + unsigned int rx_rfa[AXGBE_MAX_QUEUES]; + unsigned int rx_rfd[AXGBE_MAX_QUEUES]; + unsigned int fifo; + + /* Receive Side Scaling settings */ + u8 rss_key[AXGBE_RSS_HASH_KEY_SIZE]; + uint32_t rss_table[AXGBE_RSS_MAX_TABLE_SIZE]; + uint32_t rss_options; + int rss_enable; + + /* Hardware features of the device */ + struct axgbe_hw_features hw_feat; + + struct ether_addr mac_addr; + + /* Software Tx/Rx structure pointers */ + void **rx_queues; + void **tx_queues; + + /* MDIO/PHY related settings */ + unsigned int phy_started; + void *phy_data; + struct axgbe_phy phy; + int mdio_mmd; + unsigned long link_check; + volatile int mdio_completion; + + unsigned int kr_redrv; + + /* Auto-negotiation state machine support */ + unsigned int an_int; + unsigned int an_status; + enum axgbe_an an_result; + enum axgbe_an an_state; + enum axgbe_rx kr_state; + enum axgbe_rx kx_state; + unsigned int an_supported; + unsigned int parallel_detect; + unsigned int fec_ability; + unsigned long an_start; + enum axgbe_an_mode an_mode; + + /* I2C support */ + struct axgbe_i2c i2c; + volatile int i2c_complete; + + /* CRC stripping by H/w for Rx packet */ + int crc_strip_enable; + /* Rx checksum offload enable */ + uint32_t rx_csum_enable; +}; + +void axgbe_init_function_ptrs_dev(struct axgbe_hw_if *hw_if); +void axgbe_init_function_ptrs_phy(struct axgbe_phy_if *phy_if); +void axgbe_init_function_ptrs_phy_v2(struct axgbe_phy_if *phy_if); +void
axgbe_init_function_ptrs_i2c(struct axgbe_i2c_if *i2c_if); + +#endif /* RTE_ETH_AXGBE_H_ */ diff --git a/drivers/net/axgbe/axgbe_i2c.c b/drivers/net/axgbe/axgbe_i2c.c new file mode 100644 index 00000000..204ec367 --- /dev/null +++ b/drivers/net/axgbe/axgbe_i2c.c @@ -0,0 +1,331 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved. + * Copyright(c) 2018 Synopsys, Inc. All rights reserved. + */ + +#include "axgbe_ethdev.h" +#include "axgbe_common.h" + +#define AXGBE_ABORT_COUNT 500 +#define AXGBE_DISABLE_COUNT 1000 + +#define AXGBE_STD_SPEED 1 + +#define AXGBE_INTR_RX_FULL BIT(IC_RAW_INTR_STAT_RX_FULL_INDEX) +#define AXGBE_INTR_TX_EMPTY BIT(IC_RAW_INTR_STAT_TX_EMPTY_INDEX) +#define AXGBE_INTR_TX_ABRT BIT(IC_RAW_INTR_STAT_TX_ABRT_INDEX) +#define AXGBE_INTR_STOP_DET BIT(IC_RAW_INTR_STAT_STOP_DET_INDEX) +#define AXGBE_DEFAULT_INT_MASK (AXGBE_INTR_RX_FULL | \ + AXGBE_INTR_TX_EMPTY | \ + AXGBE_INTR_TX_ABRT | \ + AXGBE_INTR_STOP_DET) + +#define AXGBE_I2C_READ BIT(8) +#define AXGBE_I2C_STOP BIT(9) + +static int axgbe_i2c_abort(struct axgbe_port *pdata) +{ + unsigned int wait = AXGBE_ABORT_COUNT; + + /* Must be enabled to recognize the abort request */ + XI2C_IOWRITE_BITS(pdata, IC_ENABLE, EN, 1); + + /* Issue the abort */ + XI2C_IOWRITE_BITS(pdata, IC_ENABLE, ABORT, 1); + + while (wait--) { + if (!XI2C_IOREAD_BITS(pdata, IC_ENABLE, ABORT)) + return 0; + rte_delay_us(500); + } + + return -EBUSY; +} + +static int axgbe_i2c_set_enable(struct axgbe_port *pdata, bool enable) +{ + unsigned int wait = AXGBE_DISABLE_COUNT; + unsigned int mode = enable ? 1 : 0; + + while (wait--) { + XI2C_IOWRITE_BITS(pdata, IC_ENABLE, EN, mode); + if (XI2C_IOREAD_BITS(pdata, IC_ENABLE_STATUS, EN) == mode) + return 0; + + rte_delay_us(100); + } + + return -EBUSY; +} + +static int axgbe_i2c_disable(struct axgbe_port *pdata) +{ + unsigned int ret; + + ret = axgbe_i2c_set_enable(pdata, false); + if (ret) { + /* Disable failed, try an abort */ + ret = axgbe_i2c_abort(pdata); + if (ret) + return ret; + + /* Abort succeeded, try to disable again */ + ret = axgbe_i2c_set_enable(pdata, false); + } + + return ret; +} + +static int axgbe_i2c_enable(struct axgbe_port *pdata) +{ + return axgbe_i2c_set_enable(pdata, true); +} + +static void axgbe_i2c_clear_all_interrupts(struct axgbe_port *pdata) +{ + XI2C_IOREAD(pdata, IC_CLR_INTR); +} + +static void axgbe_i2c_disable_interrupts(struct axgbe_port *pdata) +{ + XI2C_IOWRITE(pdata, IC_INTR_MASK, 0); +} + +static void axgbe_i2c_enable_interrupts(struct axgbe_port *pdata) +{ + XI2C_IOWRITE(pdata, IC_INTR_MASK, AXGBE_DEFAULT_INT_MASK); +} + +static void axgbe_i2c_write(struct axgbe_port *pdata) +{ + struct axgbe_i2c_op_state *state = &pdata->i2c.op_state; + unsigned int tx_slots; + unsigned int cmd; + + /* Configured to never receive Rx overflows, so fill up Tx fifo */ + tx_slots = pdata->i2c.tx_fifo_size - XI2C_IOREAD(pdata, IC_TXFLR); + while (tx_slots && state->tx_len) { + if (state->op->cmd == AXGBE_I2C_CMD_READ) + cmd = AXGBE_I2C_READ; + else + cmd = *state->tx_buf++; + + if (state->tx_len == 1) + XI2C_SET_BITS(cmd, IC_DATA_CMD, STOP, 1); + + XI2C_IOWRITE(pdata, IC_DATA_CMD, cmd); + + tx_slots--; + state->tx_len--; + } + + /* No more Tx operations, so ignore TX_EMPTY and return */ + if (!state->tx_len) + XI2C_IOWRITE_BITS(pdata, IC_INTR_MASK, TX_EMPTY, 0); +} + +static void axgbe_i2c_read(struct axgbe_port *pdata) +{ + struct axgbe_i2c_op_state *state = &pdata->i2c.op_state; + unsigned int rx_slots; + + /* Anything to be 
read? */ + if (state->op->cmd != AXGBE_I2C_CMD_READ) + return; + + rx_slots = XI2C_IOREAD(pdata, IC_RXFLR); + while (rx_slots && state->rx_len) { + *state->rx_buf++ = XI2C_IOREAD(pdata, IC_DATA_CMD); + state->rx_len--; + rx_slots--; + } +} + +static void axgbe_i2c_clear_isr_interrupts(struct axgbe_port *pdata, + unsigned int isr) +{ + struct axgbe_i2c_op_state *state = &pdata->i2c.op_state; + + if (isr & AXGBE_INTR_TX_ABRT) { + state->tx_abort_source = XI2C_IOREAD(pdata, IC_TX_ABRT_SOURCE); + XI2C_IOREAD(pdata, IC_CLR_TX_ABRT); + } + + if (isr & AXGBE_INTR_STOP_DET) + XI2C_IOREAD(pdata, IC_CLR_STOP_DET); +} + +static int axgbe_i2c_isr(struct axgbe_port *pdata) +{ + struct axgbe_i2c_op_state *state = &pdata->i2c.op_state; + unsigned int isr; + + isr = XI2C_IOREAD(pdata, IC_RAW_INTR_STAT); + + axgbe_i2c_clear_isr_interrupts(pdata, isr); + + if (isr & AXGBE_INTR_TX_ABRT) { + axgbe_i2c_disable_interrupts(pdata); + + state->ret = -EIO; + goto out; + } + + /* Check for data in the Rx fifo */ + axgbe_i2c_read(pdata); + + /* Fill up the Tx fifo next */ + axgbe_i2c_write(pdata); + +out: + /* Complete on an error or STOP condition */ + if (state->ret || XI2C_GET_BITS(isr, IC_RAW_INTR_STAT, STOP_DET)) + return 1; + + return 0; +} + +static void axgbe_i2c_set_mode(struct axgbe_port *pdata) +{ + unsigned int reg; + + reg = XI2C_IOREAD(pdata, IC_CON); + XI2C_SET_BITS(reg, IC_CON, MASTER_MODE, 1); + XI2C_SET_BITS(reg, IC_CON, SLAVE_DISABLE, 1); + XI2C_SET_BITS(reg, IC_CON, RESTART_EN, 1); + XI2C_SET_BITS(reg, IC_CON, SPEED, AXGBE_STD_SPEED); + XI2C_SET_BITS(reg, IC_CON, RX_FIFO_FULL_HOLD, 1); + XI2C_IOWRITE(pdata, IC_CON, reg); +} + +static void axgbe_i2c_get_features(struct axgbe_port *pdata) +{ + struct axgbe_i2c *i2c = &pdata->i2c; + unsigned int reg; + + reg = XI2C_IOREAD(pdata, IC_COMP_PARAM_1); + i2c->max_speed_mode = XI2C_GET_BITS(reg, IC_COMP_PARAM_1, + MAX_SPEED_MODE); + i2c->rx_fifo_size = XI2C_GET_BITS(reg, IC_COMP_PARAM_1, + RX_BUFFER_DEPTH); + i2c->tx_fifo_size = XI2C_GET_BITS(reg, IC_COMP_PARAM_1, + TX_BUFFER_DEPTH); +} + +static void axgbe_i2c_set_target(struct axgbe_port *pdata, unsigned int addr) +{ + XI2C_IOWRITE(pdata, IC_TAR, addr); +} + +static int axgbe_i2c_xfer(struct axgbe_port *pdata, struct axgbe_i2c_op *op) +{ + struct axgbe_i2c_op_state *state = &pdata->i2c.op_state; + int ret; + uint64_t timeout; + + pthread_mutex_lock(&pdata->i2c_mutex); + ret = axgbe_i2c_disable(pdata); + if (ret) { + PMD_DRV_LOG(ERR, "failed to disable i2c master\n"); + goto unlock; + } + + axgbe_i2c_set_target(pdata, op->target); + + memset(state, 0, sizeof(*state)); + state->op = op; + state->tx_len = op->len; + state->tx_buf = (unsigned char *)op->buf; + state->rx_len = op->len; + state->rx_buf = (unsigned char *)op->buf; + + axgbe_i2c_clear_all_interrupts(pdata); + ret = axgbe_i2c_enable(pdata); + if (ret) { + PMD_DRV_LOG(ERR, "failed to enable i2c master\n"); + goto unlock; + } + + /* Enabling the interrupts will cause the TX FIFO empty interrupt to + * fire and begin to process the command via the ISR.
+ */ + axgbe_i2c_enable_interrupts(pdata); + timeout = rte_get_timer_cycles() + rte_get_timer_hz(); + + while (time_before(rte_get_timer_cycles(), timeout)) { + rte_delay_us(100); + if (XI2C_IOREAD(pdata, IC_RAW_INTR_STAT)) { + if (axgbe_i2c_isr(pdata)) + goto success; + } + } + + PMD_DRV_LOG(ERR, "i2c operation timed out\n"); + axgbe_i2c_disable_interrupts(pdata); + axgbe_i2c_disable(pdata); + ret = -ETIMEDOUT; + goto unlock; + +success: + ret = state->ret; + if (ret) { + if (state->tx_abort_source & IC_TX_ABRT_7B_ADDR_NOACK) + ret = -ENOTCONN; + else if (state->tx_abort_source & IC_TX_ABRT_ARB_LOST) + ret = -EAGAIN; + } + +unlock: + pthread_mutex_unlock(&pdata->i2c_mutex); + return ret; +} + +static void axgbe_i2c_stop(struct axgbe_port *pdata) +{ + if (!pdata->i2c.started) + return; + + pdata->i2c.started = 0; + axgbe_i2c_disable_interrupts(pdata); + axgbe_i2c_disable(pdata); + axgbe_i2c_clear_all_interrupts(pdata); +} + +static int axgbe_i2c_start(struct axgbe_port *pdata) +{ + if (pdata->i2c.started) + return 0; + + pdata->i2c.started = 1; + + return 0; +} + +static int axgbe_i2c_init(struct axgbe_port *pdata) +{ + int ret; + + axgbe_i2c_disable_interrupts(pdata); + + ret = axgbe_i2c_disable(pdata); + if (ret) { + PMD_DRV_LOG(ERR, "failed to disable i2c master\n"); + return ret; + } + + axgbe_i2c_get_features(pdata); + + axgbe_i2c_set_mode(pdata); + + axgbe_i2c_clear_all_interrupts(pdata); + + return 0; +} + +void axgbe_init_function_ptrs_i2c(struct axgbe_i2c_if *i2c_if) +{ + i2c_if->i2c_init = axgbe_i2c_init; + i2c_if->i2c_start = axgbe_i2c_start; + i2c_if->i2c_stop = axgbe_i2c_stop; + i2c_if->i2c_xfer = axgbe_i2c_xfer; +} diff --git a/drivers/net/axgbe/axgbe_logs.h b/drivers/net/axgbe/axgbe_logs.h new file mode 100644 index 00000000..d1487017 --- /dev/null +++ b/drivers/net/axgbe/axgbe_logs.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved. + */ + +#ifndef _AXGBE_LOGS_H_ +#define _AXGBE_LOGS_H_ + +#include + +extern int axgbe_logtype_init; +#define PMD_INIT_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, axgbe_logtype_init, "%s(): " fmt "\n", \ + __func__, ##args) + +#ifdef RTE_LIBRTE_AXGBE_PMD_DEBUG +#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>") +#else +#define PMD_INIT_FUNC_TRACE() do { } while (0) +#endif + +extern int axgbe_logtype_driver; +#define PMD_DRV_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, axgbe_logtype_driver, "%s(): " fmt, \ + __func__, ## args) + +#endif /* _AXGBE_LOGS_H_ */ diff --git a/drivers/net/axgbe/axgbe_mdio.c b/drivers/net/axgbe/axgbe_mdio.c new file mode 100644 index 00000000..2721e5cc --- /dev/null +++ b/drivers/net/axgbe/axgbe_mdio.c @@ -0,0 +1,1066 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved. + * Copyright(c) 2018 Synopsys, Inc. All rights reserved. 
+ */ + +#include "axgbe_ethdev.h" +#include "axgbe_common.h" +#include "axgbe_phy.h" + +static void axgbe_an37_clear_interrupts(struct axgbe_port *pdata) +{ + int reg; + + reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_STAT); + reg &= ~AXGBE_AN_CL37_INT_MASK; + XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_STAT, reg); +} + +static void axgbe_an37_disable_interrupts(struct axgbe_port *pdata) +{ + int reg; + + reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL); + reg &= ~AXGBE_AN_CL37_INT_MASK; + XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL, reg); + + reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_PCS_DIG_CTRL); + reg &= ~AXGBE_PCS_CL37_BP; + XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_PCS_DIG_CTRL, reg); +} + +static void axgbe_an73_clear_interrupts(struct axgbe_port *pdata) +{ + XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0); +} + +static void axgbe_an73_disable_interrupts(struct axgbe_port *pdata) +{ + XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0); +} + +static void axgbe_an73_enable_interrupts(struct axgbe_port *pdata) +{ + XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, + AXGBE_AN_CL73_INT_MASK); +} + +static void axgbe_an_enable_interrupts(struct axgbe_port *pdata) +{ + switch (pdata->an_mode) { + case AXGBE_AN_MODE_CL73: + case AXGBE_AN_MODE_CL73_REDRV: + axgbe_an73_enable_interrupts(pdata); + break; + case AXGBE_AN_MODE_CL37: + case AXGBE_AN_MODE_CL37_SGMII: + PMD_DRV_LOG(ERR, "Unsupported AN_MOD_37\n"); + break; + default: + break; + } +} + +static void axgbe_an_clear_interrupts_all(struct axgbe_port *pdata) +{ + axgbe_an73_clear_interrupts(pdata); + axgbe_an37_clear_interrupts(pdata); +} + +static void axgbe_an73_enable_kr_training(struct axgbe_port *pdata) +{ + unsigned int reg; + + reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL); + + reg |= AXGBE_KR_TRAINING_ENABLE; + XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg); +} + +static void axgbe_an73_disable_kr_training(struct axgbe_port *pdata) +{ + unsigned int reg; + + reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL); + + reg &= ~AXGBE_KR_TRAINING_ENABLE; + XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg); +} + +static void axgbe_kr_mode(struct axgbe_port *pdata) +{ + /* Enable KR training */ + axgbe_an73_enable_kr_training(pdata); + + /* Set MAC to 10G speed */ + pdata->hw_if.set_speed(pdata, SPEED_10000); + + /* Call PHY implementation support to complete rate change */ + pdata->phy_if.phy_impl.set_mode(pdata, AXGBE_MODE_KR); +} + +static void axgbe_kx_2500_mode(struct axgbe_port *pdata) +{ + /* Disable KR training */ + axgbe_an73_disable_kr_training(pdata); + + /* Set MAC to 2.5G speed */ + pdata->hw_if.set_speed(pdata, SPEED_2500); + + /* Call PHY implementation support to complete rate change */ + pdata->phy_if.phy_impl.set_mode(pdata, AXGBE_MODE_KX_2500); +} + +static void axgbe_kx_1000_mode(struct axgbe_port *pdata) +{ + /* Disable KR training */ + axgbe_an73_disable_kr_training(pdata); + + /* Set MAC to 1G speed */ + pdata->hw_if.set_speed(pdata, SPEED_1000); + + /* Call PHY implementation support to complete rate change */ + pdata->phy_if.phy_impl.set_mode(pdata, AXGBE_MODE_KX_1000); +} + +static void axgbe_sfi_mode(struct axgbe_port *pdata) +{ + /* If a KR re-driver is present, change to KR mode instead */ + if (pdata->kr_redrv) + return axgbe_kr_mode(pdata); + + /* Disable KR training */ + axgbe_an73_disable_kr_training(pdata); + + /* Set MAC to 10G speed */ + pdata->hw_if.set_speed(pdata, SPEED_10000); + + /* Call PHY 
implementation support to complete rate change */ + pdata->phy_if.phy_impl.set_mode(pdata, AXGBE_MODE_SFI); +} + +static void axgbe_x_mode(struct axgbe_port *pdata) +{ + /* Disable KR training */ + axgbe_an73_disable_kr_training(pdata); + + /* Set MAC to 1G speed */ + pdata->hw_if.set_speed(pdata, SPEED_1000); + + /* Call PHY implementation support to complete rate change */ + pdata->phy_if.phy_impl.set_mode(pdata, AXGBE_MODE_X); +} + +static void axgbe_sgmii_1000_mode(struct axgbe_port *pdata) +{ + /* Disable KR training */ + axgbe_an73_disable_kr_training(pdata); + + /* Set MAC to 1G speed */ + pdata->hw_if.set_speed(pdata, SPEED_1000); + + /* Call PHY implementation support to complete rate change */ + pdata->phy_if.phy_impl.set_mode(pdata, AXGBE_MODE_SGMII_1000); +} + +static void axgbe_sgmii_100_mode(struct axgbe_port *pdata) +{ + /* Disable KR training */ + axgbe_an73_disable_kr_training(pdata); + + /* Set MAC to 1G speed */ + pdata->hw_if.set_speed(pdata, SPEED_1000); + + /* Call PHY implementation support to complete rate change */ + pdata->phy_if.phy_impl.set_mode(pdata, AXGBE_MODE_SGMII_100); +} + +static enum axgbe_mode axgbe_cur_mode(struct axgbe_port *pdata) +{ + return pdata->phy_if.phy_impl.cur_mode(pdata); +} + +static bool axgbe_in_kr_mode(struct axgbe_port *pdata) +{ + return axgbe_cur_mode(pdata) == AXGBE_MODE_KR; +} + +static void axgbe_change_mode(struct axgbe_port *pdata, + enum axgbe_mode mode) +{ + switch (mode) { + case AXGBE_MODE_KX_1000: + axgbe_kx_1000_mode(pdata); + break; + case AXGBE_MODE_KX_2500: + axgbe_kx_2500_mode(pdata); + break; + case AXGBE_MODE_KR: + axgbe_kr_mode(pdata); + break; + case AXGBE_MODE_SGMII_100: + axgbe_sgmii_100_mode(pdata); + break; + case AXGBE_MODE_SGMII_1000: + axgbe_sgmii_1000_mode(pdata); + break; + case AXGBE_MODE_X: + axgbe_x_mode(pdata); + break; + case AXGBE_MODE_SFI: + axgbe_sfi_mode(pdata); + break; + case AXGBE_MODE_UNKNOWN: + break; + default: + PMD_DRV_LOG(ERR, "invalid operation mode requested (%u)\n", mode); + } +} + +static void axgbe_switch_mode(struct axgbe_port *pdata) +{ + axgbe_change_mode(pdata, pdata->phy_if.phy_impl.switch_mode(pdata)); +} + +static void axgbe_set_mode(struct axgbe_port *pdata, + enum axgbe_mode mode) +{ + if (mode == axgbe_cur_mode(pdata)) + return; + + axgbe_change_mode(pdata, mode); +} + +static bool axgbe_use_mode(struct axgbe_port *pdata, + enum axgbe_mode mode) +{ + return pdata->phy_if.phy_impl.use_mode(pdata, mode); +} + +static void axgbe_an37_set(struct axgbe_port *pdata, bool enable, + bool restart) +{ + unsigned int reg; + + reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_CTRL1); + reg &= ~MDIO_VEND2_CTRL1_AN_ENABLE; + + if (enable) + reg |= MDIO_VEND2_CTRL1_AN_ENABLE; + + if (restart) + reg |= MDIO_VEND2_CTRL1_AN_RESTART; + + XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_CTRL1, reg); +} + +static void axgbe_an37_disable(struct axgbe_port *pdata) +{ + axgbe_an37_set(pdata, false, false); + axgbe_an37_disable_interrupts(pdata); +} + +static void axgbe_an73_set(struct axgbe_port *pdata, bool enable, + bool restart) +{ + unsigned int reg; + + reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1); + reg &= ~MDIO_AN_CTRL1_ENABLE; + + if (enable) + reg |= MDIO_AN_CTRL1_ENABLE; + + if (restart) + reg |= MDIO_AN_CTRL1_RESTART; + + XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_CTRL1, reg); +} + +static void axgbe_an73_restart(struct axgbe_port *pdata) +{ + axgbe_an73_enable_interrupts(pdata); + axgbe_an73_set(pdata, true, true); +} + +static void axgbe_an73_disable(struct axgbe_port *pdata) +{ + 
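+ /* Quiesce CL73 auto-negotiation in order: clearing MDIO_AN_CTRL1_ENABLE first stops the hardware from generating further page-received events before the AN interrupt sources are masked; resetting an_start re-arms the AXGBE_AN_MS_TIMEOUT window used by the page-received handler. */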
axgbe_an73_set(pdata, false, false); + axgbe_an73_disable_interrupts(pdata); + pdata->an_start = 0; +} + +static void axgbe_an_restart(struct axgbe_port *pdata) +{ + if (pdata->phy_if.phy_impl.an_pre) + pdata->phy_if.phy_impl.an_pre(pdata); + + switch (pdata->an_mode) { + case AXGBE_AN_MODE_CL73: + case AXGBE_AN_MODE_CL73_REDRV: + axgbe_an73_restart(pdata); + break; + case AXGBE_AN_MODE_CL37: + case AXGBE_AN_MODE_CL37_SGMII: + PMD_DRV_LOG(ERR, "Unsupported AN_MODE_CL37\n"); + break; + default: + break; + } +} + +static void axgbe_an_disable(struct axgbe_port *pdata) +{ + if (pdata->phy_if.phy_impl.an_post) + pdata->phy_if.phy_impl.an_post(pdata); + + switch (pdata->an_mode) { + case AXGBE_AN_MODE_CL73: + case AXGBE_AN_MODE_CL73_REDRV: + axgbe_an73_disable(pdata); + break; + case AXGBE_AN_MODE_CL37: + case AXGBE_AN_MODE_CL37_SGMII: + PMD_DRV_LOG(ERR, "Unsupported AN_MODE_CL37\n"); + break; + default: + break; + } +} + +static void axgbe_an_disable_all(struct axgbe_port *pdata) +{ + axgbe_an73_disable(pdata); + axgbe_an37_disable(pdata); +} + +static enum axgbe_an axgbe_an73_tx_training(struct axgbe_port *pdata, + enum axgbe_rx *state) +{ + unsigned int ad_reg, lp_reg, reg; + + *state = AXGBE_RX_COMPLETE; + + /* If we're not in KR mode then we're done */ + if (!axgbe_in_kr_mode(pdata)) + return AXGBE_AN_PAGE_RECEIVED; + + /* Enable/Disable FEC */ + ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2); + lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2); + + reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FECCTRL); + reg &= ~(MDIO_PMA_10GBR_FECABLE_ABLE | MDIO_PMA_10GBR_FECABLE_ERRABLE); + if ((ad_reg & 0xc000) && (lp_reg & 0xc000)) + reg |= pdata->fec_ability; + XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FECCTRL, reg); + + /* Start KR training */ + reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL); + if (reg & AXGBE_KR_TRAINING_ENABLE) { + if (pdata->phy_if.phy_impl.kr_training_pre) + pdata->phy_if.phy_impl.kr_training_pre(pdata); + + reg |= AXGBE_KR_TRAINING_START; + XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, + reg); + + if (pdata->phy_if.phy_impl.kr_training_post) + pdata->phy_if.phy_impl.kr_training_post(pdata); + } + + return AXGBE_AN_PAGE_RECEIVED; +} + +static enum axgbe_an axgbe_an73_tx_xnp(struct axgbe_port *pdata, + enum axgbe_rx *state) +{ + u16 msg; + + *state = AXGBE_RX_XNP; + + msg = AXGBE_XNP_MCF_NULL_MESSAGE; + msg |= AXGBE_XNP_MP_FORMATTED; + + XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_XNP + 2, 0); + XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_XNP + 1, 0); + XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_XNP, msg); + + return AXGBE_AN_PAGE_RECEIVED; +} + +static enum axgbe_an axgbe_an73_rx_bpa(struct axgbe_port *pdata, + enum axgbe_rx *state) +{ + unsigned int link_support; + unsigned int reg, ad_reg, lp_reg; + + /* Read Base Ability register 2 first */ + reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1); + + /* Check for a supported mode, otherwise restart in a different one */ + link_support = axgbe_in_kr_mode(pdata) ? 0x80 : 0x20; + if (!(reg & link_support)) + return AXGBE_AN_INCOMPAT_LINK; + + /* Check Extended Next Page support */ + ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE); + lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA); + + return ((ad_reg & AXGBE_XNP_NP_EXCHANGE) || + (lp_reg & AXGBE_XNP_NP_EXCHANGE)) + ? 
axgbe_an73_tx_xnp(pdata, state) + : axgbe_an73_tx_training(pdata, state); +} + +static enum axgbe_an axgbe_an73_rx_xnp(struct axgbe_port *pdata, + enum axgbe_rx *state) +{ + unsigned int ad_reg, lp_reg; + + /* Check Extended Next Page support */ + ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_XNP); + lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPX); + + return ((ad_reg & AXGBE_XNP_NP_EXCHANGE) || + (lp_reg & AXGBE_XNP_NP_EXCHANGE)) + ? axgbe_an73_tx_xnp(pdata, state) + : axgbe_an73_tx_training(pdata, state); +} + +static enum axgbe_an axgbe_an73_page_received(struct axgbe_port *pdata) +{ + enum axgbe_rx *state; + unsigned long an_timeout; + enum axgbe_an ret; + unsigned long ticks; + + if (!pdata->an_start) { + pdata->an_start = rte_get_timer_cycles(); + } else { + an_timeout = pdata->an_start + + msecs_to_timer_cycles(AXGBE_AN_MS_TIMEOUT); + ticks = rte_get_timer_cycles(); + if (time_after(ticks, an_timeout)) { + /* Auto-negotiation timed out, reset state */ + pdata->kr_state = AXGBE_RX_BPA; + pdata->kx_state = AXGBE_RX_BPA; + + pdata->an_start = rte_get_timer_cycles(); + } + } + + state = axgbe_in_kr_mode(pdata) ? &pdata->kr_state + : &pdata->kx_state; + + switch (*state) { + case AXGBE_RX_BPA: + ret = axgbe_an73_rx_bpa(pdata, state); + break; + case AXGBE_RX_XNP: + ret = axgbe_an73_rx_xnp(pdata, state); + break; + default: + ret = AXGBE_AN_ERROR; + } + + return ret; +} + +static enum axgbe_an axgbe_an73_incompat_link(struct axgbe_port *pdata) +{ + /* Be sure we aren't looping trying to negotiate */ + if (axgbe_in_kr_mode(pdata)) { + pdata->kr_state = AXGBE_RX_ERROR; + + if (!(pdata->phy.advertising & ADVERTISED_1000baseKX_Full) && + !(pdata->phy.advertising & ADVERTISED_2500baseX_Full)) + return AXGBE_AN_NO_LINK; + + if (pdata->kx_state != AXGBE_RX_BPA) + return AXGBE_AN_NO_LINK; + } else { + pdata->kx_state = AXGBE_RX_ERROR; + + if (!(pdata->phy.advertising & ADVERTISED_10000baseKR_Full)) + return AXGBE_AN_NO_LINK; + + if (pdata->kr_state != AXGBE_RX_BPA) + return AXGBE_AN_NO_LINK; + } + + axgbe_an_disable(pdata); + axgbe_switch_mode(pdata); + axgbe_an_restart(pdata); + + return AXGBE_AN_INCOMPAT_LINK; +} + +static void axgbe_an73_state_machine(struct axgbe_port *pdata) +{ + enum axgbe_an cur_state = pdata->an_state; + + if (!pdata->an_int) + return; + +next_int: + if (pdata->an_int & AXGBE_AN_CL73_PG_RCV) { + pdata->an_state = AXGBE_AN_PAGE_RECEIVED; + pdata->an_int &= ~AXGBE_AN_CL73_PG_RCV; + } else if (pdata->an_int & AXGBE_AN_CL73_INC_LINK) { + pdata->an_state = AXGBE_AN_INCOMPAT_LINK; + pdata->an_int &= ~AXGBE_AN_CL73_INC_LINK; + } else if (pdata->an_int & AXGBE_AN_CL73_INT_CMPLT) { + pdata->an_state = AXGBE_AN_COMPLETE; + pdata->an_int &= ~AXGBE_AN_CL73_INT_CMPLT; + } else { + pdata->an_state = AXGBE_AN_ERROR; + } + +again: + cur_state = pdata->an_state; + + switch (pdata->an_state) { + case AXGBE_AN_READY: + pdata->an_supported = 0; + break; + case AXGBE_AN_PAGE_RECEIVED: + pdata->an_state = axgbe_an73_page_received(pdata); + pdata->an_supported++; + break; + case AXGBE_AN_INCOMPAT_LINK: + pdata->an_supported = 0; + pdata->parallel_detect = 0; + pdata->an_state = axgbe_an73_incompat_link(pdata); + break; + case AXGBE_AN_COMPLETE: + pdata->parallel_detect = pdata->an_supported ? 
0 : 1; + break; + case AXGBE_AN_NO_LINK: + break; + default: + pdata->an_state = AXGBE_AN_ERROR; + } + + if (pdata->an_state == AXGBE_AN_NO_LINK) { + pdata->an_int = 0; + axgbe_an73_clear_interrupts(pdata); + pdata->eth_dev->data->dev_link.link_status = + ETH_LINK_DOWN; + } else if (pdata->an_state == AXGBE_AN_ERROR) { + PMD_DRV_LOG(ERR, "error during auto-negotiation, state=%u\n", + cur_state); + pdata->an_int = 0; + axgbe_an73_clear_interrupts(pdata); + } + + if (pdata->an_state >= AXGBE_AN_COMPLETE) { + pdata->an_result = pdata->an_state; + pdata->an_state = AXGBE_AN_READY; + pdata->kr_state = AXGBE_RX_BPA; + pdata->kx_state = AXGBE_RX_BPA; + pdata->an_start = 0; + if (pdata->phy_if.phy_impl.an_post) + pdata->phy_if.phy_impl.an_post(pdata); + } + + if (cur_state != pdata->an_state) + goto again; + + if (pdata->an_int) + goto next_int; + + axgbe_an73_enable_interrupts(pdata); +} + +static void axgbe_an73_isr(struct axgbe_port *pdata) +{ + /* Disable AN interrupts */ + axgbe_an73_disable_interrupts(pdata); + + /* Save the interrupt(s) that fired */ + pdata->an_int = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_INT); + + if (pdata->an_int) { + /* Clear the interrupt(s) that fired and process them */ + XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, ~pdata->an_int); + pthread_mutex_lock(&pdata->an_mutex); + axgbe_an73_state_machine(pdata); + pthread_mutex_unlock(&pdata->an_mutex); + } else { + /* Enable AN interrupts */ + axgbe_an73_enable_interrupts(pdata); + } +} + +static void axgbe_an_isr(struct axgbe_port *pdata) +{ + switch (pdata->an_mode) { + case AXGBE_AN_MODE_CL73: + case AXGBE_AN_MODE_CL73_REDRV: + axgbe_an73_isr(pdata); + break; + case AXGBE_AN_MODE_CL37: + case AXGBE_AN_MODE_CL37_SGMII: + PMD_DRV_LOG(ERR, "AN_MODE_37 not supported\n"); + break; + default: + break; + } +} + +static void axgbe_an_combined_isr(struct axgbe_port *pdata) +{ + axgbe_an_isr(pdata); +} + +static void axgbe_an73_init(struct axgbe_port *pdata) +{ + unsigned int advertising, reg; + + advertising = pdata->phy_if.phy_impl.an_advertising(pdata); + + /* Set up Advertisement register 3 first */ + reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2); + if (advertising & ADVERTISED_10000baseR_FEC) + reg |= 0xc000; + else + reg &= ~0xc000; + + XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2, reg); + + /* Set up Advertisement register 2 next */ + reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1); + if (advertising & ADVERTISED_10000baseKR_Full) + reg |= 0x80; + else + reg &= ~0x80; + + if ((advertising & ADVERTISED_1000baseKX_Full) || + (advertising & ADVERTISED_2500baseX_Full)) + reg |= 0x20; + else + reg &= ~0x20; + + XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1, reg); + + /* Set up Advertisement register 1 last */ + reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE); + if (advertising & ADVERTISED_Pause) + reg |= 0x400; + else + reg &= ~0x400; + + if (advertising & ADVERTISED_Asym_Pause) + reg |= 0x800; + else + reg &= ~0x800; + + /* We don't intend to perform XNP */ + reg &= ~AXGBE_XNP_NP_EXCHANGE; + + XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE, reg); +} + +static void axgbe_an_init(struct axgbe_port *pdata) +{ + /* Set up advertisement registers based on current settings */ + pdata->an_mode = pdata->phy_if.phy_impl.an_mode(pdata); + switch (pdata->an_mode) { + case AXGBE_AN_MODE_CL73: + case AXGBE_AN_MODE_CL73_REDRV: + axgbe_an73_init(pdata); + break; + case AXGBE_AN_MODE_CL37: + case AXGBE_AN_MODE_CL37_SGMII: + PMD_DRV_LOG(ERR, "Unsupported AN_CL37\n"); + break; + default: 
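+ /* No advertisement setup is required for the remaining modes (e.g. AXGBE_AN_MODE_NONE). */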
+ break; + } +} + +static void axgbe_phy_adjust_link(struct axgbe_port *pdata) +{ + if (pdata->phy.link) { + /* Flow control support */ + pdata->pause_autoneg = pdata->phy.pause_autoneg; + + if (pdata->tx_pause != (unsigned int)pdata->phy.tx_pause) { + pdata->hw_if.config_tx_flow_control(pdata); + pdata->tx_pause = pdata->phy.tx_pause; + } + + if (pdata->rx_pause != (unsigned int)pdata->phy.rx_pause) { + pdata->hw_if.config_rx_flow_control(pdata); + pdata->rx_pause = pdata->phy.rx_pause; + } + + /* Speed support */ + if (pdata->phy_speed != pdata->phy.speed) + pdata->phy_speed = pdata->phy.speed; + if (pdata->phy_link != pdata->phy.link) + pdata->phy_link = pdata->phy.link; + } else if (pdata->phy_link) { + pdata->phy_link = 0; + pdata->phy_speed = SPEED_UNKNOWN; + } +} + +static int axgbe_phy_config_fixed(struct axgbe_port *pdata) +{ + enum axgbe_mode mode; + + /* Disable auto-negotiation */ + axgbe_an_disable(pdata); + + /* Set specified mode for specified speed */ + mode = pdata->phy_if.phy_impl.get_mode(pdata, pdata->phy.speed); + switch (mode) { + case AXGBE_MODE_KX_1000: + case AXGBE_MODE_KX_2500: + case AXGBE_MODE_KR: + case AXGBE_MODE_SGMII_100: + case AXGBE_MODE_SGMII_1000: + case AXGBE_MODE_X: + case AXGBE_MODE_SFI: + break; + case AXGBE_MODE_UNKNOWN: + default: + return -EINVAL; + } + + /* Validate duplex mode */ + if (pdata->phy.duplex != DUPLEX_FULL) + return -EINVAL; + + axgbe_set_mode(pdata, mode); + + return 0; +} + +static int __axgbe_phy_config_aneg(struct axgbe_port *pdata) +{ + int ret; + + axgbe_set_bit(AXGBE_LINK_INIT, &pdata->dev_state); + pdata->link_check = rte_get_timer_cycles(); + + ret = pdata->phy_if.phy_impl.an_config(pdata); + if (ret) + return ret; + + if (pdata->phy.autoneg != AUTONEG_ENABLE) { + ret = axgbe_phy_config_fixed(pdata); + if (ret || !pdata->kr_redrv) + return ret; + } + + /* Disable auto-negotiation interrupt */ + rte_intr_disable(&pdata->pci_dev->intr_handle); + + /* Start auto-negotiation in a supported mode */ + if (axgbe_use_mode(pdata, AXGBE_MODE_KR)) { + axgbe_set_mode(pdata, AXGBE_MODE_KR); + } else if (axgbe_use_mode(pdata, AXGBE_MODE_KX_2500)) { + axgbe_set_mode(pdata, AXGBE_MODE_KX_2500); + } else if (axgbe_use_mode(pdata, AXGBE_MODE_KX_1000)) { + axgbe_set_mode(pdata, AXGBE_MODE_KX_1000); + } else if (axgbe_use_mode(pdata, AXGBE_MODE_SFI)) { + axgbe_set_mode(pdata, AXGBE_MODE_SFI); + } else if (axgbe_use_mode(pdata, AXGBE_MODE_X)) { + axgbe_set_mode(pdata, AXGBE_MODE_X); + } else if (axgbe_use_mode(pdata, AXGBE_MODE_SGMII_1000)) { + axgbe_set_mode(pdata, AXGBE_MODE_SGMII_1000); + } else if (axgbe_use_mode(pdata, AXGBE_MODE_SGMII_100)) { + axgbe_set_mode(pdata, AXGBE_MODE_SGMII_100); + } else { + rte_intr_enable(&pdata->pci_dev->intr_handle); + return -EINVAL; + } + + /* Disable and stop any in-progress auto-negotiation */ + axgbe_an_disable_all(pdata); + + /* Clear any auto-negotiation interrupts */ + axgbe_an_clear_interrupts_all(pdata); + + pdata->an_result = AXGBE_AN_READY; + pdata->an_state = AXGBE_AN_READY; + pdata->kr_state = AXGBE_RX_BPA; + pdata->kx_state = AXGBE_RX_BPA; + + /* Re-enable auto-negotiation interrupt */ + rte_intr_enable(&pdata->pci_dev->intr_handle); + + axgbe_an_init(pdata); + axgbe_an_restart(pdata); + + return 0; +} + +static int axgbe_phy_config_aneg(struct axgbe_port *pdata) +{ + int ret; + + pthread_mutex_lock(&pdata->an_mutex); + + ret = __axgbe_phy_config_aneg(pdata); + if (ret) + axgbe_set_bit(AXGBE_LINK_ERR, &pdata->dev_state); + else + axgbe_clear_bit(AXGBE_LINK_ERR, &pdata->dev_state); + +
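+ /* The AXGBE_LINK_ERR bit is updated before an_mutex is released; axgbe_phy_status() checks it and forces the link down while a failed auto-negotiation configuration is pending. */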
pthread_mutex_unlock(&pdata->an_mutex); + + return ret; +} + +static bool axgbe_phy_aneg_done(struct axgbe_port *pdata) +{ + return pdata->an_result == AXGBE_AN_COMPLETE; +} + +static void axgbe_check_link_timeout(struct axgbe_port *pdata) +{ + unsigned long link_timeout; + unsigned long ticks; + + link_timeout = pdata->link_check + (AXGBE_LINK_TIMEOUT * + 2 * rte_get_timer_hz()); + ticks = rte_get_timer_cycles(); + if (time_after(ticks, link_timeout)) + axgbe_phy_config_aneg(pdata); +} + +static enum axgbe_mode axgbe_phy_status_aneg(struct axgbe_port *pdata) +{ + return pdata->phy_if.phy_impl.an_outcome(pdata); +} + +static void axgbe_phy_status_result(struct axgbe_port *pdata) +{ + enum axgbe_mode mode; + + pdata->phy.lp_advertising = 0; + + if ((pdata->phy.autoneg != AUTONEG_ENABLE) || pdata->parallel_detect) + mode = axgbe_cur_mode(pdata); + else + mode = axgbe_phy_status_aneg(pdata); + + switch (mode) { + case AXGBE_MODE_SGMII_100: + pdata->phy.speed = SPEED_100; + break; + case AXGBE_MODE_X: + case AXGBE_MODE_KX_1000: + case AXGBE_MODE_SGMII_1000: + pdata->phy.speed = SPEED_1000; + break; + case AXGBE_MODE_KX_2500: + pdata->phy.speed = SPEED_2500; + break; + case AXGBE_MODE_KR: + case AXGBE_MODE_SFI: + pdata->phy.speed = SPEED_10000; + break; + case AXGBE_MODE_UNKNOWN: + default: + pdata->phy.speed = SPEED_UNKNOWN; + } + + pdata->phy.duplex = DUPLEX_FULL; + + axgbe_set_mode(pdata, mode); +} + +static void axgbe_phy_status(struct axgbe_port *pdata) +{ + unsigned int link_aneg; + int an_restart; + + if (axgbe_test_bit(AXGBE_LINK_ERR, &pdata->dev_state)) { + pdata->phy.link = 0; + goto adjust_link; + } + + link_aneg = (pdata->phy.autoneg == AUTONEG_ENABLE); + + pdata->phy.link = pdata->phy_if.phy_impl.link_status(pdata, + &an_restart); + if (an_restart) { + axgbe_phy_config_aneg(pdata); + return; + } + + if (pdata->phy.link) { + if (link_aneg && !axgbe_phy_aneg_done(pdata)) { + axgbe_check_link_timeout(pdata); + return; + } + axgbe_phy_status_result(pdata); + if (axgbe_test_bit(AXGBE_LINK_INIT, &pdata->dev_state)) + axgbe_clear_bit(AXGBE_LINK_INIT, &pdata->dev_state); + } else { + if (axgbe_test_bit(AXGBE_LINK_INIT, &pdata->dev_state)) { + axgbe_check_link_timeout(pdata); + + if (link_aneg) + return; + } + axgbe_phy_status_result(pdata); + } + +adjust_link: + axgbe_phy_adjust_link(pdata); +} + +static void axgbe_phy_stop(struct axgbe_port *pdata) +{ + if (!pdata->phy_started) + return; + /* Indicate the PHY is down */ + pdata->phy_started = 0; + /* Disable auto-negotiation */ + axgbe_an_disable_all(pdata); + pdata->phy_if.phy_impl.stop(pdata); + pdata->phy.link = 0; + axgbe_phy_adjust_link(pdata); +} + +static int axgbe_phy_start(struct axgbe_port *pdata) +{ + int ret; + + ret = pdata->phy_if.phy_impl.start(pdata); + if (ret) + return ret; + /* Set initial mode - call the mode setting routines + * directly to ensure we are properly configured + */ + if (axgbe_use_mode(pdata, AXGBE_MODE_KR)) { + axgbe_kr_mode(pdata); + } else if (axgbe_use_mode(pdata, AXGBE_MODE_KX_2500)) { + axgbe_kx_2500_mode(pdata); + } else if (axgbe_use_mode(pdata, AXGBE_MODE_KX_1000)) { + axgbe_kx_1000_mode(pdata); + } else if (axgbe_use_mode(pdata, AXGBE_MODE_SFI)) { + axgbe_sfi_mode(pdata); + } else if (axgbe_use_mode(pdata, AXGBE_MODE_X)) { + axgbe_x_mode(pdata); + } else if (axgbe_use_mode(pdata, AXGBE_MODE_SGMII_1000)) { + axgbe_sgmii_1000_mode(pdata); + } else if (axgbe_use_mode(pdata, AXGBE_MODE_SGMII_100)) { + axgbe_sgmii_100_mode(pdata); + } else { + ret = -EINVAL; + goto err_stop; + } + /* Indicate the
PHY is up and running */ + pdata->phy_started = 1; + axgbe_an_init(pdata); + axgbe_an_enable_interrupts(pdata); + return axgbe_phy_config_aneg(pdata); + +err_stop: + pdata->phy_if.phy_impl.stop(pdata); + + return ret; +} + +static int axgbe_phy_reset(struct axgbe_port *pdata) +{ + int ret; + + ret = pdata->phy_if.phy_impl.reset(pdata); + if (ret) + return ret; + + /* Disable auto-negotiation for now */ + axgbe_an_disable_all(pdata); + + /* Clear auto-negotiation interrupts */ + axgbe_an_clear_interrupts_all(pdata); + + return 0; +} + +static int axgbe_phy_best_advertised_speed(struct axgbe_port *pdata) +{ + if (pdata->phy.advertising & ADVERTISED_10000baseKR_Full) + return SPEED_10000; + else if (pdata->phy.advertising & ADVERTISED_10000baseT_Full) + return SPEED_10000; + else if (pdata->phy.advertising & ADVERTISED_2500baseX_Full) + return SPEED_2500; + else if (pdata->phy.advertising & ADVERTISED_1000baseKX_Full) + return SPEED_1000; + else if (pdata->phy.advertising & ADVERTISED_1000baseT_Full) + return SPEED_1000; + else if (pdata->phy.advertising & ADVERTISED_100baseT_Full) + return SPEED_100; + + return SPEED_UNKNOWN; +} + +static int axgbe_phy_init(struct axgbe_port *pdata) +{ + int ret; + + pdata->mdio_mmd = MDIO_MMD_PCS; + + /* Check for FEC support */ + pdata->fec_ability = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, + MDIO_PMA_10GBR_FECABLE); + pdata->fec_ability &= (MDIO_PMA_10GBR_FECABLE_ABLE | + MDIO_PMA_10GBR_FECABLE_ERRABLE); + + /* Setup the phy (including supported features) */ + ret = pdata->phy_if.phy_impl.init(pdata); + if (ret) + return ret; + pdata->phy.advertising = pdata->phy.supported; + + pdata->phy.address = 0; + + if (pdata->phy.advertising & ADVERTISED_Autoneg) { + pdata->phy.autoneg = AUTONEG_ENABLE; + pdata->phy.speed = SPEED_UNKNOWN; + pdata->phy.duplex = DUPLEX_UNKNOWN; + } else { + pdata->phy.autoneg = AUTONEG_DISABLE; + pdata->phy.speed = axgbe_phy_best_advertised_speed(pdata); + pdata->phy.duplex = DUPLEX_FULL; + } + + pdata->phy.link = 0; + + pdata->phy.pause_autoneg = pdata->pause_autoneg; + pdata->phy.tx_pause = pdata->tx_pause; + pdata->phy.rx_pause = pdata->rx_pause; + + /* Fix up Flow Control advertising */ + pdata->phy.advertising &= ~ADVERTISED_Pause; + pdata->phy.advertising &= ~ADVERTISED_Asym_Pause; + + if (pdata->rx_pause) { + pdata->phy.advertising |= ADVERTISED_Pause; + pdata->phy.advertising |= ADVERTISED_Asym_Pause; + } + + if (pdata->tx_pause) + pdata->phy.advertising ^= ADVERTISED_Asym_Pause; + return 0; +} + +void axgbe_init_function_ptrs_phy(struct axgbe_phy_if *phy_if) +{ + phy_if->phy_init = axgbe_phy_init; + phy_if->phy_reset = axgbe_phy_reset; + phy_if->phy_start = axgbe_phy_start; + phy_if->phy_stop = axgbe_phy_stop; + phy_if->phy_status = axgbe_phy_status; + phy_if->phy_config_aneg = axgbe_phy_config_aneg; + phy_if->an_isr = axgbe_an_combined_isr; +} diff --git a/drivers/net/axgbe/axgbe_phy.h b/drivers/net/axgbe/axgbe_phy.h new file mode 100644 index 00000000..77ee20a3 --- /dev/null +++ b/drivers/net/axgbe/axgbe_phy.h @@ -0,0 +1,192 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved. + * Copyright(c) 2018 Synopsys, Inc. All rights reserved. + */ + +#ifndef __AXGBE_PHY_H__ +#define __AXGBE_PHY_H__ + +#define SPEED_10 10 +#define SPEED_100 100 +#define SPEED_1000 1000 +#define SPEED_2500 2500 +#define SPEED_10000 10000 + + +/* Or MII_ADDR_C45 into regnum for read/write on mii_bus to enable the 21 bit + * IEEE 802.3ae clause 45 addressing mode used by 10GIGE phy chips. 
+ */ +#define MII_ADDR_C45 (1 << 30) + +/* Basic mode status register. */ +#define BMSR_LSTATUS 0x0004 /* Link status */ + +/* Status register 1. */ +#define MDIO_STAT1_LSTATUS BMSR_LSTATUS + +/* Generic MII registers. */ +#define MII_BMCR 0x00 /* Basic mode control register */ +#define MII_BMSR 0x01 /* Basic mode status register */ +#define MII_PHYSID1 0x02 /* PHYS ID 1 */ +#define MII_PHYSID2 0x03 /* PHYS ID 2 */ +#define MII_ADVERTISE 0x04 /* Advertisement control reg */ +#define MII_LPA 0x05 /* Link partner ability reg */ +#define MII_EXPANSION 0x06 /* Expansion register */ +#define MII_CTRL1000 0x09 /* 1000BASE-T control */ +#define MII_STAT1000 0x0a /* 1000BASE-T status */ +#define MII_MMD_CTRL 0x0d /* MMD Access Control Register */ +#define MII_MMD_DATA 0x0e /* MMD Access Data Register */ +#define MII_ESTATUS 0x0f /* Extended Status */ +#define MII_DCOUNTER 0x12 /* Disconnect counter */ +#define MII_FCSCOUNTER 0x13 /* False carrier counter */ +#define MII_NWAYTEST 0x14 /* N-way auto-neg test reg */ +#define MII_RERRCOUNTER 0x15 /* Receive error counter */ +#define MII_SREVISION 0x16 /* Silicon revision */ +#define MII_RESV1 0x17 /* Reserved... */ +#define MII_LBRERROR 0x18 /* Lpback, rx, bypass error */ +#define MII_PHYADDR 0x19 /* PHY address */ +#define MII_RESV2 0x1a /* Reserved... */ +#define MII_TPISTATUS 0x1b /* TPI status for 10mbps */ +#define MII_NCONFIG 0x1c /* Network interface config */ + +/* Basic mode control register. */ +#define BMCR_RESV 0x003f /* Unused... */ +#define BMCR_SPEED1000 0x0040 /* MSB of Speed (1000) */ +#define BMCR_CTST 0x0080 /* Collision test */ +#define BMCR_FULLDPLX 0x0100 /* Full duplex */ +#define BMCR_ANRESTART 0x0200 /* Auto negotiation restart */ +#define BMCR_ISOLATE 0x0400 /* Isolate data paths from MII */ +#define BMCR_PDOWN 0x0800 /* Enable low power state */ +#define BMCR_ANENABLE 0x1000 /* Enable auto negotiation */ +#define BMCR_SPEED100 0x2000 /* Select 100Mbps */ +#define BMCR_LOOPBACK 0x4000 /* TXD loopback bits */ +#define BMCR_RESET 0x8000 /* Reset to default state */ +#define BMCR_SPEED10 0x0000 /* Select 10Mbps */ + + +/* MDIO Manageable Devices (MMDs). */ +#define MDIO_MMD_PMAPMD 1 /* Physical Medium Attachment + * Physical Medium Dependent + */ +#define MDIO_MMD_WIS 2 /* WAN Interface Sublayer */ +#define MDIO_MMD_PCS 3 /* Physical Coding Sublayer */ +#define MDIO_MMD_PHYXS 4 /* PHY Extender Sublayer */ +#define MDIO_MMD_DTEXS 5 /* DTE Extender Sublayer */ +#define MDIO_MMD_TC 6 /* Transmission Convergence */ +#define MDIO_MMD_AN 7 /* Auto-Negotiation */ +#define MDIO_MMD_C22EXT 29 /* Clause 22 extension */ +#define MDIO_MMD_VEND1 30 /* Vendor specific 1 */ +#define MDIO_MMD_VEND2 31 /* Vendor specific 2 */ + +/* Generic MDIO registers. 
*/ +#define MDIO_CTRL1 MII_BMCR +#define MDIO_STAT1 MII_BMSR +#define MDIO_DEVID1 MII_PHYSID1 +#define MDIO_DEVID2 MII_PHYSID2 +#define MDIO_SPEED 4 /* Speed ability */ +#define MDIO_DEVS1 5 /* Devices in package */ +#define MDIO_DEVS2 6 +#define MDIO_CTRL2 7 /* 10G control 2 */ +#define MDIO_STAT2 8 /* 10G status 2 */ +#define MDIO_PMA_TXDIS 9 /* 10G PMA/PMD transmit disable */ +#define MDIO_PMA_RXDET 10 /* 10G PMA/PMD receive signal detect */ +#define MDIO_PMA_EXTABLE 11 /* 10G PMA/PMD extended ability */ +#define MDIO_PKGID1 14 /* Package identifier */ +#define MDIO_PKGID2 15 +#define MDIO_AN_ADVERTISE 16 /* AN advertising (base page) */ +#define MDIO_AN_LPA 19 /* AN LP abilities (base page) */ +#define MDIO_PCS_EEE_ABLE 20 /* EEE Capability register */ +#define MDIO_PCS_EEE_WK_ERR 22 /* EEE wake error counter */ +#define MDIO_PHYXS_LNSTAT 24 /* PHY XGXS lane state */ +#define MDIO_AN_EEE_ADV 60 /* EEE advertisement */ +#define MDIO_AN_EEE_LPABLE 61 /* EEE link partner ability */ + +/* Media-dependent registers. */ +#define MDIO_PMA_10GBT_SWAPPOL 130 /* 10GBASE-T pair swap & polarity */ +#define MDIO_PMA_10GBT_TXPWR 131 /* 10GBASE-T TX power control */ +#define MDIO_PMA_10GBT_SNR 133 /* 10GBASE-T SNR margin, lane A. + * Lanes B-D are numbered 134-136. + */ +#define MDIO_PMA_10GBR_FECABLE 170 /* 10GBASE-R FEC ability */ +#define MDIO_PCS_10GBX_STAT1 24 /* 10GBASE-X PCS status 1 */ +#define MDIO_PCS_10GBRT_STAT1 32 /* 10GBASE-R/-T PCS status 1 */ +#define MDIO_PCS_10GBRT_STAT2 33 /* 10GBASE-R/-T PCS status 2 */ +#define MDIO_AN_10GBT_CTRL 32 /* 10GBASE-T auto-negotiation control */ +#define MDIO_AN_10GBT_STAT 33 /* 10GBASE-T auto-negotiation status */ + +/* Control register 1. */ +/* Enable extended speed selection */ +#define MDIO_CTRL1_SPEEDSELEXT (BMCR_SPEED1000 | BMCR_SPEED100) +/* All speed selection bits */ +#define MDIO_CTRL1_SPEEDSEL (MDIO_CTRL1_SPEEDSELEXT | 0x003c) +#define MDIO_CTRL1_FULLDPLX BMCR_FULLDPLX +#define MDIO_CTRL1_LPOWER BMCR_PDOWN +#define MDIO_CTRL1_RESET BMCR_RESET +#define MDIO_PMA_CTRL1_LOOPBACK 0x0001 +#define MDIO_PMA_CTRL1_SPEED1000 BMCR_SPEED1000 +#define MDIO_PMA_CTRL1_SPEED100 BMCR_SPEED100 +#define MDIO_PCS_CTRL1_LOOPBACK BMCR_LOOPBACK +#define MDIO_PHYXS_CTRL1_LOOPBACK BMCR_LOOPBACK +#define MDIO_AN_CTRL1_RESTART BMCR_ANRESTART +#define MDIO_AN_CTRL1_ENABLE BMCR_ANENABLE +#define MDIO_AN_CTRL1_XNP 0x2000 /* Enable extended next page */ +#define MDIO_PCS_CTRL1_CLKSTOP_EN 0x400 /* Stop the clock during LPI */ + + + + + +/* PMA 10GBASE-R FEC ability register. */ +#define MDIO_PMA_10GBR_FECABLE_ABLE 0x0001 /* FEC ability */ +#define MDIO_PMA_10GBR_FECABLE_ERRABLE 0x0002 /* FEC error indic. 
ability */ + + +/* Autoneg related */ +#define ADVERTISED_Autoneg (1 << 6) +#define SUPPORTED_Autoneg (1 << 6) +#define AUTONEG_DISABLE 0x00 +#define AUTONEG_ENABLE 0x01 + +#define ADVERTISED_Pause (1 << 13) +#define ADVERTISED_Asym_Pause (1 << 14) + +#define SUPPORTED_Pause (1 << 13) +#define SUPPORTED_Asym_Pause (1 << 14) + +#define SUPPORTED_Backplane (1 << 16) +#define SUPPORTED_TP (1 << 7) + +#define ADVERTISED_10000baseR_FEC (1 << 20) + +#define SUPPORTED_10000baseR_FEC (1 << 20) + +#define SUPPORTED_FIBRE (1 << 10) + +#define ADVERTISED_10000baseKR_Full (1 << 19) +#define ADVERTISED_10000baseT_Full (1 << 12) +#define ADVERTISED_2500baseX_Full (1 << 15) +#define ADVERTISED_1000baseKX_Full (1 << 17) +#define ADVERTISED_1000baseT_Full (1 << 5) +#define ADVERTISED_100baseT_Full (1 << 3) +#define ADVERTISED_TP (1 << 7) +#define ADVERTISED_FIBRE (1 << 10) +#define ADVERTISED_Backplane (1 << 16) + +#define SUPPORTED_1000baseKX_Full (1 << 17) +#define SUPPORTED_10000baseKR_Full (1 << 19) +#define SUPPORTED_2500baseX_Full (1 << 15) +#define SUPPORTED_100baseT_Full (1 << 3) +#define SUPPORTED_1000baseT_Full (1 << 5) +#define SUPPORTED_10000baseT_Full (1 << 12) + + +#define SPEED_UNKNOWN -1 + +/* Duplex, half or full. */ +#define DUPLEX_HALF 0x00 +#define DUPLEX_FULL 0x01 +#define DUPLEX_UNKNOWN 0xff + +#endif +/* PHY */ diff --git a/drivers/net/axgbe/axgbe_phy_impl.c b/drivers/net/axgbe/axgbe_phy_impl.c new file mode 100644 index 00000000..973177f6 --- /dev/null +++ b/drivers/net/axgbe/axgbe_phy_impl.c @@ -0,0 +1,2191 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved. + * Copyright(c) 2018 Synopsys, Inc. All rights reserved. + */ + +#include "axgbe_ethdev.h" +#include "axgbe_common.h" +#include "axgbe_phy.h" + +#define AXGBE_PHY_PORT_SPEED_100 BIT(0) +#define AXGBE_PHY_PORT_SPEED_1000 BIT(1) +#define AXGBE_PHY_PORT_SPEED_2500 BIT(2) +#define AXGBE_PHY_PORT_SPEED_10000 BIT(3) + +#define AXGBE_MUTEX_RELEASE 0x80000000 + +#define AXGBE_SFP_DIRECT 7 + +/* I2C target addresses */ +#define AXGBE_SFP_SERIAL_ID_ADDRESS 0x50 +#define AXGBE_SFP_DIAG_INFO_ADDRESS 0x51 +#define AXGBE_SFP_PHY_ADDRESS 0x56 +#define AXGBE_GPIO_ADDRESS_PCA9555 0x20 + +/* SFP sideband signal indicators */ +#define AXGBE_GPIO_NO_TX_FAULT BIT(0) +#define AXGBE_GPIO_NO_RATE_SELECT BIT(1) +#define AXGBE_GPIO_NO_MOD_ABSENT BIT(2) +#define AXGBE_GPIO_NO_RX_LOS BIT(3) + +/* Rate-change complete wait/retry count */ +#define AXGBE_RATECHANGE_COUNT 500 + +/* CDR delay values for KR support (in usec) */ +#define AXGBE_CDR_DELAY_INIT 10000 +#define AXGBE_CDR_DELAY_INC 10000 +#define AXGBE_CDR_DELAY_MAX 100000 + +enum axgbe_port_mode { + AXGBE_PORT_MODE_RSVD = 0, + AXGBE_PORT_MODE_BACKPLANE, + AXGBE_PORT_MODE_BACKPLANE_2500, + AXGBE_PORT_MODE_1000BASE_T, + AXGBE_PORT_MODE_1000BASE_X, + AXGBE_PORT_MODE_NBASE_T, + AXGBE_PORT_MODE_10GBASE_T, + AXGBE_PORT_MODE_10GBASE_R, + AXGBE_PORT_MODE_SFP, + AXGBE_PORT_MODE_MAX, +}; + +enum axgbe_conn_type { + AXGBE_CONN_TYPE_NONE = 0, + AXGBE_CONN_TYPE_SFP, + AXGBE_CONN_TYPE_MDIO, + AXGBE_CONN_TYPE_RSVD1, + AXGBE_CONN_TYPE_BACKPLANE, + AXGBE_CONN_TYPE_MAX, +}; + +/* SFP/SFP+ related definitions */ +enum axgbe_sfp_comm { + AXGBE_SFP_COMM_DIRECT = 0, + AXGBE_SFP_COMM_PCA9545, +}; + +enum axgbe_sfp_cable { + AXGBE_SFP_CABLE_UNKNOWN = 0, + AXGBE_SFP_CABLE_ACTIVE, + AXGBE_SFP_CABLE_PASSIVE, +}; + +enum axgbe_sfp_base { + AXGBE_SFP_BASE_UNKNOWN = 0, + AXGBE_SFP_BASE_1000_T, + AXGBE_SFP_BASE_1000_SX, +
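+ /* Entries map to the SFP Serial ID Base ID compliance bytes decoded via the AXGBE_SFP_BASE_1GBE_CC_* and AXGBE_SFP_BASE_10GBE_CC_* definitions below. */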
AXGBE_SFP_BASE_1000_LX, + AXGBE_SFP_BASE_1000_CX, + AXGBE_SFP_BASE_10000_SR, + AXGBE_SFP_BASE_10000_LR, + AXGBE_SFP_BASE_10000_LRM, + AXGBE_SFP_BASE_10000_ER, + AXGBE_SFP_BASE_10000_CR, +}; + +enum axgbe_sfp_speed { + AXGBE_SFP_SPEED_UNKNOWN = 0, + AXGBE_SFP_SPEED_100_1000, + AXGBE_SFP_SPEED_1000, + AXGBE_SFP_SPEED_10000, +}; + +/* SFP Serial ID Base ID values relative to an offset of 0 */ +#define AXGBE_SFP_BASE_ID 0 +#define AXGBE_SFP_ID_SFP 0x03 + +#define AXGBE_SFP_BASE_EXT_ID 1 +#define AXGBE_SFP_EXT_ID_SFP 0x04 + +#define AXGBE_SFP_BASE_10GBE_CC 3 +#define AXGBE_SFP_BASE_10GBE_CC_SR BIT(4) +#define AXGBE_SFP_BASE_10GBE_CC_LR BIT(5) +#define AXGBE_SFP_BASE_10GBE_CC_LRM BIT(6) +#define AXGBE_SFP_BASE_10GBE_CC_ER BIT(7) + +#define AXGBE_SFP_BASE_1GBE_CC 6 +#define AXGBE_SFP_BASE_1GBE_CC_SX BIT(0) +#define AXGBE_SFP_BASE_1GBE_CC_LX BIT(1) +#define AXGBE_SFP_BASE_1GBE_CC_CX BIT(2) +#define AXGBE_SFP_BASE_1GBE_CC_T BIT(3) + +#define AXGBE_SFP_BASE_CABLE 8 +#define AXGBE_SFP_BASE_CABLE_PASSIVE BIT(2) +#define AXGBE_SFP_BASE_CABLE_ACTIVE BIT(3) + +#define AXGBE_SFP_BASE_BR 12 +#define AXGBE_SFP_BASE_BR_1GBE_MIN 0x0a +#define AXGBE_SFP_BASE_BR_1GBE_MAX 0x0d +#define AXGBE_SFP_BASE_BR_10GBE_MIN 0x64 +#define AXGBE_SFP_BASE_BR_10GBE_MAX 0x68 + +#define AXGBE_SFP_BASE_CU_CABLE_LEN 18 + +#define AXGBE_SFP_BASE_VENDOR_NAME 20 +#define AXGBE_SFP_BASE_VENDOR_NAME_LEN 16 +#define AXGBE_SFP_BASE_VENDOR_PN 40 +#define AXGBE_SFP_BASE_VENDOR_PN_LEN 16 +#define AXGBE_SFP_BASE_VENDOR_REV 56 +#define AXGBE_SFP_BASE_VENDOR_REV_LEN 4 + +#define AXGBE_SFP_BASE_CC 63 + +/* SFP Serial ID Extended ID values relative to an offset of 64 */ +#define AXGBE_SFP_BASE_VENDOR_SN 4 +#define AXGBE_SFP_BASE_VENDOR_SN_LEN 16 + +#define AXGBE_SFP_EXTD_DIAG 28 +#define AXGBE_SFP_EXTD_DIAG_ADDR_CHANGE BIT(2) + +#define AXGBE_SFP_EXTD_SFF_8472 30 + +#define AXGBE_SFP_EXTD_CC 31 + +struct axgbe_sfp_eeprom { + u8 base[64]; + u8 extd[32]; + u8 vendor[32]; +}; + +#define AXGBE_BEL_FUSE_VENDOR "BEL-FUSE" +#define AXGBE_BEL_FUSE_PARTNO "1GBT-SFP06" + +struct axgbe_sfp_ascii { + union { + char vendor[AXGBE_SFP_BASE_VENDOR_NAME_LEN + 1]; + char partno[AXGBE_SFP_BASE_VENDOR_PN_LEN + 1]; + char rev[AXGBE_SFP_BASE_VENDOR_REV_LEN + 1]; + char serno[AXGBE_SFP_BASE_VENDOR_SN_LEN + 1]; + } u; +}; + +/* MDIO PHY reset types */ +enum axgbe_mdio_reset { + AXGBE_MDIO_RESET_NONE = 0, + AXGBE_MDIO_RESET_I2C_GPIO, + AXGBE_MDIO_RESET_INT_GPIO, + AXGBE_MDIO_RESET_MAX, +}; + +/* Re-driver related definitions */ +enum axgbe_phy_redrv_if { + AXGBE_PHY_REDRV_IF_MDIO = 0, + AXGBE_PHY_REDRV_IF_I2C, + AXGBE_PHY_REDRV_IF_MAX, +}; + +enum axgbe_phy_redrv_model { + AXGBE_PHY_REDRV_MODEL_4223 = 0, + AXGBE_PHY_REDRV_MODEL_4227, + AXGBE_PHY_REDRV_MODEL_MAX, +}; + +enum axgbe_phy_redrv_mode { + AXGBE_PHY_REDRV_MODE_CX = 5, + AXGBE_PHY_REDRV_MODE_SR = 9, +}; + +#define AXGBE_PHY_REDRV_MODE_REG 0x12b0 + +/* PHY related configuration information */ +struct axgbe_phy_data { + enum axgbe_port_mode port_mode; + + unsigned int port_id; + + unsigned int port_speeds; + + enum axgbe_conn_type conn_type; + + enum axgbe_mode cur_mode; + enum axgbe_mode start_mode; + + unsigned int rrc_count; + + unsigned int mdio_addr; + + unsigned int comm_owned; + + /* SFP Support */ + enum axgbe_sfp_comm sfp_comm; + unsigned int sfp_mux_address; + unsigned int sfp_mux_channel; + + unsigned int sfp_gpio_address; + unsigned int sfp_gpio_mask; + unsigned int sfp_gpio_rx_los; + unsigned int sfp_gpio_tx_fault; + unsigned int sfp_gpio_mod_absent; + unsigned int sfp_gpio_rate_select; + + unsigned 
int sfp_rx_los;
+	unsigned int sfp_tx_fault;
+	unsigned int sfp_mod_absent;
+	unsigned int sfp_diags;
+	unsigned int sfp_changed;
+	unsigned int sfp_phy_avail;
+	unsigned int sfp_cable_len;
+	enum axgbe_sfp_base sfp_base;
+	enum axgbe_sfp_cable sfp_cable;
+	enum axgbe_sfp_speed sfp_speed;
+	struct axgbe_sfp_eeprom sfp_eeprom;
+
+	/* External PHY support */
+	enum axgbe_mdio_mode phydev_mode;
+	enum axgbe_mdio_reset mdio_reset;
+	unsigned int mdio_reset_addr;
+	unsigned int mdio_reset_gpio;
+
+	/* Re-driver support */
+	unsigned int redrv;
+	unsigned int redrv_if;
+	unsigned int redrv_addr;
+	unsigned int redrv_lane;
+	unsigned int redrv_model;
+
+	/* KR AN support */
+	unsigned int phy_cdr_notrack;
+	unsigned int phy_cdr_delay;
+};
+
+static enum axgbe_an_mode axgbe_phy_an_mode(struct axgbe_port *pdata);
+
+static int axgbe_phy_i2c_xfer(struct axgbe_port *pdata,
+			      struct axgbe_i2c_op *i2c_op)
+{
+	struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+	/* Be sure we own the bus */
+	if (!phy_data->comm_owned)
+		return -EIO;
+
+	return pdata->i2c_if.i2c_xfer(pdata, i2c_op);
+}
+
+static int axgbe_phy_redrv_write(struct axgbe_port *pdata, unsigned int reg,
+				 unsigned int val)
+{
+	struct axgbe_phy_data *phy_data = pdata->phy_data;
+	struct axgbe_i2c_op i2c_op;
+	uint16_t *redrv_val;
+	u8 redrv_data[5], csum;
+	unsigned int i, retry;
+	int ret;
+
+	/* High byte of register contains read/write indicator */
+	redrv_data[0] = ((reg >> 8) & 0xff) << 1;
+	redrv_data[1] = reg & 0xff;
+	redrv_val = (uint16_t *)&redrv_data[2];
+	*redrv_val = rte_cpu_to_be_16(val);
+
+	/* Calculate a 1-byte checksum, folding each carry back in */
+	csum = 0;
+	for (i = 0; i < 4; i++) {
+		csum += redrv_data[i];
+		if (redrv_data[i] > csum)
+			csum++;
+	}
+	redrv_data[4] = ~csum;
+
+	retry = 1;
+again1:
+	i2c_op.cmd = AXGBE_I2C_CMD_WRITE;
+	i2c_op.target = phy_data->redrv_addr;
+	i2c_op.len = sizeof(redrv_data);
+	i2c_op.buf = redrv_data;
+	ret = axgbe_phy_i2c_xfer(pdata, &i2c_op);
+	if (ret) {
+		if ((ret == -EAGAIN) && retry--)
+			goto again1;
+
+		return ret;
+	}
+
+	retry = 1;
+again2:
+	i2c_op.cmd = AXGBE_I2C_CMD_READ;
+	i2c_op.target = phy_data->redrv_addr;
+	i2c_op.len = 1;
+	i2c_op.buf = redrv_data;
+	ret = axgbe_phy_i2c_xfer(pdata, &i2c_op);
+	if (ret) {
+		if ((ret == -EAGAIN) && retry--)
+			goto again2;
+
+		return ret;
+	}
+
+	if (redrv_data[0] != 0xff) {
+		PMD_DRV_LOG(ERR, "Redriver write checksum error\n");
+		ret = -EIO;
+	}
+
+	return ret;
+}
+
+static int axgbe_phy_i2c_read(struct axgbe_port *pdata, unsigned int target,
+			      void *reg, unsigned int reg_len,
+			      void *val, unsigned int val_len)
+{
+	struct axgbe_i2c_op i2c_op;
+	int retry, ret;
+
+	retry = 1;
+again1:
+	/* Set the specified register to read */
+	i2c_op.cmd = AXGBE_I2C_CMD_WRITE;
+	i2c_op.target = target;
+	i2c_op.len = reg_len;
+	i2c_op.buf = reg;
+	ret = axgbe_phy_i2c_xfer(pdata, &i2c_op);
+	if (ret) {
+		if ((ret == -EAGAIN) && retry--)
+			goto again1;
+
+		return ret;
+	}
+
+	retry = 1;
+again2:
+	/* Read the specified register */
+	i2c_op.cmd = AXGBE_I2C_CMD_READ;
+	i2c_op.target = target;
+	i2c_op.len = val_len;
+	i2c_op.buf = val;
+	ret = axgbe_phy_i2c_xfer(pdata, &i2c_op);
+	if ((ret == -EAGAIN) && retry--)
+		goto again2;
+
+	return ret;
+}
+
+static int axgbe_phy_sfp_put_mux(struct axgbe_port *pdata)
+{
+	struct axgbe_phy_data *phy_data = pdata->phy_data;
+	struct axgbe_i2c_op i2c_op;
+	uint8_t mux_channel;
+
+	if (phy_data->sfp_comm == AXGBE_SFP_COMM_DIRECT)
+		return 0;
+
+	/* Select no mux channels */
+	mux_channel = 0;
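+	/* A zero control-register value on the PCA9545 mux deselects
+	 * every downstream channel.
+	 */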
+	i2c_op.cmd = AXGBE_I2C_CMD_WRITE;
+	i2c_op.target = phy_data->sfp_mux_address;
+	i2c_op.len = sizeof(mux_channel);
+	i2c_op.buf = &mux_channel;
+
+	return axgbe_phy_i2c_xfer(pdata, &i2c_op);
+}
+
+static int axgbe_phy_sfp_get_mux(struct axgbe_port *pdata)
+{
+	struct axgbe_phy_data *phy_data = pdata->phy_data;
+	struct axgbe_i2c_op i2c_op;
+	u8 mux_channel;
+
+	if (phy_data->sfp_comm == AXGBE_SFP_COMM_DIRECT)
+		return 0;
+
+	/* Select desired mux channel */
+	mux_channel = 1 << phy_data->sfp_mux_channel;
+	i2c_op.cmd = AXGBE_I2C_CMD_WRITE;
+	i2c_op.target = phy_data->sfp_mux_address;
+	i2c_op.len = sizeof(mux_channel);
+	i2c_op.buf = &mux_channel;
+
+	return axgbe_phy_i2c_xfer(pdata, &i2c_op);
+}
+
+static void axgbe_phy_put_comm_ownership(struct axgbe_port *pdata)
+{
+	struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+	phy_data->comm_owned = 0;
+
+	pthread_mutex_unlock(&pdata->phy_mutex);
+}
+
+static int axgbe_phy_get_comm_ownership(struct axgbe_port *pdata)
+{
+	struct axgbe_phy_data *phy_data = pdata->phy_data;
+	uint64_t timeout;
+	unsigned int mutex_id;
+
+	if (phy_data->comm_owned)
+		return 0;
+
+	/* The I2C and MDIO/GPIO buses are multiplexed between multiple
+	 * devices, so the driver needs to take the software mutex and then
+	 * the hardware mutexes before it can use the buses.
+	 */
+	pthread_mutex_lock(&pdata->phy_mutex);
+
+	/* Clear the mutexes */
+	XP_IOWRITE(pdata, XP_I2C_MUTEX, AXGBE_MUTEX_RELEASE);
+	XP_IOWRITE(pdata, XP_MDIO_MUTEX, AXGBE_MUTEX_RELEASE);
+
+	/* Mutex formats are the same for I2C and MDIO/GPIO */
+	mutex_id = 0;
+	XP_SET_BITS(mutex_id, XP_I2C_MUTEX, ID, phy_data->port_id);
+	XP_SET_BITS(mutex_id, XP_I2C_MUTEX, ACTIVE, 1);
+
+	timeout = rte_get_timer_cycles() + (rte_get_timer_hz() * 5);
+	while (time_before(rte_get_timer_cycles(), timeout)) {
+		/* Must be all zeroes in order to obtain the mutex */
+		if (XP_IOREAD(pdata, XP_I2C_MUTEX) ||
+		    XP_IOREAD(pdata, XP_MDIO_MUTEX)) {
+			rte_delay_us(100);
+			continue;
+		}
+
+		/* Obtain the mutex */
+		XP_IOWRITE(pdata, XP_I2C_MUTEX, mutex_id);
+		XP_IOWRITE(pdata, XP_MDIO_MUTEX, mutex_id);
+
+		phy_data->comm_owned = 1;
+		return 0;
+	}
+
+	pthread_mutex_unlock(&pdata->phy_mutex);
+
+	PMD_DRV_LOG(ERR, "unable to obtain hardware mutexes\n");
+
+	return -ETIMEDOUT;
+}
+
+static void axgbe_phy_sfp_phy_settings(struct axgbe_port *pdata)
+{
+	struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+	if (phy_data->sfp_mod_absent) {
+		pdata->phy.speed = SPEED_UNKNOWN;
+		pdata->phy.duplex = DUPLEX_UNKNOWN;
+		pdata->phy.autoneg = AUTONEG_ENABLE;
+		pdata->phy.advertising = pdata->phy.supported;
+	}
+
+	pdata->phy.advertising &= ~ADVERTISED_Autoneg;
+	pdata->phy.advertising &= ~ADVERTISED_TP;
+	pdata->phy.advertising &= ~ADVERTISED_FIBRE;
+	pdata->phy.advertising &= ~ADVERTISED_100baseT_Full;
+	pdata->phy.advertising &= ~ADVERTISED_1000baseT_Full;
+	pdata->phy.advertising &= ~ADVERTISED_10000baseT_Full;
+	pdata->phy.advertising &= ~ADVERTISED_10000baseR_FEC;
+
+	switch (phy_data->sfp_base) {
+	case AXGBE_SFP_BASE_1000_T:
+	case AXGBE_SFP_BASE_1000_SX:
+	case AXGBE_SFP_BASE_1000_LX:
+	case AXGBE_SFP_BASE_1000_CX:
+		pdata->phy.speed = SPEED_UNKNOWN;
+		pdata->phy.duplex = DUPLEX_UNKNOWN;
+		pdata->phy.autoneg = AUTONEG_ENABLE;
+		pdata->phy.advertising |= ADVERTISED_Autoneg;
+		break;
+	case AXGBE_SFP_BASE_10000_SR:
+	case AXGBE_SFP_BASE_10000_LR:
+	case AXGBE_SFP_BASE_10000_LRM:
+	case AXGBE_SFP_BASE_10000_ER:
+	case AXGBE_SFP_BASE_10000_CR:
+	default:
+		pdata->phy.speed = SPEED_10000;
+		pdata->phy.duplex = DUPLEX_FULL;
+		pdata->phy.autoneg = AUTONEG_DISABLE;
+		break;
+	}
+
+	switch
(phy_data->sfp_base) { + case AXGBE_SFP_BASE_1000_T: + case AXGBE_SFP_BASE_1000_CX: + case AXGBE_SFP_BASE_10000_CR: + pdata->phy.advertising |= ADVERTISED_TP; + break; + default: + pdata->phy.advertising |= ADVERTISED_FIBRE; + } + + switch (phy_data->sfp_speed) { + case AXGBE_SFP_SPEED_100_1000: + if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) + pdata->phy.advertising |= ADVERTISED_100baseT_Full; + if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) + pdata->phy.advertising |= ADVERTISED_1000baseT_Full; + break; + case AXGBE_SFP_SPEED_1000: + if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) + pdata->phy.advertising |= ADVERTISED_1000baseT_Full; + break; + case AXGBE_SFP_SPEED_10000: + if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000) + pdata->phy.advertising |= ADVERTISED_10000baseT_Full; + break; + default: + /* Choose the fastest supported speed */ + if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000) + pdata->phy.advertising |= ADVERTISED_10000baseT_Full; + else if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) + pdata->phy.advertising |= ADVERTISED_1000baseT_Full; + else if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) + pdata->phy.advertising |= ADVERTISED_100baseT_Full; + } +} + +static bool axgbe_phy_sfp_bit_rate(struct axgbe_sfp_eeprom *sfp_eeprom, + enum axgbe_sfp_speed sfp_speed) +{ + u8 *sfp_base, min, max; + + sfp_base = sfp_eeprom->base; + + switch (sfp_speed) { + case AXGBE_SFP_SPEED_1000: + min = AXGBE_SFP_BASE_BR_1GBE_MIN; + max = AXGBE_SFP_BASE_BR_1GBE_MAX; + break; + case AXGBE_SFP_SPEED_10000: + min = AXGBE_SFP_BASE_BR_10GBE_MIN; + max = AXGBE_SFP_BASE_BR_10GBE_MAX; + break; + default: + return false; + } + + return ((sfp_base[AXGBE_SFP_BASE_BR] >= min) && + (sfp_base[AXGBE_SFP_BASE_BR] <= max)); +} + +static void axgbe_phy_sfp_external_phy(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + + if (!phy_data->sfp_changed) + return; + + phy_data->sfp_phy_avail = 0; + + if (phy_data->sfp_base != AXGBE_SFP_BASE_1000_T) + return; +} + +static bool axgbe_phy_belfuse_parse_quirks(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + struct axgbe_sfp_eeprom *sfp_eeprom = &phy_data->sfp_eeprom; + + if (memcmp(&sfp_eeprom->base[AXGBE_SFP_BASE_VENDOR_NAME], + AXGBE_BEL_FUSE_VENDOR, strlen(AXGBE_BEL_FUSE_VENDOR))) + return false; + + if (!memcmp(&sfp_eeprom->base[AXGBE_SFP_BASE_VENDOR_PN], + AXGBE_BEL_FUSE_PARTNO, strlen(AXGBE_BEL_FUSE_PARTNO))) { + phy_data->sfp_base = AXGBE_SFP_BASE_1000_SX; + phy_data->sfp_cable = AXGBE_SFP_CABLE_ACTIVE; + phy_data->sfp_speed = AXGBE_SFP_SPEED_1000; + return true; + } + + return false; +} + +static bool axgbe_phy_sfp_parse_quirks(struct axgbe_port *pdata) +{ + if (axgbe_phy_belfuse_parse_quirks(pdata)) + return true; + + return false; +} + +static void axgbe_phy_sfp_parse_eeprom(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + struct axgbe_sfp_eeprom *sfp_eeprom = &phy_data->sfp_eeprom; + uint8_t *sfp_base; + + sfp_base = sfp_eeprom->base; + + if (sfp_base[AXGBE_SFP_BASE_ID] != AXGBE_SFP_ID_SFP) + return; + + if (sfp_base[AXGBE_SFP_BASE_EXT_ID] != AXGBE_SFP_EXT_ID_SFP) + return; + + if (axgbe_phy_sfp_parse_quirks(pdata)) + return; + + /* Assume ACTIVE cable unless told it is PASSIVE */ + if (sfp_base[AXGBE_SFP_BASE_CABLE] & AXGBE_SFP_BASE_CABLE_PASSIVE) { + phy_data->sfp_cable = AXGBE_SFP_CABLE_PASSIVE; + phy_data->sfp_cable_len = sfp_base[AXGBE_SFP_BASE_CU_CABLE_LEN]; + } else { + phy_data->sfp_cable = 
AXGBE_SFP_CABLE_ACTIVE; + } + + /* Determine the type of SFP */ + if (sfp_base[AXGBE_SFP_BASE_10GBE_CC] & AXGBE_SFP_BASE_10GBE_CC_SR) + phy_data->sfp_base = AXGBE_SFP_BASE_10000_SR; + else if (sfp_base[AXGBE_SFP_BASE_10GBE_CC] & AXGBE_SFP_BASE_10GBE_CC_LR) + phy_data->sfp_base = AXGBE_SFP_BASE_10000_LR; + else if (sfp_base[AXGBE_SFP_BASE_10GBE_CC] & + AXGBE_SFP_BASE_10GBE_CC_LRM) + phy_data->sfp_base = AXGBE_SFP_BASE_10000_LRM; + else if (sfp_base[AXGBE_SFP_BASE_10GBE_CC] & AXGBE_SFP_BASE_10GBE_CC_ER) + phy_data->sfp_base = AXGBE_SFP_BASE_10000_ER; + else if (sfp_base[AXGBE_SFP_BASE_1GBE_CC] & AXGBE_SFP_BASE_1GBE_CC_SX) + phy_data->sfp_base = AXGBE_SFP_BASE_1000_SX; + else if (sfp_base[AXGBE_SFP_BASE_1GBE_CC] & AXGBE_SFP_BASE_1GBE_CC_LX) + phy_data->sfp_base = AXGBE_SFP_BASE_1000_LX; + else if (sfp_base[AXGBE_SFP_BASE_1GBE_CC] & AXGBE_SFP_BASE_1GBE_CC_CX) + phy_data->sfp_base = AXGBE_SFP_BASE_1000_CX; + else if (sfp_base[AXGBE_SFP_BASE_1GBE_CC] & AXGBE_SFP_BASE_1GBE_CC_T) + phy_data->sfp_base = AXGBE_SFP_BASE_1000_T; + else if ((phy_data->sfp_cable == AXGBE_SFP_CABLE_PASSIVE) && + axgbe_phy_sfp_bit_rate(sfp_eeprom, AXGBE_SFP_SPEED_10000)) + phy_data->sfp_base = AXGBE_SFP_BASE_10000_CR; + + switch (phy_data->sfp_base) { + case AXGBE_SFP_BASE_1000_T: + phy_data->sfp_speed = AXGBE_SFP_SPEED_100_1000; + break; + case AXGBE_SFP_BASE_1000_SX: + case AXGBE_SFP_BASE_1000_LX: + case AXGBE_SFP_BASE_1000_CX: + phy_data->sfp_speed = AXGBE_SFP_SPEED_1000; + break; + case AXGBE_SFP_BASE_10000_SR: + case AXGBE_SFP_BASE_10000_LR: + case AXGBE_SFP_BASE_10000_LRM: + case AXGBE_SFP_BASE_10000_ER: + case AXGBE_SFP_BASE_10000_CR: + phy_data->sfp_speed = AXGBE_SFP_SPEED_10000; + break; + default: + break; + } +} + +static bool axgbe_phy_sfp_verify_eeprom(uint8_t cc_in, uint8_t *buf, + unsigned int len) +{ + uint8_t cc; + + for (cc = 0; len; buf++, len--) + cc += *buf; + + return (cc == cc_in) ? 
true : false; +} + +static int axgbe_phy_sfp_read_eeprom(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + struct axgbe_sfp_eeprom sfp_eeprom; + uint8_t eeprom_addr; + int ret; + + ret = axgbe_phy_sfp_get_mux(pdata); + if (ret) { + PMD_DRV_LOG(ERR, "I2C error setting SFP MUX\n"); + return ret; + } + + /* Read the SFP serial ID eeprom */ + eeprom_addr = 0; + ret = axgbe_phy_i2c_read(pdata, AXGBE_SFP_SERIAL_ID_ADDRESS, + &eeprom_addr, sizeof(eeprom_addr), + &sfp_eeprom, sizeof(sfp_eeprom)); + if (ret) { + PMD_DRV_LOG(ERR, "I2C error reading SFP EEPROM\n"); + goto put; + } + + /* Validate the contents read */ + if (!axgbe_phy_sfp_verify_eeprom(sfp_eeprom.base[AXGBE_SFP_BASE_CC], + sfp_eeprom.base, + sizeof(sfp_eeprom.base) - 1)) { + ret = -EINVAL; + goto put; + } + + if (!axgbe_phy_sfp_verify_eeprom(sfp_eeprom.extd[AXGBE_SFP_EXTD_CC], + sfp_eeprom.extd, + sizeof(sfp_eeprom.extd) - 1)) { + ret = -EINVAL; + goto put; + } + + /* Check for an added or changed SFP */ + if (memcmp(&phy_data->sfp_eeprom, &sfp_eeprom, sizeof(sfp_eeprom))) { + phy_data->sfp_changed = 1; + memcpy(&phy_data->sfp_eeprom, &sfp_eeprom, sizeof(sfp_eeprom)); + + if (sfp_eeprom.extd[AXGBE_SFP_EXTD_SFF_8472]) { + uint8_t diag_type; + diag_type = sfp_eeprom.extd[AXGBE_SFP_EXTD_DIAG]; + + if (!(diag_type & AXGBE_SFP_EXTD_DIAG_ADDR_CHANGE)) + phy_data->sfp_diags = 1; + } + } else { + phy_data->sfp_changed = 0; + } + +put: + axgbe_phy_sfp_put_mux(pdata); + + return ret; +} + +static void axgbe_phy_sfp_signals(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + unsigned int gpio_input; + u8 gpio_reg, gpio_ports[2]; + int ret; + + /* Read the input port registers */ + gpio_reg = 0; + ret = axgbe_phy_i2c_read(pdata, phy_data->sfp_gpio_address, + &gpio_reg, sizeof(gpio_reg), + gpio_ports, sizeof(gpio_ports)); + if (ret) { + PMD_DRV_LOG(ERR, "I2C error reading SFP GPIOs\n"); + return; + } + + gpio_input = (gpio_ports[1] << 8) | gpio_ports[0]; + + if (phy_data->sfp_gpio_mask & AXGBE_GPIO_NO_MOD_ABSENT) { + /* No GPIO, just assume the module is present for now */ + phy_data->sfp_mod_absent = 0; + } else { + if (!(gpio_input & (1 << phy_data->sfp_gpio_mod_absent))) + phy_data->sfp_mod_absent = 0; + } + + if (!(phy_data->sfp_gpio_mask & AXGBE_GPIO_NO_RX_LOS) && + (gpio_input & (1 << phy_data->sfp_gpio_rx_los))) + phy_data->sfp_rx_los = 1; + + if (!(phy_data->sfp_gpio_mask & AXGBE_GPIO_NO_TX_FAULT) && + (gpio_input & (1 << phy_data->sfp_gpio_tx_fault))) + phy_data->sfp_tx_fault = 1; +} + +static void axgbe_phy_sfp_mod_absent(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + + phy_data->sfp_mod_absent = 1; + phy_data->sfp_phy_avail = 0; + memset(&phy_data->sfp_eeprom, 0, sizeof(phy_data->sfp_eeprom)); +} + +static void axgbe_phy_sfp_reset(struct axgbe_phy_data *phy_data) +{ + phy_data->sfp_rx_los = 0; + phy_data->sfp_tx_fault = 0; + phy_data->sfp_mod_absent = 1; + phy_data->sfp_diags = 0; + phy_data->sfp_base = AXGBE_SFP_BASE_UNKNOWN; + phy_data->sfp_cable = AXGBE_SFP_CABLE_UNKNOWN; + phy_data->sfp_speed = AXGBE_SFP_SPEED_UNKNOWN; +} + +static void axgbe_phy_sfp_detect(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + int ret; + + /* Reset the SFP signals and info */ + axgbe_phy_sfp_reset(phy_data); + + ret = axgbe_phy_get_comm_ownership(pdata); + if (ret) + return; + + /* Read the SFP signals and check for module presence */ + axgbe_phy_sfp_signals(pdata); + if (phy_data->sfp_mod_absent) { + 
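+		/* No module present: clear any cached SFP state */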
axgbe_phy_sfp_mod_absent(pdata); + goto put; + } + + ret = axgbe_phy_sfp_read_eeprom(pdata); + if (ret) { + /* Treat any error as if there isn't an SFP plugged in */ + axgbe_phy_sfp_reset(phy_data); + axgbe_phy_sfp_mod_absent(pdata); + goto put; + } + + axgbe_phy_sfp_parse_eeprom(pdata); + axgbe_phy_sfp_external_phy(pdata); + +put: + axgbe_phy_sfp_phy_settings(pdata); + axgbe_phy_put_comm_ownership(pdata); +} + +static void axgbe_phy_phydev_flowctrl(struct axgbe_port *pdata) +{ + pdata->phy.tx_pause = 0; + pdata->phy.rx_pause = 0; +} + +static enum axgbe_mode axgbe_phy_an73_redrv_outcome(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + enum axgbe_mode mode; + unsigned int ad_reg, lp_reg; + + pdata->phy.lp_advertising |= ADVERTISED_Autoneg; + pdata->phy.lp_advertising |= ADVERTISED_Backplane; + + /* Use external PHY to determine flow control */ + if (pdata->phy.pause_autoneg) + axgbe_phy_phydev_flowctrl(pdata); + + /* Compare Advertisement and Link Partner register 2 */ + ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1); + lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1); + if (lp_reg & 0x80) + pdata->phy.lp_advertising |= ADVERTISED_10000baseKR_Full; + if (lp_reg & 0x20) + pdata->phy.lp_advertising |= ADVERTISED_1000baseKX_Full; + + ad_reg &= lp_reg; + if (ad_reg & 0x80) { + switch (phy_data->port_mode) { + case AXGBE_PORT_MODE_BACKPLANE: + mode = AXGBE_MODE_KR; + break; + default: + mode = AXGBE_MODE_SFI; + break; + } + } else if (ad_reg & 0x20) { + switch (phy_data->port_mode) { + case AXGBE_PORT_MODE_BACKPLANE: + mode = AXGBE_MODE_KX_1000; + break; + case AXGBE_PORT_MODE_1000BASE_X: + mode = AXGBE_MODE_X; + break; + case AXGBE_PORT_MODE_SFP: + switch (phy_data->sfp_base) { + case AXGBE_SFP_BASE_1000_T: + mode = AXGBE_MODE_SGMII_1000; + break; + case AXGBE_SFP_BASE_1000_SX: + case AXGBE_SFP_BASE_1000_LX: + case AXGBE_SFP_BASE_1000_CX: + default: + mode = AXGBE_MODE_X; + break; + } + break; + default: + mode = AXGBE_MODE_SGMII_1000; + break; + } + } else { + mode = AXGBE_MODE_UNKNOWN; + } + + /* Compare Advertisement and Link Partner register 3 */ + ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2); + lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2); + if (lp_reg & 0xc000) + pdata->phy.lp_advertising |= ADVERTISED_10000baseR_FEC; + + return mode; +} + +static enum axgbe_mode axgbe_phy_an73_outcome(struct axgbe_port *pdata) +{ + enum axgbe_mode mode; + unsigned int ad_reg, lp_reg; + + pdata->phy.lp_advertising |= ADVERTISED_Autoneg; + pdata->phy.lp_advertising |= ADVERTISED_Backplane; + + /* Compare Advertisement and Link Partner register 1 */ + ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE); + lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA); + if (lp_reg & 0x400) + pdata->phy.lp_advertising |= ADVERTISED_Pause; + if (lp_reg & 0x800) + pdata->phy.lp_advertising |= ADVERTISED_Asym_Pause; + + if (pdata->phy.pause_autoneg) { + /* Set flow control based on auto-negotiation result */ + pdata->phy.tx_pause = 0; + pdata->phy.rx_pause = 0; + + if (ad_reg & lp_reg & 0x400) { + pdata->phy.tx_pause = 1; + pdata->phy.rx_pause = 1; + } else if (ad_reg & lp_reg & 0x800) { + if (ad_reg & 0x400) + pdata->phy.rx_pause = 1; + else if (lp_reg & 0x400) + pdata->phy.tx_pause = 1; + } + } + + /* Compare Advertisement and Link Partner register 2 */ + ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1); + lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1); + if (lp_reg & 0x80) + 
pdata->phy.lp_advertising |= ADVERTISED_10000baseKR_Full;
+	if (lp_reg & 0x20)
+		pdata->phy.lp_advertising |= ADVERTISED_1000baseKX_Full;
+
+	ad_reg &= lp_reg;
+	if (ad_reg & 0x80)
+		mode = AXGBE_MODE_KR;
+	else if (ad_reg & 0x20)
+		mode = AXGBE_MODE_KX_1000;
+	else
+		mode = AXGBE_MODE_UNKNOWN;
+
+	/* Compare Advertisement and Link Partner register 3 */
+	ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
+	lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2);
+	if (lp_reg & 0xc000)
+		pdata->phy.lp_advertising |= ADVERTISED_10000baseR_FEC;
+
+	return mode;
+}
+
+static enum axgbe_mode axgbe_phy_an_outcome(struct axgbe_port *pdata)
+{
+	switch (pdata->an_mode) {
+	case AXGBE_AN_MODE_CL73:
+		return axgbe_phy_an73_outcome(pdata);
+	case AXGBE_AN_MODE_CL73_REDRV:
+		return axgbe_phy_an73_redrv_outcome(pdata);
+	case AXGBE_AN_MODE_CL37:
+	case AXGBE_AN_MODE_CL37_SGMII:
+	default:
+		return AXGBE_MODE_UNKNOWN;
+	}
+}
+
+static unsigned int axgbe_phy_an_advertising(struct axgbe_port *pdata)
+{
+	struct axgbe_phy_data *phy_data = pdata->phy_data;
+	unsigned int advertising;
+
+	/* Without a re-driver, just return current advertising */
+	if (!phy_data->redrv)
+		return pdata->phy.advertising;
+
+	/* With the KR re-driver we need to advertise a single speed */
+	advertising = pdata->phy.advertising;
+	advertising &= ~ADVERTISED_1000baseKX_Full;
+	advertising &= ~ADVERTISED_10000baseKR_Full;
+
+	switch (phy_data->port_mode) {
+	case AXGBE_PORT_MODE_BACKPLANE:
+		advertising |= ADVERTISED_10000baseKR_Full;
+		break;
+	case AXGBE_PORT_MODE_BACKPLANE_2500:
+		advertising |= ADVERTISED_1000baseKX_Full;
+		break;
+	case AXGBE_PORT_MODE_1000BASE_T:
+	case AXGBE_PORT_MODE_1000BASE_X:
+	case AXGBE_PORT_MODE_NBASE_T:
+		advertising |= ADVERTISED_1000baseKX_Full;
+		break;
+	case AXGBE_PORT_MODE_10GBASE_T:
+		PMD_DRV_LOG(ERR, "10GBASE_T mode is not supported\n");
+		break;
+	case AXGBE_PORT_MODE_10GBASE_R:
+		advertising |= ADVERTISED_10000baseKR_Full;
+		break;
+	case AXGBE_PORT_MODE_SFP:
+		switch (phy_data->sfp_base) {
+		case AXGBE_SFP_BASE_1000_T:
+		case AXGBE_SFP_BASE_1000_SX:
+		case AXGBE_SFP_BASE_1000_LX:
+		case AXGBE_SFP_BASE_1000_CX:
+			advertising |= ADVERTISED_1000baseKX_Full;
+			break;
+		default:
+			advertising |= ADVERTISED_10000baseKR_Full;
+			break;
+		}
+		break;
+	default:
+		advertising |= ADVERTISED_10000baseKR_Full;
+		break;
+	}
+
+	return advertising;
+}
+
+static int axgbe_phy_an_config(struct axgbe_port *pdata __rte_unused)
+{
+	/* Dummy API since there is no case to support external PHY
+	 * devices registered through kernel APIs
+	 */
+	return 0;
+}
+
+static enum axgbe_an_mode axgbe_phy_an_sfp_mode(struct axgbe_phy_data *phy_data)
+{
+	switch (phy_data->sfp_base) {
+	case AXGBE_SFP_BASE_1000_T:
+		return AXGBE_AN_MODE_CL37_SGMII;
+	case AXGBE_SFP_BASE_1000_SX:
+	case AXGBE_SFP_BASE_1000_LX:
+	case AXGBE_SFP_BASE_1000_CX:
+		return AXGBE_AN_MODE_CL37;
+	default:
+		return AXGBE_AN_MODE_NONE;
+	}
+}
+
+static enum axgbe_an_mode axgbe_phy_an_mode(struct axgbe_port *pdata)
+{
+	struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+	/* A KR re-driver will always require CL73 AN */
+	if (phy_data->redrv)
+		return AXGBE_AN_MODE_CL73_REDRV;
+
+	switch (phy_data->port_mode) {
+	case AXGBE_PORT_MODE_BACKPLANE:
+		return AXGBE_AN_MODE_CL73;
+	case AXGBE_PORT_MODE_BACKPLANE_2500:
+		return AXGBE_AN_MODE_NONE;
+	case AXGBE_PORT_MODE_1000BASE_T:
+		return AXGBE_AN_MODE_CL37_SGMII;
+	case AXGBE_PORT_MODE_1000BASE_X:
+		return AXGBE_AN_MODE_CL37;
+	case AXGBE_PORT_MODE_NBASE_T:
+		return AXGBE_AN_MODE_CL37_SGMII;
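+	/* 10GBase-T autonegotiates via clause 73, while 10GBase-R is a
+	 * fixed-rate 10G interface with no auto-negotiation.
+	 */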
+ case AXGBE_PORT_MODE_10GBASE_T: + return AXGBE_AN_MODE_CL73; + case AXGBE_PORT_MODE_10GBASE_R: + return AXGBE_AN_MODE_NONE; + case AXGBE_PORT_MODE_SFP: + return axgbe_phy_an_sfp_mode(phy_data); + default: + return AXGBE_AN_MODE_NONE; + } +} + +static int axgbe_phy_set_redrv_mode_mdio(struct axgbe_port *pdata, + enum axgbe_phy_redrv_mode mode) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + u16 redrv_reg, redrv_val; + + redrv_reg = AXGBE_PHY_REDRV_MODE_REG + (phy_data->redrv_lane * 0x1000); + redrv_val = (u16)mode; + + return pdata->hw_if.write_ext_mii_regs(pdata, phy_data->redrv_addr, + redrv_reg, redrv_val); +} + +static int axgbe_phy_set_redrv_mode_i2c(struct axgbe_port *pdata, + enum axgbe_phy_redrv_mode mode) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + unsigned int redrv_reg; + int ret; + + /* Calculate the register to write */ + redrv_reg = AXGBE_PHY_REDRV_MODE_REG + (phy_data->redrv_lane * 0x1000); + + ret = axgbe_phy_redrv_write(pdata, redrv_reg, mode); + + return ret; +} + +static void axgbe_phy_set_redrv_mode(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + enum axgbe_phy_redrv_mode mode; + int ret; + + if (!phy_data->redrv) + return; + + mode = AXGBE_PHY_REDRV_MODE_CX; + if ((phy_data->port_mode == AXGBE_PORT_MODE_SFP) && + (phy_data->sfp_base != AXGBE_SFP_BASE_1000_CX) && + (phy_data->sfp_base != AXGBE_SFP_BASE_10000_CR)) + mode = AXGBE_PHY_REDRV_MODE_SR; + + ret = axgbe_phy_get_comm_ownership(pdata); + if (ret) + return; + + if (phy_data->redrv_if) + axgbe_phy_set_redrv_mode_i2c(pdata, mode); + else + axgbe_phy_set_redrv_mode_mdio(pdata, mode); + + axgbe_phy_put_comm_ownership(pdata); +} + +static void axgbe_phy_start_ratechange(struct axgbe_port *pdata) +{ + if (!XP_IOREAD_BITS(pdata, XP_DRIVER_INT_RO, STATUS)) + return; +} + +static void axgbe_phy_complete_ratechange(struct axgbe_port *pdata) +{ + unsigned int wait; + + /* Wait for command to complete */ + wait = AXGBE_RATECHANGE_COUNT; + while (wait--) { + if (!XP_IOREAD_BITS(pdata, XP_DRIVER_INT_RO, STATUS)) + return; + + rte_delay_us(1500); + } +} + +static void axgbe_phy_rrc(struct axgbe_port *pdata) +{ + unsigned int s0; + + axgbe_phy_start_ratechange(pdata); + + /* Receiver Reset Cycle */ + s0 = 0; + XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, 5); + XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 0); + + /* Call FW to make the change */ + XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0); + XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0); + XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1); + + axgbe_phy_complete_ratechange(pdata); +} + +static void axgbe_phy_power_off(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + + axgbe_phy_start_ratechange(pdata); + + /* Call FW to make the change */ + XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, 0); + XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0); + XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1); + axgbe_phy_complete_ratechange(pdata); + phy_data->cur_mode = AXGBE_MODE_UNKNOWN; +} + +static void axgbe_phy_sfi_mode(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + unsigned int s0; + + axgbe_phy_set_redrv_mode(pdata); + + axgbe_phy_start_ratechange(pdata); + + /* 10G/SFI */ + s0 = 0; + XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, 3); + if (phy_data->sfp_cable != AXGBE_SFP_CABLE_PASSIVE) { + XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 0); + } else { + if (phy_data->sfp_cable_len <= 1) + XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 1); + else if 
(phy_data->sfp_cable_len <= 3) + XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 2); + else + XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 3); + } + + /* Call FW to make the change */ + XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0); + XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0); + XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1); + axgbe_phy_complete_ratechange(pdata); + phy_data->cur_mode = AXGBE_MODE_SFI; +} + +static void axgbe_phy_kr_mode(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + unsigned int s0; + + axgbe_phy_set_redrv_mode(pdata); + + axgbe_phy_start_ratechange(pdata); + + /* 10G/KR */ + s0 = 0; + XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, 4); + XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 0); + + /* Call FW to make the change */ + XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0); + XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0); + XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1); + axgbe_phy_complete_ratechange(pdata); + phy_data->cur_mode = AXGBE_MODE_KR; +} + +static enum axgbe_mode axgbe_phy_cur_mode(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + + return phy_data->cur_mode; +} + +static enum axgbe_mode axgbe_phy_switch_baset_mode(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + + /* No switching if not 10GBase-T */ + if (phy_data->port_mode != AXGBE_PORT_MODE_10GBASE_T) + return axgbe_phy_cur_mode(pdata); + + switch (axgbe_phy_cur_mode(pdata)) { + case AXGBE_MODE_SGMII_100: + case AXGBE_MODE_SGMII_1000: + return AXGBE_MODE_KR; + case AXGBE_MODE_KR: + default: + return AXGBE_MODE_SGMII_1000; + } +} + +static enum axgbe_mode axgbe_phy_switch_bp_2500_mode(struct axgbe_port *pdata + __rte_unused) +{ + return AXGBE_MODE_KX_2500; +} + +static enum axgbe_mode axgbe_phy_switch_bp_mode(struct axgbe_port *pdata) +{ + /* If we are in KR switch to KX, and vice-versa */ + switch (axgbe_phy_cur_mode(pdata)) { + case AXGBE_MODE_KX_1000: + return AXGBE_MODE_KR; + case AXGBE_MODE_KR: + default: + return AXGBE_MODE_KX_1000; + } +} + +static enum axgbe_mode axgbe_phy_switch_mode(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + + switch (phy_data->port_mode) { + case AXGBE_PORT_MODE_BACKPLANE: + return axgbe_phy_switch_bp_mode(pdata); + case AXGBE_PORT_MODE_BACKPLANE_2500: + return axgbe_phy_switch_bp_2500_mode(pdata); + case AXGBE_PORT_MODE_1000BASE_T: + case AXGBE_PORT_MODE_NBASE_T: + case AXGBE_PORT_MODE_10GBASE_T: + return axgbe_phy_switch_baset_mode(pdata); + case AXGBE_PORT_MODE_1000BASE_X: + case AXGBE_PORT_MODE_10GBASE_R: + case AXGBE_PORT_MODE_SFP: + /* No switching, so just return current mode */ + return axgbe_phy_cur_mode(pdata); + default: + return AXGBE_MODE_UNKNOWN; + } +} + +static enum axgbe_mode axgbe_phy_get_basex_mode(struct axgbe_phy_data *phy_data + __rte_unused, + int speed) +{ + switch (speed) { + case SPEED_1000: + return AXGBE_MODE_X; + case SPEED_10000: + return AXGBE_MODE_KR; + default: + return AXGBE_MODE_UNKNOWN; + } +} + +static enum axgbe_mode axgbe_phy_get_baset_mode(struct axgbe_phy_data *phy_data + __rte_unused, + int speed) +{ + switch (speed) { + case SPEED_100: + return AXGBE_MODE_SGMII_100; + case SPEED_1000: + return AXGBE_MODE_SGMII_1000; + case SPEED_10000: + return AXGBE_MODE_KR; + default: + return AXGBE_MODE_UNKNOWN; + } +} + +static enum axgbe_mode axgbe_phy_get_sfp_mode(struct axgbe_phy_data *phy_data, + int speed) +{ + switch (speed) { + case SPEED_100: + return AXGBE_MODE_SGMII_100; + case 
SPEED_1000: + if (phy_data->sfp_base == AXGBE_SFP_BASE_1000_T) + return AXGBE_MODE_SGMII_1000; + else + return AXGBE_MODE_X; + case SPEED_10000: + case SPEED_UNKNOWN: + return AXGBE_MODE_SFI; + default: + return AXGBE_MODE_UNKNOWN; + } +} + +static enum axgbe_mode axgbe_phy_get_bp_2500_mode(int speed) +{ + switch (speed) { + case SPEED_2500: + return AXGBE_MODE_KX_2500; + default: + return AXGBE_MODE_UNKNOWN; + } +} + +static enum axgbe_mode axgbe_phy_get_bp_mode(int speed) +{ + switch (speed) { + case SPEED_1000: + return AXGBE_MODE_KX_1000; + case SPEED_10000: + return AXGBE_MODE_KR; + default: + return AXGBE_MODE_UNKNOWN; + } +} + +static enum axgbe_mode axgbe_phy_get_mode(struct axgbe_port *pdata, + int speed) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + + switch (phy_data->port_mode) { + case AXGBE_PORT_MODE_BACKPLANE: + return axgbe_phy_get_bp_mode(speed); + case AXGBE_PORT_MODE_BACKPLANE_2500: + return axgbe_phy_get_bp_2500_mode(speed); + case AXGBE_PORT_MODE_1000BASE_T: + case AXGBE_PORT_MODE_NBASE_T: + case AXGBE_PORT_MODE_10GBASE_T: + return axgbe_phy_get_baset_mode(phy_data, speed); + case AXGBE_PORT_MODE_1000BASE_X: + case AXGBE_PORT_MODE_10GBASE_R: + return axgbe_phy_get_basex_mode(phy_data, speed); + case AXGBE_PORT_MODE_SFP: + return axgbe_phy_get_sfp_mode(phy_data, speed); + default: + return AXGBE_MODE_UNKNOWN; + } +} + +static void axgbe_phy_set_mode(struct axgbe_port *pdata, enum axgbe_mode mode) +{ + switch (mode) { + case AXGBE_MODE_KR: + axgbe_phy_kr_mode(pdata); + break; + case AXGBE_MODE_SFI: + axgbe_phy_sfi_mode(pdata); + break; + default: + break; + } +} + +static bool axgbe_phy_check_mode(struct axgbe_port *pdata, + enum axgbe_mode mode, u32 advert) +{ + if (pdata->phy.autoneg == AUTONEG_ENABLE) { + if (pdata->phy.advertising & advert) + return true; + } else { + enum axgbe_mode cur_mode; + + cur_mode = axgbe_phy_get_mode(pdata, pdata->phy.speed); + if (cur_mode == mode) + return true; + } + + return false; +} + +static bool axgbe_phy_use_basex_mode(struct axgbe_port *pdata, + enum axgbe_mode mode) +{ + switch (mode) { + case AXGBE_MODE_X: + return axgbe_phy_check_mode(pdata, mode, + ADVERTISED_1000baseT_Full); + case AXGBE_MODE_KR: + return axgbe_phy_check_mode(pdata, mode, + ADVERTISED_10000baseT_Full); + default: + return false; + } +} + +static bool axgbe_phy_use_baset_mode(struct axgbe_port *pdata, + enum axgbe_mode mode) +{ + switch (mode) { + case AXGBE_MODE_SGMII_100: + return axgbe_phy_check_mode(pdata, mode, + ADVERTISED_100baseT_Full); + case AXGBE_MODE_SGMII_1000: + return axgbe_phy_check_mode(pdata, mode, + ADVERTISED_1000baseT_Full); + case AXGBE_MODE_KR: + return axgbe_phy_check_mode(pdata, mode, + ADVERTISED_10000baseT_Full); + default: + return false; + } +} + +static bool axgbe_phy_use_sfp_mode(struct axgbe_port *pdata, + enum axgbe_mode mode) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + + switch (mode) { + case AXGBE_MODE_X: + if (phy_data->sfp_base == AXGBE_SFP_BASE_1000_T) + return false; + return axgbe_phy_check_mode(pdata, mode, + ADVERTISED_1000baseT_Full); + case AXGBE_MODE_SGMII_100: + if (phy_data->sfp_base != AXGBE_SFP_BASE_1000_T) + return false; + return axgbe_phy_check_mode(pdata, mode, + ADVERTISED_100baseT_Full); + case AXGBE_MODE_SGMII_1000: + if (phy_data->sfp_base != AXGBE_SFP_BASE_1000_T) + return false; + return axgbe_phy_check_mode(pdata, mode, + ADVERTISED_1000baseT_Full); + case AXGBE_MODE_SFI: + return axgbe_phy_check_mode(pdata, mode, + ADVERTISED_10000baseT_Full); + default: + return false; + } 
+} + +static bool axgbe_phy_use_bp_2500_mode(struct axgbe_port *pdata, + enum axgbe_mode mode) +{ + switch (mode) { + case AXGBE_MODE_KX_2500: + return axgbe_phy_check_mode(pdata, mode, + ADVERTISED_2500baseX_Full); + default: + return false; + } +} + +static bool axgbe_phy_use_bp_mode(struct axgbe_port *pdata, + enum axgbe_mode mode) +{ + switch (mode) { + case AXGBE_MODE_KX_1000: + return axgbe_phy_check_mode(pdata, mode, + ADVERTISED_1000baseKX_Full); + case AXGBE_MODE_KR: + return axgbe_phy_check_mode(pdata, mode, + ADVERTISED_10000baseKR_Full); + default: + return false; + } +} + +static bool axgbe_phy_use_mode(struct axgbe_port *pdata, enum axgbe_mode mode) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + + switch (phy_data->port_mode) { + case AXGBE_PORT_MODE_BACKPLANE: + return axgbe_phy_use_bp_mode(pdata, mode); + case AXGBE_PORT_MODE_BACKPLANE_2500: + return axgbe_phy_use_bp_2500_mode(pdata, mode); + case AXGBE_PORT_MODE_1000BASE_T: + case AXGBE_PORT_MODE_NBASE_T: + case AXGBE_PORT_MODE_10GBASE_T: + return axgbe_phy_use_baset_mode(pdata, mode); + case AXGBE_PORT_MODE_1000BASE_X: + case AXGBE_PORT_MODE_10GBASE_R: + return axgbe_phy_use_basex_mode(pdata, mode); + case AXGBE_PORT_MODE_SFP: + return axgbe_phy_use_sfp_mode(pdata, mode); + default: + return false; + } +} + +static int axgbe_phy_link_status(struct axgbe_port *pdata, int *an_restart) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + unsigned int reg; + + *an_restart = 0; + + if (phy_data->port_mode == AXGBE_PORT_MODE_SFP) { + /* Check SFP signals */ + axgbe_phy_sfp_detect(pdata); + + if (phy_data->sfp_changed) { + *an_restart = 1; + return 0; + } + + if (phy_data->sfp_mod_absent || phy_data->sfp_rx_los) + return 0; + } + + /* Link status is latched low, so read once to clear + * and then read again to get current state + */ + reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1); + reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1); + if (reg & MDIO_STAT1_LSTATUS) + return 1; + + /* No link, attempt a receiver reset cycle */ + if (phy_data->rrc_count++) { + phy_data->rrc_count = 0; + axgbe_phy_rrc(pdata); + } + + return 0; +} + +static void axgbe_phy_sfp_gpio_setup(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + unsigned int reg; + + reg = XP_IOREAD(pdata, XP_PROP_3); + + phy_data->sfp_gpio_address = AXGBE_GPIO_ADDRESS_PCA9555 + + XP_GET_BITS(reg, XP_PROP_3, GPIO_ADDR); + + phy_data->sfp_gpio_mask = XP_GET_BITS(reg, XP_PROP_3, GPIO_MASK); + + phy_data->sfp_gpio_rx_los = XP_GET_BITS(reg, XP_PROP_3, + GPIO_RX_LOS); + phy_data->sfp_gpio_tx_fault = XP_GET_BITS(reg, XP_PROP_3, + GPIO_TX_FAULT); + phy_data->sfp_gpio_mod_absent = XP_GET_BITS(reg, XP_PROP_3, + GPIO_MOD_ABS); + phy_data->sfp_gpio_rate_select = XP_GET_BITS(reg, XP_PROP_3, + GPIO_RATE_SELECT); +} + +static void axgbe_phy_sfp_comm_setup(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + unsigned int reg, mux_addr_hi, mux_addr_lo; + + reg = XP_IOREAD(pdata, XP_PROP_4); + + mux_addr_hi = XP_GET_BITS(reg, XP_PROP_4, MUX_ADDR_HI); + mux_addr_lo = XP_GET_BITS(reg, XP_PROP_4, MUX_ADDR_LO); + if (mux_addr_lo == AXGBE_SFP_DIRECT) + return; + + phy_data->sfp_comm = AXGBE_SFP_COMM_PCA9545; + phy_data->sfp_mux_address = (mux_addr_hi << 2) + mux_addr_lo; + phy_data->sfp_mux_channel = XP_GET_BITS(reg, XP_PROP_4, MUX_CHAN); +} + +static void axgbe_phy_sfp_setup(struct axgbe_port *pdata) +{ + axgbe_phy_sfp_comm_setup(pdata); + axgbe_phy_sfp_gpio_setup(pdata); +} + +static bool 
axgbe_phy_redrv_error(struct axgbe_phy_data *phy_data) +{ + if (!phy_data->redrv) + return false; + + if (phy_data->redrv_if >= AXGBE_PHY_REDRV_IF_MAX) + return true; + + switch (phy_data->redrv_model) { + case AXGBE_PHY_REDRV_MODEL_4223: + if (phy_data->redrv_lane > 3) + return true; + break; + case AXGBE_PHY_REDRV_MODEL_4227: + if (phy_data->redrv_lane > 1) + return true; + break; + default: + return true; + } + + return false; +} + +static int axgbe_phy_mdio_reset_setup(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + unsigned int reg; + + if (phy_data->conn_type != AXGBE_CONN_TYPE_MDIO) + return 0; + reg = XP_IOREAD(pdata, XP_PROP_3); + phy_data->mdio_reset = XP_GET_BITS(reg, XP_PROP_3, MDIO_RESET); + switch (phy_data->mdio_reset) { + case AXGBE_MDIO_RESET_NONE: + case AXGBE_MDIO_RESET_I2C_GPIO: + case AXGBE_MDIO_RESET_INT_GPIO: + break; + default: + PMD_DRV_LOG(ERR, "unsupported MDIO reset (%#x)\n", + phy_data->mdio_reset); + return -EINVAL; + } + if (phy_data->mdio_reset == AXGBE_MDIO_RESET_I2C_GPIO) { + phy_data->mdio_reset_addr = AXGBE_GPIO_ADDRESS_PCA9555 + + XP_GET_BITS(reg, XP_PROP_3, + MDIO_RESET_I2C_ADDR); + phy_data->mdio_reset_gpio = XP_GET_BITS(reg, XP_PROP_3, + MDIO_RESET_I2C_GPIO); + } else if (phy_data->mdio_reset == AXGBE_MDIO_RESET_INT_GPIO) { + phy_data->mdio_reset_gpio = XP_GET_BITS(reg, XP_PROP_3, + MDIO_RESET_INT_GPIO); + } + + return 0; +} + +static bool axgbe_phy_port_mode_mismatch(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + + switch (phy_data->port_mode) { + case AXGBE_PORT_MODE_BACKPLANE: + if ((phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) || + (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000)) + return false; + break; + case AXGBE_PORT_MODE_BACKPLANE_2500: + if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_2500) + return false; + break; + case AXGBE_PORT_MODE_1000BASE_T: + if ((phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) || + (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000)) + return false; + break; + case AXGBE_PORT_MODE_1000BASE_X: + if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) + return false; + break; + case AXGBE_PORT_MODE_NBASE_T: + if ((phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) || + (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) || + (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_2500)) + return false; + break; + case AXGBE_PORT_MODE_10GBASE_T: + if ((phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) || + (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) || + (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000)) + return false; + break; + case AXGBE_PORT_MODE_10GBASE_R: + if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000) + return false; + break; + case AXGBE_PORT_MODE_SFP: + if ((phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) || + (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) || + (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000)) + return false; + break; + default: + break; + } + + return true; +} + +static bool axgbe_phy_conn_type_mismatch(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + + switch (phy_data->port_mode) { + case AXGBE_PORT_MODE_BACKPLANE: + case AXGBE_PORT_MODE_BACKPLANE_2500: + if (phy_data->conn_type == AXGBE_CONN_TYPE_BACKPLANE) + return false; + break; + case AXGBE_PORT_MODE_1000BASE_T: + case AXGBE_PORT_MODE_1000BASE_X: + case AXGBE_PORT_MODE_NBASE_T: + case AXGBE_PORT_MODE_10GBASE_T: + case AXGBE_PORT_MODE_10GBASE_R: + if (phy_data->conn_type == 
AXGBE_CONN_TYPE_MDIO) + return false; + break; + case AXGBE_PORT_MODE_SFP: + if (phy_data->conn_type == AXGBE_CONN_TYPE_SFP) + return false; + break; + default: + break; + } + + return true; +} + +static bool axgbe_phy_port_enabled(struct axgbe_port *pdata) +{ + unsigned int reg; + + reg = XP_IOREAD(pdata, XP_PROP_0); + if (!XP_GET_BITS(reg, XP_PROP_0, PORT_SPEEDS)) + return false; + if (!XP_GET_BITS(reg, XP_PROP_0, CONN_TYPE)) + return false; + + return true; +} + +static void axgbe_phy_cdr_track(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + + if (!pdata->vdata->an_cdr_workaround) + return; + + if (!phy_data->phy_cdr_notrack) + return; + + rte_delay_us(phy_data->phy_cdr_delay + 400); + + XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_CDR_CONTROL, + AXGBE_PMA_CDR_TRACK_EN_MASK, + AXGBE_PMA_CDR_TRACK_EN_ON); + + phy_data->phy_cdr_notrack = 0; +} + +static void axgbe_phy_cdr_notrack(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + + if (!pdata->vdata->an_cdr_workaround) + return; + + if (phy_data->phy_cdr_notrack) + return; + + XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_CDR_CONTROL, + AXGBE_PMA_CDR_TRACK_EN_MASK, + AXGBE_PMA_CDR_TRACK_EN_OFF); + + axgbe_phy_rrc(pdata); + + phy_data->phy_cdr_notrack = 1; +} + +static void axgbe_phy_kr_training_post(struct axgbe_port *pdata) +{ + if (!pdata->cdr_track_early) + axgbe_phy_cdr_track(pdata); +} + +static void axgbe_phy_kr_training_pre(struct axgbe_port *pdata) +{ + if (pdata->cdr_track_early) + axgbe_phy_cdr_track(pdata); +} + +static void axgbe_phy_an_post(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + + switch (pdata->an_mode) { + case AXGBE_AN_MODE_CL73: + case AXGBE_AN_MODE_CL73_REDRV: + if (phy_data->cur_mode != AXGBE_MODE_KR) + break; + + axgbe_phy_cdr_track(pdata); + + switch (pdata->an_result) { + case AXGBE_AN_READY: + case AXGBE_AN_COMPLETE: + break; + default: + if (phy_data->phy_cdr_delay < AXGBE_CDR_DELAY_MAX) + phy_data->phy_cdr_delay += AXGBE_CDR_DELAY_INC; + break; + } + break; + default: + break; + } +} + +static void axgbe_phy_an_pre(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + + switch (pdata->an_mode) { + case AXGBE_AN_MODE_CL73: + case AXGBE_AN_MODE_CL73_REDRV: + if (phy_data->cur_mode != AXGBE_MODE_KR) + break; + + axgbe_phy_cdr_notrack(pdata); + break; + default: + break; + } +} + +static void axgbe_phy_stop(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + + /* Reset SFP data */ + axgbe_phy_sfp_reset(phy_data); + axgbe_phy_sfp_mod_absent(pdata); + + /* Reset CDR support */ + axgbe_phy_cdr_track(pdata); + + /* Power off the PHY */ + axgbe_phy_power_off(pdata); + + /* Stop the I2C controller */ + pdata->i2c_if.i2c_stop(pdata); +} + +static int axgbe_phy_start(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + int ret; + + /* Start the I2C controller */ + ret = pdata->i2c_if.i2c_start(pdata); + if (ret) + return ret; + + /* Start in highest supported mode */ + axgbe_phy_set_mode(pdata, phy_data->start_mode); + + /* Reset CDR support */ + axgbe_phy_cdr_track(pdata); + + /* After starting the I2C controller, we can check for an SFP */ + switch (phy_data->port_mode) { + case AXGBE_PORT_MODE_SFP: + axgbe_phy_sfp_detect(pdata); + break; + default: + break; + } + + return ret; +} + +static int axgbe_phy_reset(struct axgbe_port *pdata) +{ + struct axgbe_phy_data *phy_data = pdata->phy_data; + enum 
axgbe_mode cur_mode;
+
+	/* Reset by power cycling the PHY */
+	cur_mode = phy_data->cur_mode;
+	axgbe_phy_power_off(pdata);
+	/* The very first reset is issued with cur_mode still unknown */
+	axgbe_phy_set_mode(pdata, cur_mode);
+	return 0;
+}
+
+static int axgbe_phy_init(struct axgbe_port *pdata)
+{
+	struct axgbe_phy_data *phy_data;
+	unsigned int reg;
+	int ret;
+
+	/* Check if enabled */
+	if (!axgbe_phy_port_enabled(pdata)) {
+		PMD_DRV_LOG(ERR, "device is not enabled\n");
+		return -ENODEV;
+	}
+
+	/* Initialize the I2C controller */
+	ret = pdata->i2c_if.i2c_init(pdata);
+	if (ret)
+		return ret;
+
+	phy_data = rte_zmalloc("phy_data memory", sizeof(*phy_data), 0);
+	if (!phy_data) {
+		PMD_DRV_LOG(ERR, "phy_data allocation failed\n");
+		return -ENOMEM;
+	}
+	pdata->phy_data = phy_data;
+
+	reg = XP_IOREAD(pdata, XP_PROP_0);
+	phy_data->port_mode = XP_GET_BITS(reg, XP_PROP_0, PORT_MODE);
+	phy_data->port_id = XP_GET_BITS(reg, XP_PROP_0, PORT_ID);
+	phy_data->port_speeds = XP_GET_BITS(reg, XP_PROP_0, PORT_SPEEDS);
+	phy_data->conn_type = XP_GET_BITS(reg, XP_PROP_0, CONN_TYPE);
+	phy_data->mdio_addr = XP_GET_BITS(reg, XP_PROP_0, MDIO_ADDR);
+
+	reg = XP_IOREAD(pdata, XP_PROP_4);
+	phy_data->redrv = XP_GET_BITS(reg, XP_PROP_4, REDRV_PRESENT);
+	phy_data->redrv_if = XP_GET_BITS(reg, XP_PROP_4, REDRV_IF);
+	phy_data->redrv_addr = XP_GET_BITS(reg, XP_PROP_4, REDRV_ADDR);
+	phy_data->redrv_lane = XP_GET_BITS(reg, XP_PROP_4, REDRV_LANE);
+	phy_data->redrv_model = XP_GET_BITS(reg, XP_PROP_4, REDRV_MODEL);
+
+	/* Validate the connection requested */
+	if (axgbe_phy_conn_type_mismatch(pdata)) {
+		PMD_DRV_LOG(ERR, "phy mode/connection mismatch (%#x/%#x)\n",
+			    phy_data->port_mode, phy_data->conn_type);
+		return -EINVAL;
+	}
+
+	/* Validate the mode requested */
+	if (axgbe_phy_port_mode_mismatch(pdata)) {
+		PMD_DRV_LOG(ERR, "phy mode/speed mismatch (%#x/%#x)\n",
+			    phy_data->port_mode, phy_data->port_speeds);
+		return -EINVAL;
+	}
+
+	/* Check for and validate MDIO reset support */
+	ret = axgbe_phy_mdio_reset_setup(pdata);
+	if (ret)
+		return ret;
+
+	/* Validate the re-driver information */
+	if (axgbe_phy_redrv_error(phy_data)) {
+		PMD_DRV_LOG(ERR, "phy re-driver settings error\n");
+		return -EINVAL;
+	}
+	pdata->kr_redrv = phy_data->redrv;
+
+	/* Indicate current mode is unknown */
+	phy_data->cur_mode = AXGBE_MODE_UNKNOWN;
+
+	/* Initialize supported features */
+	pdata->phy.supported = 0;
+
+	switch (phy_data->port_mode) {
+	/* Backplane support */
+	case AXGBE_PORT_MODE_BACKPLANE:
+		pdata->phy.supported |= SUPPORTED_Autoneg;
+		pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+		pdata->phy.supported |= SUPPORTED_Backplane;
+		if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) {
+			pdata->phy.supported |= SUPPORTED_1000baseKX_Full;
+			phy_data->start_mode = AXGBE_MODE_KX_1000;
+		}
+		if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000) {
+			pdata->phy.supported |= SUPPORTED_10000baseKR_Full;
+			if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE)
+				pdata->phy.supported |=
+					SUPPORTED_10000baseR_FEC;
+			phy_data->start_mode = AXGBE_MODE_KR;
+		}
+
+		phy_data->phydev_mode = AXGBE_MDIO_MODE_NONE;
+		break;
+	case AXGBE_PORT_MODE_BACKPLANE_2500:
+		pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+		pdata->phy.supported |= SUPPORTED_Backplane;
+		pdata->phy.supported |= SUPPORTED_2500baseX_Full;
+		phy_data->start_mode = AXGBE_MODE_KX_2500;
+
+		phy_data->phydev_mode = AXGBE_MDIO_MODE_NONE;
+		break;
+
+	/* MDIO 1GBase-T support */
+	case AXGBE_PORT_MODE_1000BASE_T:
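+		/* 100/1000Mb/s over SGMII with an external clause-22 PHY */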
pdata->phy.supported |= SUPPORTED_Autoneg; + pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; + pdata->phy.supported |= SUPPORTED_TP; + if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) { + pdata->phy.supported |= SUPPORTED_100baseT_Full; + phy_data->start_mode = AXGBE_MODE_SGMII_100; + } + if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) { + pdata->phy.supported |= SUPPORTED_1000baseT_Full; + phy_data->start_mode = AXGBE_MODE_SGMII_1000; + } + + phy_data->phydev_mode = AXGBE_MDIO_MODE_CL22; + break; + + /* MDIO Base-X support */ + case AXGBE_PORT_MODE_1000BASE_X: + pdata->phy.supported |= SUPPORTED_Autoneg; + pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; + pdata->phy.supported |= SUPPORTED_FIBRE; + pdata->phy.supported |= SUPPORTED_1000baseT_Full; + phy_data->start_mode = AXGBE_MODE_X; + + phy_data->phydev_mode = AXGBE_MDIO_MODE_CL22; + break; + + /* MDIO NBase-T support */ + case AXGBE_PORT_MODE_NBASE_T: + pdata->phy.supported |= SUPPORTED_Autoneg; + pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; + pdata->phy.supported |= SUPPORTED_TP; + if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) { + pdata->phy.supported |= SUPPORTED_100baseT_Full; + phy_data->start_mode = AXGBE_MODE_SGMII_100; + } + if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) { + pdata->phy.supported |= SUPPORTED_1000baseT_Full; + phy_data->start_mode = AXGBE_MODE_SGMII_1000; + } + if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_2500) { + pdata->phy.supported |= SUPPORTED_2500baseX_Full; + phy_data->start_mode = AXGBE_MODE_KX_2500; + } + + phy_data->phydev_mode = AXGBE_MDIO_MODE_CL45; + break; + + /* 10GBase-T support */ + case AXGBE_PORT_MODE_10GBASE_T: + pdata->phy.supported |= SUPPORTED_Autoneg; + pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; + pdata->phy.supported |= SUPPORTED_TP; + if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) { + pdata->phy.supported |= SUPPORTED_100baseT_Full; + phy_data->start_mode = AXGBE_MODE_SGMII_100; + } + if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) { + pdata->phy.supported |= SUPPORTED_1000baseT_Full; + phy_data->start_mode = AXGBE_MODE_SGMII_1000; + } + if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000) { + pdata->phy.supported |= SUPPORTED_10000baseT_Full; + phy_data->start_mode = AXGBE_MODE_KR; + } + + phy_data->phydev_mode = AXGBE_MDIO_MODE_NONE; + break; + + /* 10GBase-R support */ + case AXGBE_PORT_MODE_10GBASE_R: + pdata->phy.supported |= SUPPORTED_Autoneg; + pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; + pdata->phy.supported |= SUPPORTED_TP; + pdata->phy.supported |= SUPPORTED_10000baseT_Full; + if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE) + pdata->phy.supported |= SUPPORTED_10000baseR_FEC; + phy_data->start_mode = AXGBE_MODE_SFI; + + phy_data->phydev_mode = AXGBE_MDIO_MODE_NONE; + break; + + /* SFP support */ + case AXGBE_PORT_MODE_SFP: + pdata->phy.supported |= SUPPORTED_Autoneg; + pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; + pdata->phy.supported |= SUPPORTED_TP; + pdata->phy.supported |= SUPPORTED_FIBRE; + if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) { + pdata->phy.supported |= SUPPORTED_100baseT_Full; + phy_data->start_mode = AXGBE_MODE_SGMII_100; + } + if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) { + pdata->phy.supported |= SUPPORTED_1000baseT_Full; + phy_data->start_mode = AXGBE_MODE_SGMII_1000; + } + if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000) { + pdata->phy.supported |= 
SUPPORTED_10000baseT_Full; + phy_data->start_mode = AXGBE_MODE_SFI; + if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE) + pdata->phy.supported |= + SUPPORTED_10000baseR_FEC; + } + + phy_data->phydev_mode = AXGBE_MDIO_MODE_CL22; + + axgbe_phy_sfp_setup(pdata); + break; + default: + return -EINVAL; + } + + if ((phy_data->conn_type & AXGBE_CONN_TYPE_MDIO) && + (phy_data->phydev_mode != AXGBE_MDIO_MODE_NONE)) { + ret = pdata->hw_if.set_ext_mii_mode(pdata, phy_data->mdio_addr, + phy_data->phydev_mode); + if (ret) { + PMD_DRV_LOG(ERR, "mdio port/clause not compatible (%d/%u)\n", + phy_data->mdio_addr, phy_data->phydev_mode); + return -EINVAL; + } + } + + if (phy_data->redrv && !phy_data->redrv_if) { + ret = pdata->hw_if.set_ext_mii_mode(pdata, phy_data->redrv_addr, + AXGBE_MDIO_MODE_CL22); + if (ret) { + PMD_DRV_LOG(ERR, "redriver mdio port not compatible (%u)\n", + phy_data->redrv_addr); + return -EINVAL; + } + } + + phy_data->phy_cdr_delay = AXGBE_CDR_DELAY_INIT; + return 0; +} +void axgbe_init_function_ptrs_phy_v2(struct axgbe_phy_if *phy_if) +{ + struct axgbe_phy_impl_if *phy_impl = &phy_if->phy_impl; + + phy_impl->init = axgbe_phy_init; + phy_impl->reset = axgbe_phy_reset; + phy_impl->start = axgbe_phy_start; + phy_impl->stop = axgbe_phy_stop; + phy_impl->link_status = axgbe_phy_link_status; + phy_impl->use_mode = axgbe_phy_use_mode; + phy_impl->set_mode = axgbe_phy_set_mode; + phy_impl->get_mode = axgbe_phy_get_mode; + phy_impl->switch_mode = axgbe_phy_switch_mode; + phy_impl->cur_mode = axgbe_phy_cur_mode; + phy_impl->an_mode = axgbe_phy_an_mode; + phy_impl->an_config = axgbe_phy_an_config; + phy_impl->an_advertising = axgbe_phy_an_advertising; + phy_impl->an_outcome = axgbe_phy_an_outcome; + + phy_impl->an_pre = axgbe_phy_an_pre; + phy_impl->an_post = axgbe_phy_an_post; + + phy_impl->kr_training_pre = axgbe_phy_kr_training_pre; + phy_impl->kr_training_post = axgbe_phy_kr_training_post; +} diff --git a/drivers/net/axgbe/axgbe_rxtx.c b/drivers/net/axgbe/axgbe_rxtx.c new file mode 100644 index 00000000..b302bdd1 --- /dev/null +++ b/drivers/net/axgbe/axgbe_rxtx.c @@ -0,0 +1,674 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved. + * Copyright(c) 2018 Synopsys, Inc. All rights reserved. 
+ */ + +#include "axgbe_ethdev.h" +#include "axgbe_rxtx.h" +#include "axgbe_phy.h" + +#include +#include +#include + +static void +axgbe_rx_queue_release(struct axgbe_rx_queue *rx_queue) +{ + uint16_t i; + struct rte_mbuf **sw_ring; + + if (rx_queue) { + sw_ring = rx_queue->sw_ring; + if (sw_ring) { + for (i = 0; i < rx_queue->nb_desc; i++) { + if (sw_ring[i]) + rte_pktmbuf_free(sw_ring[i]); + } + rte_free(sw_ring); + } + rte_free(rx_queue); + } +} + +void axgbe_dev_rx_queue_release(void *rxq) +{ + axgbe_rx_queue_release(rxq); +} + +int axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, + uint16_t nb_desc, unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mp) +{ + PMD_INIT_FUNC_TRACE(); + uint32_t size; + const struct rte_memzone *dma; + struct axgbe_rx_queue *rxq; + uint32_t rx_desc = nb_desc; + struct axgbe_port *pdata = dev->data->dev_private; + + /* + * validate Rx descriptors count + * should be power of 2 and less than h/w supported + */ + if ((!rte_is_power_of_2(rx_desc)) || + rx_desc > pdata->rx_desc_count) + return -EINVAL; + /* First allocate the rx queue data structure */ + rxq = rte_zmalloc_socket("ethdev RX queue", + sizeof(struct axgbe_rx_queue), + RTE_CACHE_LINE_SIZE, socket_id); + if (!rxq) { + PMD_INIT_LOG(ERR, "rte_zmalloc for rxq failed!"); + return -ENOMEM; + } + + rxq->cur = 0; + rxq->dirty = 0; + rxq->pdata = pdata; + rxq->mb_pool = mp; + rxq->queue_id = queue_idx; + rxq->port_id = dev->data->port_id; + rxq->nb_desc = rx_desc; + rxq->dma_regs = (void *)((uint8_t *)pdata->xgmac_regs + DMA_CH_BASE + + (DMA_CH_INC * rxq->queue_id)); + rxq->dma_tail_reg = (volatile uint32_t *)((uint8_t *)rxq->dma_regs + + DMA_CH_RDTR_LO); + rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_CRC_STRIP) ? 0 : ETHER_CRC_LEN); + + /* CRC strip in AXGBE supports per port not per queue */ + pdata->crc_strip_enable = (rxq->crc_len == 0) ? 1 : 0; + rxq->free_thresh = rx_conf->rx_free_thresh ? + rx_conf->rx_free_thresh : AXGBE_RX_FREE_THRESH; + if (rxq->free_thresh > rxq->nb_desc) + rxq->free_thresh = rxq->nb_desc >> 3; + + /* Allocate RX ring hardware descriptors */ + size = rxq->nb_desc * sizeof(union axgbe_rx_desc); + dma = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, size, 128, + socket_id); + if (!dma) { + PMD_DRV_LOG(ERR, "ring_dma_zone_reserve for rx_ring failed\n"); + axgbe_rx_queue_release(rxq); + return -ENOMEM; + } + rxq->ring_phys_addr = (uint64_t)dma->phys_addr; + rxq->desc = (volatile union axgbe_rx_desc *)dma->addr; + memset((void *)rxq->desc, 0, size); + /* Allocate software ring */ + size = rxq->nb_desc * sizeof(struct rte_mbuf *); + rxq->sw_ring = rte_zmalloc_socket("sw_ring", size, + RTE_CACHE_LINE_SIZE, + socket_id); + if (!rxq->sw_ring) { + PMD_DRV_LOG(ERR, "rte_zmalloc for sw_ring failed\n"); + axgbe_rx_queue_release(rxq); + return -ENOMEM; + } + dev->data->rx_queues[queue_idx] = rxq; + if (!pdata->rx_queues) + pdata->rx_queues = dev->data->rx_queues; + + return 0; +} + +static void axgbe_prepare_rx_stop(struct axgbe_port *pdata, + unsigned int queue) +{ + unsigned int rx_status; + unsigned long rx_timeout; + + /* The Rx engine cannot be stopped if it is actively processing + * packets. Wait for the Rx queue to empty the Rx fifo. Don't + * wait forever though... 
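[Editor's illustration] axgbe_dev_rx_queue_setup() above rejects descriptor counts that are not a power of two; that restriction is what lets AXGBE_GET_DESC_IDX() turn the free-running 64-bit cur/dirty counters into ring slots with a single AND, and lets "descriptors in flight" be computed by plain subtraction. A standalone sketch of the idiom (all names local to the example):

    #include <inttypes.h>
    #include <stdio.h>

    static int is_power_of_2(uint32_t n) { return n != 0 && (n & (n - 1)) == 0; }

    int main(void)
    {
        uint32_t nb_desc = 256;           /* ring size, must be 2^k */
        uint64_t cur = 1000, dirty = 900; /* free-running counters  */

        if (!is_power_of_2(nb_desc))
            return 1;                     /* setup would return -EINVAL */

        /* Slot index: one AND instead of a modulo on the hot path. */
        printf("cur slot  = %" PRIu64 "\n", cur & (nb_desc - 1));
        /* Descriptors in flight need no wrap handling at all: */
        printf("in flight = %" PRIu64 "\n", cur - dirty);
        return 0;
    }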
+ */ + rx_timeout = rte_get_timer_cycles() + (AXGBE_DMA_STOP_TIMEOUT * + rte_get_timer_hz()); + + while (time_before(rte_get_timer_cycles(), rx_timeout)) { + rx_status = AXGMAC_MTL_IOREAD(pdata, queue, MTL_Q_RQDR); + if ((AXGMAC_GET_BITS(rx_status, MTL_Q_RQDR, PRXQ) == 0) && + (AXGMAC_GET_BITS(rx_status, MTL_Q_RQDR, RXQSTS) == 0)) + break; + + rte_delay_us(900); + } + + if (!time_before(rte_get_timer_cycles(), rx_timeout)) + PMD_DRV_LOG(ERR, + "timed out waiting for Rx queue %u to empty\n", + queue); +} + +void axgbe_dev_disable_rx(struct rte_eth_dev *dev) +{ + struct axgbe_rx_queue *rxq; + struct axgbe_port *pdata = dev->data->dev_private; + unsigned int i; + + /* Disable MAC Rx */ + AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 0); + AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 0); + AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 0); + AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 0); + + /* Prepare for Rx DMA channel stop */ + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + axgbe_prepare_rx_stop(pdata, i); + } + /* Disable each Rx queue */ + AXGMAC_IOWRITE(pdata, MAC_RQC0R, 0); + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + /* Disable Rx DMA channel */ + AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, SR, 0); + } +} + +void axgbe_dev_enable_rx(struct rte_eth_dev *dev) +{ + struct axgbe_rx_queue *rxq; + struct axgbe_port *pdata = dev->data->dev_private; + unsigned int i; + unsigned int reg_val = 0; + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + /* Enable Rx DMA channel */ + AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, SR, 1); + } + + reg_val = 0; + for (i = 0; i < pdata->rx_q_count; i++) + reg_val |= (0x02 << (i << 1)); + AXGMAC_IOWRITE(pdata, MAC_RQC0R, reg_val); + + /* Enable MAC Rx */ + AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 1); + /* Frame is forwarded after stripping CRC to application*/ + if (pdata->crc_strip_enable) { + AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 1); + AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 1); + } + AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 1); +} + +/* Rx function one to one refresh */ +uint16_t +axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + PMD_INIT_FUNC_TRACE(); + uint16_t nb_rx = 0; + struct axgbe_rx_queue *rxq = rx_queue; + volatile union axgbe_rx_desc *desc; + uint64_t old_dirty = rxq->dirty; + struct rte_mbuf *mbuf, *tmbuf; + unsigned int err; + uint32_t error_status; + uint16_t idx, pidx, pkt_len; + + idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur); + while (nb_rx < nb_pkts) { + if (unlikely(idx == rxq->nb_desc)) + idx = 0; + + desc = &rxq->desc[idx]; + + if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, OWN)) + break; + tmbuf = rte_mbuf_raw_alloc(rxq->mb_pool); + if (unlikely(!tmbuf)) { + PMD_DRV_LOG(ERR, "RX mbuf alloc failed port_id = %u" + " queue_id = %u\n", + (unsigned int)rxq->port_id, + (unsigned int)rxq->queue_id); + rte_eth_devices[ + rxq->port_id].data->rx_mbuf_alloc_failed++; + break; + } + pidx = idx + 1; + if (unlikely(pidx == rxq->nb_desc)) + pidx = 0; + + rte_prefetch0(rxq->sw_ring[pidx]); + if ((pidx & 0x3) == 0) { + rte_prefetch0(&rxq->desc[pidx]); + rte_prefetch0(&rxq->sw_ring[pidx]); + } + + mbuf = rxq->sw_ring[idx]; + /* Check for any errors and free mbuf*/ + err = AXGMAC_GET_BITS_LE(desc->write.desc3, + RX_NORMAL_DESC3, ES); + error_status = 0; + if (unlikely(err)) { + error_status = desc->write.desc3 & AXGBE_ERR_STATUS; + if ((error_status != AXGBE_L3_CSUM_ERR) && + (error_status != AXGBE_L4_CSUM_ERR)) { + 
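[Editor's illustration] axgbe_prepare_rx_stop() above is an instance of the bounded-poll idiom: read a hardware status register until it reports idle or a deadline passes, sleeping briefly between reads. The standalone model below substitutes POSIX clock_gettime()/nanosleep() for rte_get_timer_cycles()/rte_delay_us() and a stand-in for the MTL_Q_RQDR read; treating time_before() as a simple deadline comparison is this example's assumption.

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    /* Stand-in for the MTL_Q_RQDR poll: reports idle after a few reads
     * so the example terminates. */
    static bool queue_is_idle(void) { static int polls; return ++polls > 3; }

    static long long now_ns(void)
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000000000LL + ts.tv_nsec;
    }

    int main(void)
    {
        long long deadline = now_ns() + 1000000000LL; /* ~1 s budget */
        bool idle = false;

        while (now_ns() < deadline) {                 /* time_before() */
            if ((idle = queue_is_idle()))
                break;
            nanosleep(&(struct timespec){ .tv_nsec = 900 * 1000 },
                      NULL);                          /* rte_delay_us(900) */
        }
        if (!idle)
            fprintf(stderr, "timed out waiting for Rx queue to empty\n");
        return idle ? 0 : 1;
    }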
rxq->errors++; + rte_pktmbuf_free(mbuf); + goto err_set; + } + } + if (rxq->pdata->rx_csum_enable) { + mbuf->ol_flags = 0; + mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD; + mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD; + if (unlikely(error_status == AXGBE_L3_CSUM_ERR)) { + mbuf->ol_flags &= ~PKT_RX_IP_CKSUM_GOOD; + mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD; + mbuf->ol_flags &= ~PKT_RX_L4_CKSUM_GOOD; + mbuf->ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN; + } else if ( + unlikely(error_status == AXGBE_L4_CSUM_ERR)) { + mbuf->ol_flags &= ~PKT_RX_L4_CKSUM_GOOD; + mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD; + } + } + rte_prefetch1(rte_pktmbuf_mtod(mbuf, void *)); + /* Get the RSS hash */ + if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, RSV)) + mbuf->hash.rss = rte_le_to_cpu_32(desc->write.desc1); + pkt_len = AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, + PL) - rxq->crc_len; + /* Mbuf populate */ + mbuf->next = NULL; + mbuf->data_off = RTE_PKTMBUF_HEADROOM; + mbuf->nb_segs = 1; + mbuf->port = rxq->port_id; + mbuf->pkt_len = pkt_len; + mbuf->data_len = pkt_len; + rxq->bytes += pkt_len; + rx_pkts[nb_rx++] = mbuf; +err_set: + rxq->cur++; + rxq->sw_ring[idx++] = tmbuf; + desc->read.baddr = + rte_cpu_to_le_64(rte_mbuf_data_iova_default(tmbuf)); + memset((void *)(&desc->read.desc2), 0, 8); + AXGMAC_SET_BITS_LE(desc->read.desc3, RX_NORMAL_DESC3, OWN, 1); + rxq->dirty++; + } + rxq->pkts += nb_rx; + if (rxq->dirty != old_dirty) { + rte_wmb(); + idx = AXGBE_GET_DESC_IDX(rxq, rxq->dirty - 1); + AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDTR_LO, + low32_value(rxq->ring_phys_addr + + (idx * sizeof(union axgbe_rx_desc)))); + } + + return nb_rx; +} + +/* Tx Apis */ +static void axgbe_tx_queue_release(struct axgbe_tx_queue *tx_queue) +{ + uint16_t i; + struct rte_mbuf **sw_ring; + + if (tx_queue) { + sw_ring = tx_queue->sw_ring; + if (sw_ring) { + for (i = 0; i < tx_queue->nb_desc; i++) { + if (sw_ring[i]) + rte_pktmbuf_free(sw_ring[i]); + } + rte_free(sw_ring); + } + rte_free(tx_queue); + } +} + +void axgbe_dev_tx_queue_release(void *txq) +{ + axgbe_tx_queue_release(txq); +} + +int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, + uint16_t nb_desc, unsigned int socket_id, + const struct rte_eth_txconf *tx_conf) +{ + PMD_INIT_FUNC_TRACE(); + uint32_t tx_desc; + struct axgbe_port *pdata; + struct axgbe_tx_queue *txq; + unsigned int tsize; + const struct rte_memzone *tz; + + tx_desc = nb_desc; + pdata = (struct axgbe_port *)dev->data->dev_private; + + /* + * validate tx descriptors count + * should be power of 2 and less than h/w supported + */ + if ((!rte_is_power_of_2(tx_desc)) || + tx_desc > pdata->tx_desc_count || + tx_desc < AXGBE_MIN_RING_DESC) + return -EINVAL; + + /* First allocate the tx queue data structure */ + txq = rte_zmalloc("ethdev TX queue", sizeof(struct axgbe_tx_queue), + RTE_CACHE_LINE_SIZE); + if (!txq) + return -ENOMEM; + txq->pdata = pdata; + + txq->nb_desc = tx_desc; + txq->free_thresh = tx_conf->tx_free_thresh ? 
+ tx_conf->tx_free_thresh : AXGBE_TX_FREE_THRESH; + if (txq->free_thresh > txq->nb_desc) + txq->free_thresh = (txq->nb_desc >> 1); + txq->free_batch_cnt = txq->free_thresh; + + /* In vector_tx path threshold should be multiple of queue_size*/ + if (txq->nb_desc % txq->free_thresh != 0) + txq->vector_disable = 1; + + if ((tx_conf->txq_flags & (uint32_t)ETH_TXQ_FLAGS_NOOFFLOADS) != + ETH_TXQ_FLAGS_NOOFFLOADS) { + txq->vector_disable = 1; + } + + /* Allocate TX ring hardware descriptors */ + tsize = txq->nb_desc * sizeof(struct axgbe_tx_desc); + tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx, + tsize, AXGBE_DESC_ALIGN, socket_id); + if (!tz) { + axgbe_tx_queue_release(txq); + return -ENOMEM; + } + memset(tz->addr, 0, tsize); + txq->ring_phys_addr = (uint64_t)tz->phys_addr; + txq->desc = tz->addr; + txq->queue_id = queue_idx; + txq->port_id = dev->data->port_id; + txq->dma_regs = (void *)((uint8_t *)pdata->xgmac_regs + DMA_CH_BASE + + (DMA_CH_INC * txq->queue_id)); + txq->dma_tail_reg = (volatile uint32_t *)((uint8_t *)txq->dma_regs + + DMA_CH_TDTR_LO); + txq->cur = 0; + txq->dirty = 0; + txq->nb_desc_free = txq->nb_desc; + /* Allocate software ring */ + tsize = txq->nb_desc * sizeof(struct rte_mbuf *); + txq->sw_ring = rte_zmalloc("tx_sw_ring", tsize, + RTE_CACHE_LINE_SIZE); + if (!txq->sw_ring) { + axgbe_tx_queue_release(txq); + return -ENOMEM; + } + dev->data->tx_queues[queue_idx] = txq; + if (!pdata->tx_queues) + pdata->tx_queues = dev->data->tx_queues; + + if (txq->vector_disable) + dev->tx_pkt_burst = &axgbe_xmit_pkts; + else +#ifdef RTE_ARCH_X86 + dev->tx_pkt_burst = &axgbe_xmit_pkts_vec; +#else + dev->tx_pkt_burst = &axgbe_xmit_pkts; +#endif + + return 0; +} + +static void axgbe_txq_prepare_tx_stop(struct axgbe_port *pdata, + unsigned int queue) +{ + unsigned int tx_status; + unsigned long tx_timeout; + + /* The Tx engine cannot be stopped if it is actively processing + * packets. Wait for the Tx queue to empty the Tx fifo. Don't + * wait forever though... + */ + tx_timeout = rte_get_timer_cycles() + (AXGBE_DMA_STOP_TIMEOUT * + rte_get_timer_hz()); + while (time_before(rte_get_timer_cycles(), tx_timeout)) { + tx_status = AXGMAC_MTL_IOREAD(pdata, queue, MTL_Q_TQDR); + if ((AXGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TRCSTS) != 1) && + (AXGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TXQSTS) == 0)) + break; + + rte_delay_us(900); + } + + if (!time_before(rte_get_timer_cycles(), tx_timeout)) + PMD_DRV_LOG(ERR, + "timed out waiting for Tx queue %u to empty\n", + queue); +} + +static void axgbe_prepare_tx_stop(struct axgbe_port *pdata, + unsigned int queue) +{ + unsigned int tx_dsr, tx_pos, tx_qidx; + unsigned int tx_status; + unsigned long tx_timeout; + + if (AXGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) > 0x20) + return axgbe_txq_prepare_tx_stop(pdata, queue); + + /* Calculate the status register to read and the position within */ + if (queue < DMA_DSRX_FIRST_QUEUE) { + tx_dsr = DMA_DSR0; + tx_pos = (queue * DMA_DSR_Q_WIDTH) + DMA_DSR0_TPS_START; + } else { + tx_qidx = queue - DMA_DSRX_FIRST_QUEUE; + + tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC); + tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_WIDTH) + + DMA_DSRX_TPS_START; + } + + /* The Tx engine cannot be stopped if it is actively processing + * descriptors. Wait for the Tx engine to enter the stopped or + * suspended state. Don't wait forever though... 
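[Editor's illustration] In axgbe_dev_tx_queue_setup() above, the vector Tx path is chosen only when the ring size divides evenly by the free threshold and the queue asks for no offloads; otherwise (and on non-x86 builds always) the scalar routine is installed. A standalone model of that dispatch, with stand-in burst functions:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint16_t (*tx_burst_t)(void *txq, void **pkts, uint16_t n);

    static uint16_t xmit_scalar(void *q, void **p, uint16_t n) { (void)q; (void)p; return n; }
    static uint16_t xmit_vec(void *q, void **p, uint16_t n)    { (void)q; (void)p; return n; }

    static tx_burst_t pick_tx_burst(uint16_t nb_desc, uint16_t free_thresh,
                                    int no_offloads)
    {
        /* Vector path needs whole free-batches per ring, no offloads. */
        int vector_ok = (nb_desc % free_thresh == 0) && no_offloads;
    #ifdef __SSE2__
        return vector_ok ? xmit_vec : xmit_scalar;
    #else
        (void)vector_ok;
        return xmit_scalar;            /* non-x86 builds: scalar only */
    #endif
    }

    int main(void)
    {
        tx_burst_t burst = pick_tx_burst(1024, 32, 1);

        printf("selected: %s\n", burst == xmit_vec ? "vector" : "scalar");
        return 0;
    }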
+ */ + tx_timeout = rte_get_timer_cycles() + (AXGBE_DMA_STOP_TIMEOUT * + rte_get_timer_hz()); + while (time_before(rte_get_timer_cycles(), tx_timeout)) { + tx_status = AXGMAC_IOREAD(pdata, tx_dsr); + tx_status = GET_BITS(tx_status, tx_pos, DMA_DSR_TPS_WIDTH); + if ((tx_status == DMA_TPS_STOPPED) || + (tx_status == DMA_TPS_SUSPENDED)) + break; + + rte_delay_us(900); + } + + if (!time_before(rte_get_timer_cycles(), tx_timeout)) + PMD_DRV_LOG(ERR, + "timed out waiting for Tx DMA channel %u to stop\n", + queue); +} + +void axgbe_dev_disable_tx(struct rte_eth_dev *dev) +{ + struct axgbe_tx_queue *txq; + struct axgbe_port *pdata = dev->data->dev_private; + unsigned int i; + + /* Prepare for stopping DMA channel */ + for (i = 0; i < pdata->tx_q_count; i++) { + txq = dev->data->tx_queues[i]; + axgbe_prepare_tx_stop(pdata, i); + } + /* Disable MAC Tx */ + AXGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0); + /* Disable each Tx queue*/ + for (i = 0; i < pdata->tx_q_count; i++) + AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, + 0); + /* Disable each Tx DMA channel */ + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, ST, 0); + } +} + +void axgbe_dev_enable_tx(struct rte_eth_dev *dev) +{ + struct axgbe_tx_queue *txq; + struct axgbe_port *pdata = dev->data->dev_private; + unsigned int i; + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + /* Enable Tx DMA channel */ + AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, ST, 1); + } + /* Enable Tx queue*/ + for (i = 0; i < pdata->tx_q_count; i++) + AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, + MTL_Q_ENABLED); + /* Enable MAC Tx */ + AXGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1); +} + +/* Free Tx conformed mbufs */ +static void axgbe_xmit_cleanup(struct axgbe_tx_queue *txq) +{ + volatile struct axgbe_tx_desc *desc; + uint16_t idx; + + idx = AXGBE_GET_DESC_IDX(txq, txq->dirty); + while (txq->cur != txq->dirty) { + if (unlikely(idx == txq->nb_desc)) + idx = 0; + desc = &txq->desc[idx]; + /* Check for ownership */ + if (AXGMAC_GET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN)) + return; + memset((void *)&desc->desc2, 0, 8); + /* Free mbuf */ + rte_pktmbuf_free(txq->sw_ring[idx]); + txq->sw_ring[idx++] = NULL; + txq->dirty++; + } +} + +/* Tx Descriptor formation + * Considering each mbuf requires one desc + * mbuf is linear + */ +static int axgbe_xmit_hw(struct axgbe_tx_queue *txq, + struct rte_mbuf *mbuf) +{ + volatile struct axgbe_tx_desc *desc; + uint16_t idx; + uint64_t mask; + + idx = AXGBE_GET_DESC_IDX(txq, txq->cur); + desc = &txq->desc[idx]; + + /* Update buffer address and length */ + desc->baddr = rte_mbuf_data_iova(mbuf); + AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, HL_B1L, + mbuf->pkt_len); + /* Total msg length to transmit */ + AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FL, + mbuf->pkt_len); + /* Mark it as First and Last Descriptor */ + AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FD, 1); + AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, LD, 1); + /* Mark it as a NORMAL descriptor */ + AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CTXT, 0); + /* configure h/w Offload */ + mask = mbuf->ol_flags & PKT_TX_L4_MASK; + if ((mask == PKT_TX_TCP_CKSUM) || (mask == PKT_TX_UDP_CKSUM)) + AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x3); + else if (mbuf->ol_flags & PKT_TX_IP_CKSUM) + AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x1); + rte_wmb(); + + /* Set OWN bit */ + AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, 
OWN, 1); + rte_wmb(); + + /* Save mbuf */ + txq->sw_ring[idx] = mbuf; + /* Update current index*/ + txq->cur++; + /* Update stats */ + txq->bytes += mbuf->pkt_len; + + return 0; +} + +/* Eal supported tx wrapper*/ +uint16_t +axgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + PMD_INIT_FUNC_TRACE(); + + if (unlikely(nb_pkts == 0)) + return nb_pkts; + + struct axgbe_tx_queue *txq; + uint16_t nb_desc_free; + uint16_t nb_pkt_sent = 0; + uint16_t idx; + uint32_t tail_addr; + struct rte_mbuf *mbuf; + + txq = (struct axgbe_tx_queue *)tx_queue; + nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty); + + if (unlikely(nb_desc_free <= txq->free_thresh)) { + axgbe_xmit_cleanup(txq); + nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty); + if (unlikely(nb_desc_free == 0)) + return 0; + } + nb_pkts = RTE_MIN(nb_desc_free, nb_pkts); + while (nb_pkts--) { + mbuf = *tx_pkts++; + if (axgbe_xmit_hw(txq, mbuf)) + goto out; + nb_pkt_sent++; + } +out: + /* Sync read and write */ + rte_mb(); + idx = AXGBE_GET_DESC_IDX(txq, txq->cur); + tail_addr = low32_value(txq->ring_phys_addr + + idx * sizeof(struct axgbe_tx_desc)); + /* Update tail reg with next immediate address to kick Tx DMA channel*/ + AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDTR_LO, tail_addr); + txq->pkts += nb_pkt_sent; + return nb_pkt_sent; +} + +void axgbe_dev_clear_queues(struct rte_eth_dev *dev) +{ + PMD_INIT_FUNC_TRACE(); + uint8_t i; + struct axgbe_rx_queue *rxq; + struct axgbe_tx_queue *txq; + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + + if (rxq) { + axgbe_rx_queue_release(rxq); + dev->data->rx_queues[i] = NULL; + } + } + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + + if (txq) { + axgbe_tx_queue_release(txq); + dev->data->tx_queues[i] = NULL; + } + } +} diff --git a/drivers/net/axgbe/axgbe_rxtx.h b/drivers/net/axgbe/axgbe_rxtx.h new file mode 100644 index 00000000..917da58c --- /dev/null +++ b/drivers/net/axgbe/axgbe_rxtx.h @@ -0,0 +1,186 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved. + * Copyright(c) 2018 Synopsys, Inc. All rights reserved. 
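[Editor's illustration] The ordering in axgbe_xmit_hw() above matters: the descriptor fields must be globally visible before OWN is flipped, and OWN must be visible before the tail doorbell, hence the two rte_wmb() calls. The standalone model below substitutes __atomic_thread_fence() for rte_wmb() and a plain in-memory struct for the real descriptor; DESC_OWN mirrors AXGBE_DESC_OWN.

    #include <stdint.h>
    #include <stdio.h>

    #define DESC_OWN 0x80000000u  /* bit 31 of desc3, as in AXGBE_DESC_OWN */

    struct tx_desc { uint64_t baddr; uint32_t desc2; volatile uint32_t desc3; };

    static void post_tx(struct tx_desc *d, uint64_t buf_iova, uint32_t len)
    {
        d->baddr = buf_iova;                      /* 1. buffer address    */
        d->desc2 = len;                           /* 2. length/control    */
        __atomic_thread_fence(__ATOMIC_RELEASE);  /* rte_wmb() in the PMD */
        d->desc3 = DESC_OWN | len;                /* 3. NIC owns it now   */
        __atomic_thread_fence(__ATOMIC_RELEASE);
        /* 4. only now would the PMD write the DMA_CH_TDTR_LO doorbell */
    }

    int main(void)
    {
        struct tx_desc d = { 0 };

        post_tx(&d, 0x1000, 64);
        printf("desc3 = 0x%08x\n", (unsigned)d.desc3);
        return 0;
    }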
+ */ + +#ifndef _AXGBE_RXTX_H_ +#define _AXGBE_RXTX_H_ + +/* to suppress gcc warnings related to descriptor casting*/ +#ifdef RTE_TOOLCHAIN_GCC +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif + +#ifdef RTE_TOOLCHAIN_CLANG +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif + +/* Descriptor related defines */ +#define AXGBE_MAX_RING_DESC 4096 /*should be power of 2*/ +#define AXGBE_TX_DESC_MIN_FREE (AXGBE_MAX_RING_DESC >> 3) +#define AXGBE_TX_DESC_MAX_PROC (AXGBE_MAX_RING_DESC >> 1) +#define AXGBE_MIN_RING_DESC 32 +#define RTE_AXGBE_DESCS_PER_LOOP 4 +#define RTE_AXGBE_MAX_RX_BURST 32 + +#define AXGBE_RX_FREE_THRESH 32 +#define AXGBE_TX_FREE_THRESH 32 + +#define AXGBE_DESC_ALIGN 128 +#define AXGBE_DESC_OWN 0x80000000 +#define AXGBE_ERR_STATUS 0x000f0000 +#define AXGBE_L3_CSUM_ERR 0x00050000 +#define AXGBE_L4_CSUM_ERR 0x00060000 + +#include "axgbe_common.h" + +#define AXGBE_GET_DESC_PT(_queue, _idx) \ + (((_queue)->desc) + \ + ((_idx) & ((_queue)->nb_desc - 1))) + +#define AXGBE_GET_DESC_IDX(_queue, _idx) \ + ((_idx) & ((_queue)->nb_desc - 1)) \ + +/* Rx desc format */ +union axgbe_rx_desc { + struct { + uint64_t baddr; + uint32_t desc2; + uint32_t desc3; + } read; + struct { + uint32_t desc0; + uint32_t desc1; + uint32_t desc2; + uint32_t desc3; + } write; +}; + +struct axgbe_rx_queue { + /* membuf pool for rx buffers */ + struct rte_mempool *mb_pool; + /* H/w Rx buffer size configured in DMA */ + unsigned int buf_size; + /* CRC h/w offload */ + uint16_t crc_len; + /* address of s/w rx buffers */ + struct rte_mbuf **sw_ring; + /* Port private data */ + struct axgbe_port *pdata; + /* Number of Rx descriptors in queue */ + uint16_t nb_desc; + /* max free RX desc to hold */ + uint16_t free_thresh; + /* Index of descriptor to check for packet availability */ + uint64_t cur; + /* Index of descriptor to check for buffer reallocation */ + uint64_t dirty; + /* Software Rx descriptor ring*/ + volatile union axgbe_rx_desc *desc; + /* Ring physical address */ + uint64_t ring_phys_addr; + /* Dma Channel register address */ + void *dma_regs; + /* Dma channel tail register address*/ + volatile uint32_t *dma_tail_reg; + /* DPDK queue index */ + uint16_t queue_id; + /* dpdk port id*/ + uint16_t port_id; + /* queue stats */ + uint64_t pkts; + uint64_t bytes; + uint64_t errors; + /* Number of mbufs allocated from pool*/ + uint64_t mbuf_alloc; + +} __rte_cache_aligned; + +/*Tx descriptor format */ +struct axgbe_tx_desc { + phys_addr_t baddr; + uint32_t desc2; + uint32_t desc3; +}; + +struct axgbe_tx_queue { + /* Port private data reference */ + struct axgbe_port *pdata; + /* Number of Tx descriptors in queue*/ + uint16_t nb_desc; + /* Start freeing TX buffers if there are less free descriptors than + * this value + */ + uint16_t free_thresh; + /* Available descriptors for Tx processing*/ + uint16_t nb_desc_free; + /* Batch of mbufs/descs to release */ + uint16_t free_batch_cnt; + /* Flag for vector support */ + uint16_t vector_disable; + /* Index of descriptor to be used for current transfer */ + uint64_t cur; + /* Index of descriptor to check for transfer complete */ + uint64_t dirty; + /* Virtual address of ring */ + volatile struct axgbe_tx_desc *desc; + /* Physical address of ring */ + uint64_t ring_phys_addr; + /* Dma channel register space */ + void *dma_regs; + /* Dma tail register address of ring*/ + volatile uint32_t *dma_tail_reg; + /* Tx queue index/id*/ + uint16_t queue_id; + /* Reference to hold Tx mbufs mapped to Tx descriptors freed + * after transmission confirmation + */ + struct 
rte_mbuf **sw_ring; + /* dpdk port id*/ + uint16_t port_id; + /* queue stats */ + uint64_t pkts; + uint64_t bytes; + uint64_t errors; + +} __rte_cache_aligned; + +/*Queue related APIs */ + +/* + * RX/TX function prototypes + */ + + +void axgbe_dev_tx_queue_release(void *txq); +int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id, + uint16_t nb_tx_desc, unsigned int socket_id, + const struct rte_eth_txconf *tx_conf); +void axgbe_dev_enable_tx(struct rte_eth_dev *dev); +void axgbe_dev_disable_tx(struct rte_eth_dev *dev); +int axgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id); +int axgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id); + +uint16_t axgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); +uint16_t axgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); + + +void axgbe_dev_rx_queue_release(void *rxq); +int axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id, + uint16_t nb_rx_desc, unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mb_pool); +void axgbe_dev_enable_rx(struct rte_eth_dev *dev); +void axgbe_dev_disable_rx(struct rte_eth_dev *dev); +int axgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id); +int axgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id); +uint16_t axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); +uint16_t axgbe_recv_pkts_threshold_refresh(void *rx_queue, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); +void axgbe_dev_clear_queues(struct rte_eth_dev *dev); + +#endif /* _AXGBE_RXTX_H_ */ diff --git a/drivers/net/axgbe/axgbe_rxtx_vec_sse.c b/drivers/net/axgbe/axgbe_rxtx_vec_sse.c new file mode 100644 index 00000000..9be70371 --- /dev/null +++ b/drivers/net/axgbe/axgbe_rxtx_vec_sse.c @@ -0,0 +1,93 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved. + * Copyright(c) 2018 Synopsys, Inc. All rights reserved. 
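[Editor's illustration] union axgbe_rx_desc above aliases the same 16 descriptor bytes as a 'read' layout (host programs the buffer address for DMA) and a 'write' layout (the NIC writes back four status dwords). Standalone model of the two views; OWN stands in for the RX_NORMAL_DESC3 OWN bit:

    #include <stdint.h>
    #include <stdio.h>

    #define OWN 0x80000000u

    union rx_desc {
        struct { uint64_t baddr; uint32_t desc2; uint32_t desc3; } read;
        struct { uint32_t desc0, desc1, desc2, desc3; } write;
    };

    int main(void)
    {
        union rx_desc d = { 0 };

        /* Refill: host view -- program the DMA address, give OWN away. */
        d.read.baddr = 0x3000;
        d.read.desc3 = OWN;

        /* Completion: NIC view -- it clears OWN and writes status back
         * (simulated here by overwriting desc3 ourselves). */
        d.write.desc3 = 0x40;             /* e.g. packet-length bits */
        if (!(d.write.desc3 & OWN))
            printf("writeback: desc0..3 = %u %u %u 0x%x\n",
                   d.write.desc0, d.write.desc1,
                   d.write.desc2, d.write.desc3);
        return 0;
    }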
+ */ + +#include "axgbe_ethdev.h" +#include "axgbe_rxtx.h" +#include "axgbe_phy.h" + +#include +#include +#include + +/* Useful to avoid shifting for every descriptor prepration*/ +#define TX_DESC_CTRL_FLAGS 0xb000000000000000 +#define TX_FREE_BULK 8 +#define TX_FREE_BULK_CHECK (TX_FREE_BULK - 1) + +static inline void +axgbe_vec_tx(volatile struct axgbe_tx_desc *desc, + struct rte_mbuf *mbuf) +{ + __m128i descriptor = _mm_set_epi64x((uint64_t)mbuf->pkt_len << 32 | + TX_DESC_CTRL_FLAGS | mbuf->data_len, + mbuf->buf_iova + + mbuf->data_off); + _mm_store_si128((__m128i *)desc, descriptor); +} + +static void +axgbe_xmit_cleanup_vec(struct axgbe_tx_queue *txq) +{ + volatile struct axgbe_tx_desc *desc; + int idx, i; + + idx = AXGBE_GET_DESC_IDX(txq, txq->dirty + txq->free_batch_cnt + - 1); + desc = &txq->desc[idx]; + if (desc->desc3 & AXGBE_DESC_OWN) + return; + /* memset avoided for desc ctrl fields since in vec_tx path + * all 128 bits are populated + */ + for (i = 0; i < txq->free_batch_cnt; i++, idx--) + rte_pktmbuf_free_seg(txq->sw_ring[idx]); + + + txq->dirty += txq->free_batch_cnt; + txq->nb_desc_free += txq->free_batch_cnt; +} + +uint16_t +axgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + PMD_INIT_FUNC_TRACE(); + + struct axgbe_tx_queue *txq; + uint16_t idx, nb_commit, loop, i; + uint32_t tail_addr; + + txq = (struct axgbe_tx_queue *)tx_queue; + if (txq->nb_desc_free < txq->free_thresh) { + axgbe_xmit_cleanup_vec(txq); + if (unlikely(txq->nb_desc_free == 0)) + return 0; + } + nb_pkts = RTE_MIN(txq->nb_desc_free, nb_pkts); + nb_commit = nb_pkts; + idx = AXGBE_GET_DESC_IDX(txq, txq->cur); + loop = txq->nb_desc - idx; + if (nb_commit >= loop) { + for (i = 0; i < loop; ++i, ++idx, ++tx_pkts) { + axgbe_vec_tx(&txq->desc[idx], *tx_pkts); + txq->sw_ring[idx] = *tx_pkts; + } + nb_commit -= loop; + idx = 0; + } + for (i = 0; i < nb_commit; ++i, ++idx, ++tx_pkts) { + axgbe_vec_tx(&txq->desc[idx], *tx_pkts); + txq->sw_ring[idx] = *tx_pkts; + } + txq->cur += nb_pkts; + tail_addr = (uint32_t)(txq->ring_phys_addr + + idx * sizeof(struct axgbe_tx_desc)); + /* Update tail reg with next immediate address to kick Tx DMA channel*/ + rte_write32(tail_addr, (void *)txq->dma_tail_reg); + txq->pkts += nb_pkts; + txq->nb_desc_free -= nb_pkts; + + return nb_pkts; +} diff --git a/drivers/net/axgbe/meson.build b/drivers/net/axgbe/meson.build new file mode 100644 index 00000000..548ffff7 --- /dev/null +++ b/drivers/net/axgbe/meson.build @@ -0,0 +1,19 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright (c) 2018 Advanced Micro Devices, Inc. All rights reserved. 
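[Editor's illustration] axgbe_vec_tx() above is the heart of the SSE path: the entire 16-byte Tx descriptor, OWN bit included, is composed in one __m128i and published with a single aligned store, so no per-packet barrier-then-OWN sequence is needed. The sketch below reproduces the composition; reading TX_DESC_CTRL_FLAGS (0xb000000000000000) as the OWN/FD/LD bits of desc3 is this example's inference from the scalar path, not a documented constant.

    #include <emmintrin.h>   /* SSE2 */
    #include <inttypes.h>
    #include <stdio.h>

    #define CTRL_FLAGS 0xb000000000000000ULL  /* as TX_DESC_CTRL_FLAGS */

    struct tx_desc { uint64_t baddr; uint64_t desc23; };

    int main(void)
    {
        struct tx_desc d __attribute__((aligned(16)));
        uint64_t iova = 0x2000, pkt_len = 128, data_len = 128;

        /* High qword: desc3|desc2 with flags and lengths folded in;
         * low qword: the buffer address. One store publishes all 16 B. */
        __m128i v = _mm_set_epi64x(
                (int64_t)((pkt_len << 32) | CTRL_FLAGS | data_len),
                (int64_t)iova);
        _mm_store_si128((__m128i *)&d, v);

        printf("baddr=0x%" PRIx64 " desc23=0x%016" PRIx64 "\n",
               d.baddr, d.desc23);
        return 0;
    }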
+ +if host_machine.system() != 'linux' + build = false +endif + +sources = files('axgbe_ethdev.c', + 'axgbe_dev.c', + 'axgbe_mdio.c', + 'axgbe_phy_impl.c', + 'axgbe_i2c.c', + 'axgbe_rxtx.c') + +cflags += '-Wno-cast-qual' + +if arch_subdir == 'x86' + sources += files('axgbe_rxtx_vec_sse.c') +endif diff --git a/drivers/net/axgbe/rte_pmd_axgbe_version.map b/drivers/net/axgbe/rte_pmd_axgbe_version.map new file mode 100644 index 00000000..b26efa67 --- /dev/null +++ b/drivers/net/axgbe/rte_pmd_axgbe_version.map @@ -0,0 +1,4 @@ +DPDK_18.05 { + + local: *; +}; diff --git a/drivers/net/bnx2x/LICENSE.bnx2x_pmd b/drivers/net/bnx2x/LICENSE.bnx2x_pmd index 96c7c1e1..64c6ef2c 100644 --- a/drivers/net/bnx2x/LICENSE.bnx2x_pmd +++ b/drivers/net/bnx2x/LICENSE.bnx2x_pmd @@ -1,28 +1,3 @@ -/* - * BSD LICENSE - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of Broadcom Corporation nor the name of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written consent. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS' - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF - * THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2014-2018 Cavium Inc. */ diff --git a/drivers/net/bnx2x/Makefile b/drivers/net/bnx2x/Makefile index 90ff8b1e..150b4cfa 100644 --- a/drivers/net/bnx2x/Makefile +++ b/drivers/net/bnx2x/Makefile @@ -1,3 +1,8 @@ +# Copyright (c) 2014 - 2018 Cavium Inc. +# All rights reserved. +# www.cavium.com +# +# See LICENSE.bnx2x_pmd for copyright and licensing details. include $(RTE_SDK)/mk/rte.vars.mk # @@ -17,10 +22,6 @@ EXPORT_MAP := rte_pmd_bnx2x_version.map LIBABIVER := 1 -ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y) -CFLAGS += -wd188 #188: enumerated type mixed with another type -endif - # # all source are stored in SRCS-y # diff --git a/drivers/net/bnx2x/bnx2x.c b/drivers/net/bnx2x/bnx2x.c index fb02d0f3..84ade5fb 100644 --- a/drivers/net/bnx2x/bnx2x.c +++ b/drivers/net/bnx2x/bnx2x.c @@ -6,9 +6,9 @@ * Gary Zambrano * * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. - * Copyright (c) 2015 QLogic Corporation. + * Copyright (c) 2015-2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.bnx2x_pmd for copyright and licensing details. 
*/ @@ -170,16 +170,16 @@ bnx2x_dma_alloc(struct bnx2x_softc *sc, size_t size, struct bnx2x_dma *dma, dma->sc = sc; if (IS_PF(sc)) - sprintf(mz_name, "bnx2x%d_%s_%" PRIx64, SC_ABS_FUNC(sc), msg, + snprintf(mz_name, sizeof(mz_name), "bnx2x%d_%s_%" PRIx64, SC_ABS_FUNC(sc), msg, rte_get_timer_cycles()); else - sprintf(mz_name, "bnx2x%d_%s_%" PRIx64, sc->pcie_device, msg, + snprintf(mz_name, sizeof(mz_name), "bnx2x%d_%s_%" PRIx64, sc->pcie_device, msg, rte_get_timer_cycles()); /* Caller must take care that strlen(mz_name) < RTE_MEMZONE_NAMESIZE */ - z = rte_memzone_reserve_aligned(mz_name, (uint64_t) (size), + z = rte_memzone_reserve_aligned(mz_name, (uint64_t)size, SOCKET_ID_ANY, - 0, align); + RTE_MEMZONE_IOVA_CONTIG, align); if (z == NULL) { PMD_DRV_LOG(ERR, "DMA alloc failed for %s", msg); return -ENOMEM; @@ -8285,16 +8285,6 @@ static int bnx2x_get_device_info(struct bnx2x_softc *sc) REG_WR(sc, PXP2_REG_PGL_ADDR_90_F1, 0); REG_WR(sc, PXP2_REG_PGL_ADDR_94_F1, 0); } - -/* - * Enable internal target-read (in case we are probed after PF - * FLR). Must be done prior to any BAR read access. Only for - * 57712 and up - */ - if (!CHIP_IS_E1x(sc)) { - REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, - 1); - } } /* get the nvram size */ @@ -9671,7 +9661,17 @@ int bnx2x_attach(struct bnx2x_softc *sc) bnx2x_init_rte(sc); if (IS_PF(sc)) { -/* get device info and set params */ + /* Enable internal target-read (in case we are probed after PF + * FLR). Must be done prior to any BAR read access. Only for + * 57712 and up + */ + if (!CHIP_IS_E1x(sc)) { + REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, + 1); + DELAY(200000); + } + + /* get device info and set params */ if (bnx2x_get_device_info(sc) != 0) { PMD_DRV_LOG(NOTICE, "getting device info"); return -ENXIO; @@ -9680,7 +9680,7 @@ int bnx2x_attach(struct bnx2x_softc *sc) /* get phy settings from shmem and 'and' against admin settings */ bnx2x_get_phy_info(sc); } else { -/* Left mac of VF unfilled, PF should set it for VF */ + /* Left mac of VF unfilled, PF should set it for VF */ memset(sc->link_params.mac_addr, 0, ETHER_ADDR_LEN); } diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h index 17075d38..4150fd85 100644 --- a/drivers/net/bnx2x/bnx2x.h +++ b/drivers/net/bnx2x/bnx2x.h @@ -6,9 +6,9 @@ * Gary Zambrano * * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. - * Copyright (c) 2015 QLogic Corporation. + * Copyright (c) 2015-2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.bnx2x_pmd for copyright and licensing details. */ diff --git a/drivers/net/bnx2x/bnx2x_ethdev.c b/drivers/net/bnx2x/bnx2x_ethdev.c index 483d5a17..6a9cd581 100644 --- a/drivers/net/bnx2x/bnx2x_ethdev.c +++ b/drivers/net/bnx2x/bnx2x_ethdev.c @@ -1,9 +1,9 @@ /* * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. * - * Copyright (c) 2015 QLogic Corporation. + * Copyright (c) 2015-2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.bnx2x_pmd for copyright and licensing details. 
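[Editor's illustration] The bnx2x_dma_alloc() hunk above passes RTE_MEMZONE_IOVA_CONTIG where it previously passed 0: from DPDK 18.05 an IOVA-contiguous block must be requested explicitly, and device rings break silently without it. A minimal sketch of such a reservation, assuming a working EAL environment (zone name and sizes are arbitrary):

    #include <rte_eal.h>
    #include <rte_memzone.h>
    #include <stdio.h>

    int main(int argc, char **argv)
    {
        if (rte_eal_init(argc, argv) < 0)
            return 1;

        const struct rte_memzone *mz = rte_memzone_reserve_aligned(
                "demo_ring", 4096, SOCKET_ID_ANY,
                RTE_MEMZONE_IOVA_CONTIG,  /* one IOVA-contiguous block  */
                4096);                    /* page-style ring alignment  */
        if (mz == NULL)
            return 1;

        printf("va=%p iova=0x%llx\n", mz->addr,
               (unsigned long long)mz->iova);
        return 0;
    }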
*/ @@ -140,11 +140,13 @@ static int bnx2x_dev_configure(struct rte_eth_dev *dev) { struct bnx2x_softc *sc = dev->data->dev_private; + struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode; + int mp_ncpus = sysconf(_SC_NPROCESSORS_CONF); PMD_INIT_FUNC_TRACE(); - if (dev->data->dev_conf.rxmode.jumbo_frame) + if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) sc->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len; if (dev->data->nb_tx_queues > dev->data->nb_rx_queues) { @@ -447,13 +449,13 @@ static void bnx2x_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) { struct bnx2x_softc *sc = dev->data->dev_private; - dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev); dev_info->max_rx_queues = sc->max_rx_queues; dev_info->max_tx_queues = sc->max_tx_queues; dev_info->min_rx_bufsize = BNX2X_MIN_RX_BUF_SIZE; dev_info->max_rx_pktlen = BNX2X_MAX_RX_PKT_LEN; dev_info->max_mac_addrs = BNX2X_MAX_MAC_ADDRS; dev_info->speed_capa = ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G; + dev_info->rx_offload_capa = DEV_RX_OFFLOAD_JUMBO_FRAME; } static int @@ -642,24 +644,14 @@ static struct rte_pci_driver rte_bnx2xvf_pmd; static int eth_bnx2x_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) { - struct rte_eth_dev *eth_dev; - int ret; - - eth_dev = rte_eth_dev_pci_allocate(pci_dev, sizeof(struct bnx2x_softc)); - if (!eth_dev) - return -ENOMEM; - if (pci_drv == &rte_bnx2x_pmd) - ret = eth_bnx2x_dev_init(eth_dev); + return rte_eth_dev_pci_generic_probe(pci_dev, + sizeof(struct bnx2x_softc), eth_bnx2x_dev_init); else if (pci_drv == &rte_bnx2xvf_pmd) - ret = eth_bnx2xvf_dev_init(eth_dev); + return rte_eth_dev_pci_generic_probe(pci_dev, + sizeof(struct bnx2x_softc), eth_bnx2xvf_dev_init); else - ret = -EINVAL; - - if (ret) - rte_eth_dev_pci_release(eth_dev); - - return ret; + return -EINVAL; } static int eth_bnx2x_pci_remove(struct rte_pci_device *pci_dev) @@ -695,10 +687,10 @@ RTE_INIT(bnx2x_init_log); static void bnx2x_init_log(void) { - bnx2x_logtype_init = rte_log_register("pmd.bnx2x.init"); + bnx2x_logtype_init = rte_log_register("pmd.net.bnx2x.init"); if (bnx2x_logtype_init >= 0) rte_log_set_level(bnx2x_logtype_init, RTE_LOG_NOTICE); - bnx2x_logtype_driver = rte_log_register("pmd.bnx2x.driver"); + bnx2x_logtype_driver = rte_log_register("pmd.net.bnx2x.driver"); if (bnx2x_logtype_driver >= 0) rte_log_set_level(bnx2x_logtype_driver, RTE_LOG_NOTICE); } diff --git a/drivers/net/bnx2x/bnx2x_ethdev.h b/drivers/net/bnx2x/bnx2x_ethdev.h index 37cac158..f05be7ee 100644 --- a/drivers/net/bnx2x/bnx2x_ethdev.h +++ b/drivers/net/bnx2x/bnx2x_ethdev.h @@ -1,9 +1,9 @@ /* * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. * - * Copyright (c) 2015 QLogic Corporation. + * Copyright (c) 2015-2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.bnx2x_pmd for copyright and licensing details. */ diff --git a/drivers/net/bnx2x/bnx2x_logs.h b/drivers/net/bnx2x/bnx2x_logs.h index 08c1b764..69a2fe1d 100644 --- a/drivers/net/bnx2x/bnx2x_logs.h +++ b/drivers/net/bnx2x/bnx2x_logs.h @@ -1,9 +1,9 @@ /* * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. * - * Copyright (c) 2015 QLogic Corporation. + * Copyright (c) 2015-2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.bnx2x_pmd for copyright and licensing details. 
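[Editor's illustration] The bnx2x_dev_configure() hunk above replaces the removed rxmode.jumbo_frame bit-field with a test of the 18.05 per-port offload bitmask, and bnx2x_dev_infos_get() now advertises DEV_RX_OFFLOAD_JUMBO_FRAME so the check can succeed. Sketch of the consumer-side pattern; rx_mtu() and the 9000-byte value are illustrative:

    #include <rte_ethdev.h>
    #include <stdio.h>

    /* Returns the MTU implied by the port Rx configuration. */
    static uint32_t rx_mtu(const struct rte_eth_rxmode *rxmode, uint32_t dflt)
    {
        /* Flag test replaces the old rxmode->jumbo_frame bit-field. */
        if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
            return rxmode->max_rx_pkt_len;
        return dflt;
    }

    int main(void)
    {
        struct rte_eth_rxmode rxmode = {
            .offloads = DEV_RX_OFFLOAD_JUMBO_FRAME,
            .max_rx_pkt_len = 9000,
        };

        printf("mtu = %u\n", rx_mtu(&rxmode, 1500));
        return 0;
    }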
*/ diff --git a/drivers/net/bnx2x/bnx2x_rxtx.c b/drivers/net/bnx2x/bnx2x_rxtx.c index a0d4ac92..331884cf 100644 --- a/drivers/net/bnx2x/bnx2x_rxtx.c +++ b/drivers/net/bnx2x/bnx2x_rxtx.c @@ -1,9 +1,9 @@ /* * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. * - * Copyright (c) 2015 QLogic Corporation. + * Copyright (c) 2015-2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.bnx2x_pmd for copyright and licensing details. */ @@ -26,7 +26,8 @@ ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name, if (mz) return mz; - return rte_memzone_reserve_aligned(z_name, ring_size, socket_id, 0, BNX2X_PAGE_SIZE); + return rte_memzone_reserve_aligned(z_name, ring_size, socket_id, + RTE_MEMZONE_IOVA_CONTIG, BNX2X_PAGE_SIZE); } static void @@ -140,7 +141,8 @@ bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev, return -ENOMEM; } rxq->sw_ring[idx] = mbuf; - rxq->rx_ring[idx] = mbuf->buf_iova; + rxq->rx_ring[idx] = + rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf)); } rxq->pkt_first_seg = NULL; rxq->pkt_last_seg = NULL; @@ -400,7 +402,8 @@ bnx2x_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) rx_mb = rxq->sw_ring[bd_cons]; rxq->sw_ring[bd_cons] = new_mb; - rxq->rx_ring[bd_prod] = new_mb->buf_iova; + rxq->rx_ring[bd_prod] = + rte_cpu_to_le_64(rte_mbuf_data_iova_default(new_mb)); rx_pref = NEXT_RX_BD(bd_cons) & MAX_RX_BD(rxq); rte_prefetch0(rxq->sw_ring[rx_pref]); @@ -409,7 +412,7 @@ bnx2x_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) rte_prefetch0(&rxq->sw_ring[rx_pref]); } - rx_mb->data_off = pad; + rx_mb->data_off = pad + RTE_PKTMBUF_HEADROOM; rx_mb->nb_segs = 1; rx_mb->next = NULL; rx_mb->pkt_len = rx_mb->data_len = len; diff --git a/drivers/net/bnx2x/bnx2x_rxtx.h b/drivers/net/bnx2x/bnx2x_rxtx.h index 9600e0f1..94b9e1b6 100644 --- a/drivers/net/bnx2x/bnx2x_rxtx.h +++ b/drivers/net/bnx2x/bnx2x_rxtx.h @@ -1,9 +1,9 @@ /* * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. * - * Copyright (c) 2015 QLogic Corporation. + * Copyright (c) 2015-2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.bnx2x_pmd for copyright and licensing details. */ diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c index b9b85963..e3880abe 100644 --- a/drivers/net/bnx2x/bnx2x_stats.c +++ b/drivers/net/bnx2x/bnx2x_stats.c @@ -1,14 +1,14 @@ /*- - * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved. + * Copyright (c) 2007-2013 Cavium Inc. All rights reserved. * * Eric Davis * David Christensen * Gary Zambrano * * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. - * Copyright (c) 2015 QLogic Corporation. + * Copyright (c) 2015-2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.bnx2x_pmd for copyright and licensing details. */ diff --git a/drivers/net/bnx2x/bnx2x_stats.h b/drivers/net/bnx2x/bnx2x_stats.h index 3396de31..6fcaf607 100644 --- a/drivers/net/bnx2x/bnx2x_stats.h +++ b/drivers/net/bnx2x/bnx2x_stats.h @@ -1,14 +1,14 @@ /*- - * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved. + * Copyright (c) 2007-2013 Cavium Inc. All rights reserved. * * Eric Davis * David Christensen * Gary Zambrano * * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. - * Copyright (c) 2015 QLogic Corporation. + * Copyright (c) 2015-2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.bnx2x_pmd for copyright and licensing details. 
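[Editor's illustration] The bnx2x_rxtx.c hunks above pair two fixes: Rx ring entries now carry rte_mbuf_data_iova_default() (buf_iova plus RTE_PKTMBUF_HEADROOM, converted to little-endian for the device), so on completion data_off must be pad + RTE_PKTMBUF_HEADROOM rather than the raw pad, or the payload pointer lands a headroom short of the frame. Small sketch of the invariant (pool size and pad value are arbitrary):

    #include <rte_eal.h>
    #include <rte_mbuf.h>
    #include <rte_byteorder.h>
    #include <stdio.h>

    int main(int argc, char **argv)
    {
        if (rte_eal_init(argc, argv) < 0)
            return 1;

        struct rte_mempool *mp = rte_pktmbuf_pool_create("demo_pool", 64,
                0, 0, RTE_MBUF_DEFAULT_BUF_SIZE, SOCKET_ID_ANY);
        struct rte_mbuf *m = mp ? rte_pktmbuf_alloc(mp) : NULL;
        if (m == NULL)
            return 1;

        /* What the PMD programs into the Rx ring entry ... */
        uint64_t entry = rte_cpu_to_le_64(rte_mbuf_data_iova_default(m));
        /* ... and the matching data_off once the NIC fills the buffer,
         * when the device prepends 'pad' bytes before the frame. */
        uint16_t pad = 2;
        m->data_off = pad + RTE_PKTMBUF_HEADROOM;

        printf("ring entry=0x%llx data_off=%u\n",
               (unsigned long long)entry, m->data_off);
        return 0;
    }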
*/ diff --git a/drivers/net/bnx2x/bnx2x_vfpf.c b/drivers/net/bnx2x/bnx2x_vfpf.c index 3c08f2a2..dacad771 100644 --- a/drivers/net/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/bnx2x/bnx2x_vfpf.c @@ -1,9 +1,9 @@ /* * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. * - * Copyright (c) 2015 QLogic Corporation. + * Copyright (c) 2015-2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.bnx2x_pmd for copyright and licensing details. */ diff --git a/drivers/net/bnx2x/bnx2x_vfpf.h b/drivers/net/bnx2x/bnx2x_vfpf.h index d7cc11be..c4675d4c 100644 --- a/drivers/net/bnx2x/bnx2x_vfpf.h +++ b/drivers/net/bnx2x/bnx2x_vfpf.h @@ -1,9 +1,9 @@ /* * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. * - * Copyright (c) 2015 QLogic Corporation. + * Copyright (c) 2015-2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.bnx2x_pmd for copyright and licensing details. */ diff --git a/drivers/net/bnx2x/ecore_fw_defs.h b/drivers/net/bnx2x/ecore_fw_defs.h index ab490efa..d10dd108 100644 --- a/drivers/net/bnx2x/ecore_fw_defs.h +++ b/drivers/net/bnx2x/ecore_fw_defs.h @@ -1,13 +1,13 @@ /*- - * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved. + * Copyright (c) 2007-2013 Cavium Inc. All rights reserved. * * Eric Davis * David Christensen * Gary Zambrano * - * Copyright (c) 2014-2015 QLogic Corporation. + * Copyright (c) 2014-2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.bnx2x_pmd for copyright and licensing details. */ diff --git a/drivers/net/bnx2x/ecore_hsi.h b/drivers/net/bnx2x/ecore_hsi.h index 5cce6647..0220e5f9 100644 --- a/drivers/net/bnx2x/ecore_hsi.h +++ b/drivers/net/bnx2x/ecore_hsi.h @@ -1,13 +1,13 @@ /*- - * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved. + * Copyright (c) 2007-2013 Cavium Inc. All rights reserved. * * Eric Davis * David Christensen * Gary Zambrano * - * Copyright (c) 2014-2015 QLogic Corporation. + * Copyright (c) 2014-2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.bnx2x_pmd for copyright and licensing details. */ diff --git a/drivers/net/bnx2x/ecore_init.h b/drivers/net/bnx2x/ecore_init.h index 4576c565..8d00abb7 100644 --- a/drivers/net/bnx2x/ecore_init.h +++ b/drivers/net/bnx2x/ecore_init.h @@ -1,14 +1,14 @@ /*- - * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved. + * Copyright (c) 2007-2013 Cavium Inc. All rights reserved. * * Eric Davis * David Christensen * Gary Zambrano * * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. - * Copyright (c) 2015 QLogic Corporation. + * Copyright (c) 2015-2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.bnx2x_pmd for copyright and licensing details. */ diff --git a/drivers/net/bnx2x/ecore_init_ops.h b/drivers/net/bnx2x/ecore_init_ops.h index b6f98324..dd5df3d5 100644 --- a/drivers/net/bnx2x/ecore_init_ops.h +++ b/drivers/net/bnx2x/ecore_init_ops.h @@ -1,14 +1,14 @@ /*- - * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved. + * Copyright (c) 2007-2013 Cavium Inc. All rights reserved. * * Eric Davis * David Christensen * Gary Zambrano * * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. - * Copyright (c) 2015 QLogic Corporation. + * Copyright (c) 2015-2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.bnx2x_pmd for copyright and licensing details. 
*/ diff --git a/drivers/net/bnx2x/ecore_mfw_req.h b/drivers/net/bnx2x/ecore_mfw_req.h index 57529097..c798c74c 100644 --- a/drivers/net/bnx2x/ecore_mfw_req.h +++ b/drivers/net/bnx2x/ecore_mfw_req.h @@ -1,13 +1,13 @@ /*- - * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved. + * Copyright (c) 2007-2013 Cavium Inc. All rights reserved. * * Eric Davis * David Christensen * Gary Zambrano * - * Copyright (c) 2014-2015 QLogic Corporation. + * Copyright (c) 2014-2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.bnx2x_pmd for copyright and licensing details. */ diff --git a/drivers/net/bnx2x/ecore_reg.h b/drivers/net/bnx2x/ecore_reg.h index d8203b45..9800bafc 100644 --- a/drivers/net/bnx2x/ecore_reg.h +++ b/drivers/net/bnx2x/ecore_reg.h @@ -1,13 +1,13 @@ /*- - * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved. + * Copyright (c) 2007-2013 Cavium Inc. All rights reserved. * * Eric Davis * David Christensen * Gary Zambrano * - * Copyright (c) 2014-2015 QLogic Corporation. + * Copyright (c) 2014-2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.bnx2x_pmd for copyright and licensing details. */ diff --git a/drivers/net/bnx2x/ecore_sp.c b/drivers/net/bnx2x/ecore_sp.c index a75a7fa4..75329672 100644 --- a/drivers/net/bnx2x/ecore_sp.c +++ b/drivers/net/bnx2x/ecore_sp.c @@ -1,14 +1,14 @@ /*- - * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved. + * Copyright (c) 2007-2013 Cavium Inc. All rights reserved. * * Eric Davis * David Christensen * Gary Zambrano * * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. - * Copyright (c) 2015 QLogic Corporation. + * Copyright (c) 2015-2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.bnx2x_pmd for copyright and licensing details. */ diff --git a/drivers/net/bnx2x/ecore_sp.h b/drivers/net/bnx2x/ecore_sp.h index ff40413c..772c8b1b 100644 --- a/drivers/net/bnx2x/ecore_sp.h +++ b/drivers/net/bnx2x/ecore_sp.h @@ -1,14 +1,14 @@ /*- - * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved. + * Copyright (c) 2007-2013 Cavium Inc. All rights reserved. * * Eric Davis * David Christensen * Gary Zambrano * * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. - * Copyright (c) 2015 QLogic Corporation. + * Copyright (c) 2015-2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.bnx2x_pmd for copyright and licensing details. */ diff --git a/drivers/net/bnx2x/elink.c b/drivers/net/bnx2x/elink.c index 9d0f3136..34a29373 100644 --- a/drivers/net/bnx2x/elink.c +++ b/drivers/net/bnx2x/elink.c @@ -1,14 +1,14 @@ /* - * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved. + * Copyright (c) 2007-2013 Cavium Inc. All rights reserved. * * Eric Davis * David Christensen * Gary Zambrano * * Copyright (c) 2013-2015 Brocade Communications Systems, Inc. - * Copyright (c) 2015 QLogic Corporation. + * Copyright (c) 2015-2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.bnx2x_pmd for copyright and licensing details. 
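[Editor's illustration] The elink.c hunks that follow change handler return types (elink_status_t vs uint8_t, or dropping an unused return) so each handler matches its ops typedef exactly, which is what lets all the (config_init_t)-style casts in the phy tables disappear: calling through a function pointer of a mismatched type is undefined behaviour in C, even when the cast compiles. Self-contained model of the corrected pattern:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint8_t (*config_init_t)(int port);

    /* After the change: the handler's prototype matches the typedef, so
     * the table entry needs no cast and the indirect call is well defined. */
    static uint8_t xgxs_config_init(int port)
    {
        printf("config_init for port %d\n", port);
        return 0;
    }

    struct phy_ops { config_init_t config_init; };

    int main(void)
    {
        struct phy_ops ops = { .config_init = xgxs_config_init };

        /* Before: ops.config_init = (config_init_t)mismatched_handler;
         * compiled fine, but invoking it was undefined behaviour. */
        return ops.config_init(0);
    }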
*/ @@ -4143,9 +4143,9 @@ static void elink_sfp_e3_set_transmitter(struct elink_params *params, elink_set_cfg_pin(sc, cfg_pin + 3, tx_en ^ 1); } -static void elink_warpcore_config_init(struct elink_phy *phy, - struct elink_params *params, - struct elink_vars *vars) +static uint8_t elink_warpcore_config_init(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) { struct bnx2x_softc *sc = params->sc; uint32_t serdes_net_if; @@ -4222,7 +4222,7 @@ static void elink_warpcore_config_init(struct elink_phy *phy, case PORT_HW_CFG_NET_SERDES_IF_DXGXS: if (vars->line_speed != ELINK_SPEED_20000) { PMD_DRV_LOG(DEBUG, "Speed not supported yet"); - return; + return 0; } PMD_DRV_LOG(DEBUG, "Setting 20G DXGXS"); elink_warpcore_set_20G_DXGXS(sc, phy, lane); @@ -4242,13 +4242,15 @@ static void elink_warpcore_config_init(struct elink_phy *phy, PMD_DRV_LOG(DEBUG, "Unsupported Serdes Net Interface 0x%x", serdes_net_if); - return; + return 0; } } /* Take lane out of reset after configuration is finished */ elink_warpcore_reset_lane(sc, phy, 0); PMD_DRV_LOG(DEBUG, "Exit config init"); + + return 0; } static void elink_warpcore_link_reset(struct elink_phy *phy, @@ -5226,9 +5228,9 @@ static elink_status_t elink_get_link_speed_duplex(struct elink_phy *phy, return ELINK_STATUS_OK; } -static elink_status_t elink_link_settings_status(struct elink_phy *phy, - struct elink_params *params, - struct elink_vars *vars) +static uint8_t elink_link_settings_status(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) { struct bnx2x_softc *sc = params->sc; @@ -5299,9 +5301,9 @@ static elink_status_t elink_link_settings_status(struct elink_phy *phy, return rc; } -static elink_status_t elink_warpcore_read_status(struct elink_phy *phy, - struct elink_params *params, - struct elink_vars *vars) +static uint8_t elink_warpcore_read_status(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) { struct bnx2x_softc *sc = params->sc; uint8_t lane; @@ -5520,9 +5522,9 @@ static void elink_set_preemphasis(struct elink_phy *phy, } } -static void elink_xgxs_config_init(struct elink_phy *phy, - struct elink_params *params, - struct elink_vars *vars) +static uint8_t elink_xgxs_config_init(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) { uint8_t enable_cl73 = (ELINK_SINGLE_MEDIA_DIRECT(params) || (params->loopback_mode == ELINK_LOOPBACK_XGXS)); @@ -5567,6 +5569,8 @@ static void elink_xgxs_config_init(struct elink_phy *phy, elink_initialize_sgmii_process(phy, params, vars); } + + return 0; } static elink_status_t elink_prepare_xgxs(struct elink_phy *phy, @@ -5751,8 +5755,8 @@ static void elink_link_int_ack(struct elink_params *params, } } -static elink_status_t elink_format_ver(uint32_t num, uint8_t * str, - uint16_t * len) +static uint8_t elink_format_ver(uint32_t num, uint8_t * str, + uint16_t * len) { uint8_t *str_ptr = str; uint32_t mask = 0xf0000000; @@ -5790,8 +5794,8 @@ static elink_status_t elink_format_ver(uint32_t num, uint8_t * str, return ELINK_STATUS_OK; } -static elink_status_t elink_null_format_ver(__rte_unused uint32_t spirom_ver, - uint8_t * str, uint16_t * len) +static uint8_t elink_null_format_ver(__rte_unused uint32_t spirom_ver, + uint8_t * str, uint16_t * len) { str[0] = '\0'; (*len)--; @@ -6802,9 +6806,9 @@ static void elink_8073_specific_func(struct elink_phy *phy, } } -static elink_status_t elink_8073_config_init(struct elink_phy *phy, - struct elink_params *params, - struct elink_vars *vars) +static uint8_t 
elink_8073_config_init(struct elink_phy *phy,
+				      struct elink_params *params,
+				      struct elink_vars *vars)
 {
 	struct bnx2x_softc *sc = params->sc;
 	uint16_t val = 0, tmp1;
@@ -7097,9 +7101,9 @@ static void elink_8073_link_reset(__rte_unused struct elink_phy *phy,
 /******************************************************************/
 /* BNX2X8705 PHY SECTION */
 /******************************************************************/
-static elink_status_t elink_8705_config_init(struct elink_phy *phy,
-					     struct elink_params *params,
-					     __rte_unused struct elink_vars
+static uint8_t elink_8705_config_init(struct elink_phy *phy,
+				      struct elink_params *params,
+				      __rte_unused struct elink_vars
 					     *vars)
 {
 	struct bnx2x_softc *sc = params->sc;
@@ -8403,9 +8407,9 @@ static uint8_t elink_8706_config_init(struct elink_phy *phy,
 	return ELINK_STATUS_OK;
 }
 
-static elink_status_t elink_8706_read_status(struct elink_phy *phy,
-					     struct elink_params *params,
-					     struct elink_vars *vars)
+static uint8_t elink_8706_read_status(struct elink_phy *phy,
+				      struct elink_params *params,
+				      struct elink_vars *vars)
 {
 	return elink_8706_8726_read_status(phy, params, vars);
 }
@@ -8477,9 +8481,9 @@ static uint8_t elink_8726_read_status(struct elink_phy *phy,
 	return link_up;
 }
 
-static elink_status_t elink_8726_config_init(struct elink_phy *phy,
-					     struct elink_params *params,
-					     struct elink_vars *vars)
+static uint8_t elink_8726_config_init(struct elink_phy *phy,
+				      struct elink_params *params,
+				      struct elink_vars *vars)
 {
 	struct bnx2x_softc *sc = params->sc;
 	PMD_DRV_LOG(DEBUG, "Initializing BNX2X8726");
@@ -8684,9 +8688,9 @@ static void elink_8727_config_speed(struct elink_phy *phy,
 	}
 }
 
-static elink_status_t elink_8727_config_init(struct elink_phy *phy,
-					     struct elink_params *params,
-					     __rte_unused struct elink_vars
+static uint8_t elink_8727_config_init(struct elink_phy *phy,
+				      struct elink_params *params,
+				      __rte_unused struct elink_vars
 					     *vars)
 {
 	uint32_t tx_en_mode;
@@ -9291,7 +9295,7 @@ static elink_status_t elink_848xx_cmn_config_init(struct elink_phy *phy,
 	return ELINK_STATUS_OK;
 }
 
-static elink_status_t elink_8481_config_init(struct elink_phy *phy,
+static uint8_t elink_8481_config_init(struct elink_phy *phy,
 				      struct elink_params *params,
 				      struct elink_vars *vars)
 {
@@ -9442,8 +9446,8 @@ static uint8_t elink_84833_get_reset_gpios(struct bnx2x_softc *sc,
 	return reset_gpios;
 }
 
-static elink_status_t elink_84833_hw_reset_phy(struct elink_phy *phy,
-					       struct elink_params *params)
+static void elink_84833_hw_reset_phy(struct elink_phy *phy,
+				     struct elink_params *params)
 {
 	struct bnx2x_softc *sc = params->sc;
 	uint8_t reset_gpios;
@@ -9471,8 +9475,6 @@ static elink_status_t elink_84833_hw_reset_phy(struct elink_phy *phy,
 			       MISC_REGISTERS_GPIO_OUTPUT_LOW);
 	DELAY(10);
 	PMD_DRV_LOG(DEBUG, "84833 hw reset on pin values 0x%x", reset_gpios);
-
-	return ELINK_STATUS_OK;
 }
 
 static elink_status_t elink_8483x_disable_eee(struct elink_phy *phy,
@@ -9513,9 +9515,9 @@ static elink_status_t elink_8483x_enable_eee(struct elink_phy *phy,
 }
 
 #define PHY84833_CONSTANT_LATENCY 1193
-static elink_status_t elink_848x3_config_init(struct elink_phy *phy,
-					      struct elink_params *params,
-					      struct elink_vars *vars)
+static uint8_t elink_848x3_config_init(struct elink_phy *phy,
+				       struct elink_params *params,
+				       struct elink_vars *vars)
 {
 	struct bnx2x_softc *sc = params->sc;
 	uint8_t port, initialize = 1;
@@ -9819,7 +9821,7 @@ static uint8_t elink_848xx_read_status(struct elink_phy *phy,
 	return link_up;
 }
 
-static elink_status_t elink_848xx_format_ver(uint32_t raw_ver, uint8_t * str,
+static uint8_t elink_848xx_format_ver(uint32_t raw_ver, uint8_t * str,
 				      uint16_t * len)
 {
 	elink_status_t status = ELINK_STATUS_OK;
@@ -10146,9 +10148,9 @@ static void elink_54618se_specific_func(struct elink_phy *phy,
 	}
 }
 
-static elink_status_t elink_54618se_config_init(struct elink_phy *phy,
-						struct elink_params *params,
-						struct elink_vars *vars)
+static uint8_t elink_54618se_config_init(struct elink_phy *phy,
+					 struct elink_params *params,
+					 struct elink_vars *vars)
 {
 	struct bnx2x_softc *sc = params->sc;
 	uint8_t port;
@@ -10542,9 +10544,9 @@ static void elink_7101_config_loopback(struct elink_phy *phy,
 			 MDIO_XS_DEVAD, MDIO_XS_SFX7101_XGXS_TEST1, 0x100);
 }
 
-static elink_status_t elink_7101_config_init(struct elink_phy *phy,
-					     struct elink_params *params,
-					     struct elink_vars *vars)
+static uint8_t elink_7101_config_init(struct elink_phy *phy,
+				      struct elink_params *params,
+				      struct elink_vars *vars)
 {
 	uint16_t fw_ver1, fw_ver2, val;
 	struct bnx2x_softc *sc = params->sc;
@@ -10614,8 +10616,8 @@ static uint8_t elink_7101_read_status(struct elink_phy *phy,
 	return link_up;
 }
 
-static elink_status_t elink_7101_format_ver(uint32_t spirom_ver, uint8_t * str,
-					    uint16_t * len)
+static uint8_t elink_7101_format_ver(uint32_t spirom_ver, uint8_t * str,
+				     uint16_t * len)
 {
 	if (*len < 5)
 		return ELINK_STATUS_ERROR;
@@ -10680,14 +10682,14 @@ static const struct elink_phy phy_null = {
 	.speed_cap_mask = 0,
 	.req_duplex = 0,
 	.rsrv = 0,
-	.config_init = (config_init_t) NULL,
-	.read_status = (read_status_t) NULL,
-	.link_reset = (link_reset_t) NULL,
-	.config_loopback = (config_loopback_t) NULL,
-	.format_fw_ver = (format_fw_ver_t) NULL,
-	.hw_reset = (hw_reset_t) NULL,
-	.set_link_led = (set_link_led_t) NULL,
-	.phy_specific_func = (phy_specific_func_t) NULL
+	.config_init = NULL,
+	.read_status = NULL,
+	.link_reset = NULL,
+	.config_loopback = NULL,
+	.format_fw_ver = NULL,
+	.hw_reset = NULL,
+	.set_link_led = NULL,
+	.phy_specific_func = NULL
 };
 
 static const struct elink_phy phy_serdes = {
@@ -10714,14 +10716,14 @@ static const struct elink_phy phy_serdes = {
 	.speed_cap_mask = 0,
 	.req_duplex = 0,
 	.rsrv = 0,
-	.config_init = (config_init_t) elink_xgxs_config_init,
-	.read_status = (read_status_t) elink_link_settings_status,
-	.link_reset = (link_reset_t) elink_int_link_reset,
-	.config_loopback = (config_loopback_t) NULL,
-	.format_fw_ver = (format_fw_ver_t) NULL,
-	.hw_reset = (hw_reset_t) NULL,
-	.set_link_led = (set_link_led_t) NULL,
-	.phy_specific_func = (phy_specific_func_t) NULL
+	.config_init = elink_xgxs_config_init,
+	.read_status = elink_link_settings_status,
+	.link_reset = elink_int_link_reset,
+	.config_loopback = NULL,
+	.format_fw_ver = NULL,
+	.hw_reset = NULL,
+	.set_link_led = NULL,
+	.phy_specific_func = NULL
 };
 
 static const struct elink_phy phy_xgxs = {
@@ -10749,14 +10751,14 @@ static const struct elink_phy phy_xgxs = {
 	.speed_cap_mask = 0,
 	.req_duplex = 0,
 	.rsrv = 0,
-	.config_init = (config_init_t) elink_xgxs_config_init,
-	.read_status = (read_status_t) elink_link_settings_status,
-	.link_reset = (link_reset_t) elink_int_link_reset,
-	.config_loopback = (config_loopback_t) elink_set_xgxs_loopback,
-	.format_fw_ver = (format_fw_ver_t) NULL,
-	.hw_reset = (hw_reset_t) NULL,
-	.set_link_led = (set_link_led_t) NULL,
-	.phy_specific_func = (phy_specific_func_t) elink_xgxs_specific_func
+	.config_init = elink_xgxs_config_init,
+	.read_status = elink_link_settings_status,
+	.link_reset = elink_int_link_reset,
+	.config_loopback = elink_set_xgxs_loopback,
+	.format_fw_ver = NULL,
+	.hw_reset = NULL,
+	.set_link_led = NULL,
+	.phy_specific_func = elink_xgxs_specific_func
 };
 
 static const struct elink_phy phy_warpcore = {
@@ -10785,14 +10787,14 @@ static const struct elink_phy phy_warpcore = {
 	.speed_cap_mask = 0,
 	/* req_duplex = */ 0,
 	/* rsrv = */ 0,
-	.config_init = (config_init_t) elink_warpcore_config_init,
-	.read_status = (read_status_t) elink_warpcore_read_status,
-	.link_reset = (link_reset_t) elink_warpcore_link_reset,
-	.config_loopback = (config_loopback_t) elink_set_warpcore_loopback,
-	.format_fw_ver = (format_fw_ver_t) NULL,
-	.hw_reset = (hw_reset_t) elink_warpcore_hw_reset,
-	.set_link_led = (set_link_led_t) NULL,
-	.phy_specific_func = (phy_specific_func_t) NULL
+	.config_init = elink_warpcore_config_init,
+	.read_status = elink_warpcore_read_status,
+	.link_reset = elink_warpcore_link_reset,
+	.config_loopback = elink_set_warpcore_loopback,
+	.format_fw_ver = NULL,
+	.hw_reset = elink_warpcore_hw_reset,
+	.set_link_led = NULL,
+	.phy_specific_func = NULL
 };
 
 static const struct elink_phy phy_7101 = {
@@ -10814,14 +10816,14 @@ static const struct elink_phy phy_7101 = {
 	.speed_cap_mask = 0,
 	.req_duplex = 0,
 	.rsrv = 0,
-	.config_init = (config_init_t) elink_7101_config_init,
-	.read_status = (read_status_t) elink_7101_read_status,
-	.link_reset = (link_reset_t) elink_common_ext_link_reset,
-	.config_loopback = (config_loopback_t) elink_7101_config_loopback,
-	.format_fw_ver = (format_fw_ver_t) elink_7101_format_ver,
-	.hw_reset = (hw_reset_t) elink_7101_hw_reset,
-	.set_link_led = (set_link_led_t) elink_7101_set_link_led,
-	.phy_specific_func = (phy_specific_func_t) NULL
+	.config_init = elink_7101_config_init,
+	.read_status = elink_7101_read_status,
+	.link_reset = elink_common_ext_link_reset,
+	.config_loopback = elink_7101_config_loopback,
+	.format_fw_ver = elink_7101_format_ver,
+	.hw_reset = elink_7101_hw_reset,
+	.set_link_led = elink_7101_set_link_led,
+	.phy_specific_func = NULL
 };
 
 static const struct elink_phy phy_8073 = {
@@ -10845,14 +10847,14 @@ static const struct elink_phy phy_8073 = {
 	.speed_cap_mask = 0,
 	.req_duplex = 0,
 	.rsrv = 0,
-	.config_init = (config_init_t) elink_8073_config_init,
-	.read_status = (read_status_t) elink_8073_read_status,
-	.link_reset = (link_reset_t) elink_8073_link_reset,
-	.config_loopback = (config_loopback_t) NULL,
-	.format_fw_ver = (format_fw_ver_t) elink_format_ver,
-	.hw_reset = (hw_reset_t) NULL,
-	.set_link_led = (set_link_led_t) NULL,
-	.phy_specific_func = (phy_specific_func_t) elink_8073_specific_func
+	.config_init = elink_8073_config_init,
+	.read_status = elink_8073_read_status,
+	.link_reset = elink_8073_link_reset,
+	.config_loopback = NULL,
+	.format_fw_ver = elink_format_ver,
+	.hw_reset = NULL,
+	.set_link_led = NULL,
+	.phy_specific_func = elink_8073_specific_func
 };
 
 static const struct elink_phy phy_8705 = {
@@ -10873,14 +10875,14 @@ static const struct elink_phy phy_8705 = {
 	.speed_cap_mask = 0,
 	.req_duplex = 0,
 	.rsrv = 0,
-	.config_init = (config_init_t) elink_8705_config_init,
-	.read_status = (read_status_t) elink_8705_read_status,
-	.link_reset = (link_reset_t) elink_common_ext_link_reset,
-	.config_loopback = (config_loopback_t) NULL,
-	.format_fw_ver = (format_fw_ver_t) elink_null_format_ver,
-	.hw_reset = (hw_reset_t) NULL,
-	.set_link_led = (set_link_led_t) NULL,
-	.phy_specific_func = (phy_specific_func_t) NULL
+	.config_init = elink_8705_config_init,
+	.read_status = elink_8705_read_status,
+	.link_reset = elink_common_ext_link_reset,
+	.config_loopback = NULL,
+	.format_fw_ver = elink_null_format_ver,
+	.hw_reset = NULL,
+	.set_link_led = NULL,
+	.phy_specific_func = NULL
 };
 
 static const struct elink_phy phy_8706 = {
@@ -10902,14 +10904,14 @@ static const struct elink_phy phy_8706 = {
 	.speed_cap_mask = 0,
 	.req_duplex = 0,
 	.rsrv = 0,
-	.config_init = (config_init_t) elink_8706_config_init,
-	.read_status = (read_status_t) elink_8706_read_status,
-	.link_reset = (link_reset_t) elink_common_ext_link_reset,
-	.config_loopback = (config_loopback_t) NULL,
-	.format_fw_ver = (format_fw_ver_t) elink_format_ver,
-	.hw_reset = (hw_reset_t) NULL,
-	.set_link_led = (set_link_led_t) NULL,
-	.phy_specific_func = (phy_specific_func_t) NULL
+	.config_init = elink_8706_config_init,
+	.read_status = elink_8706_read_status,
+	.link_reset = elink_common_ext_link_reset,
+	.config_loopback = NULL,
+	.format_fw_ver = elink_format_ver,
+	.hw_reset = NULL,
+	.set_link_led = NULL,
+	.phy_specific_func = NULL
 };
 
 static const struct elink_phy phy_8726 = {
@@ -10932,14 +10934,14 @@ static const struct elink_phy phy_8726 = {
 	.speed_cap_mask = 0,
 	.req_duplex = 0,
 	.rsrv = 0,
-	.config_init = (config_init_t) elink_8726_config_init,
-	.read_status = (read_status_t) elink_8726_read_status,
-	.link_reset = (link_reset_t) elink_8726_link_reset,
-	.config_loopback = (config_loopback_t) elink_8726_config_loopback,
-	.format_fw_ver = (format_fw_ver_t) elink_format_ver,
-	.hw_reset = (hw_reset_t) NULL,
-	.set_link_led = (set_link_led_t) NULL,
-	.phy_specific_func = (phy_specific_func_t) NULL
+	.config_init = elink_8726_config_init,
+	.read_status = elink_8726_read_status,
+	.link_reset = elink_8726_link_reset,
+	.config_loopback = elink_8726_config_loopback,
+	.format_fw_ver = elink_format_ver,
+	.hw_reset = NULL,
+	.set_link_led = NULL,
+	.phy_specific_func = NULL
 };
 
 static const struct elink_phy phy_8727 = {
@@ -10961,14 +10963,14 @@ static const struct elink_phy phy_8727 = {
 	.speed_cap_mask = 0,
 	.req_duplex = 0,
 	.rsrv = 0,
-	.config_init = (config_init_t) elink_8727_config_init,
-	.read_status = (read_status_t) elink_8727_read_status,
-	.link_reset = (link_reset_t) elink_8727_link_reset,
-	.config_loopback = (config_loopback_t) NULL,
-	.format_fw_ver = (format_fw_ver_t) elink_format_ver,
-	.hw_reset = (hw_reset_t) elink_8727_hw_reset,
-	.set_link_led = (set_link_led_t) elink_8727_set_link_led,
-	.phy_specific_func = (phy_specific_func_t) elink_8727_specific_func
+	.config_init = elink_8727_config_init,
+	.read_status = elink_8727_read_status,
+	.link_reset = elink_8727_link_reset,
+	.config_loopback = NULL,
+	.format_fw_ver = elink_format_ver,
+	.hw_reset = elink_8727_hw_reset,
+	.set_link_led = elink_8727_set_link_led,
+	.phy_specific_func = elink_8727_specific_func
 };
 
 static const struct elink_phy phy_8481 = {
@@ -10996,14 +10998,14 @@ static const struct elink_phy phy_8481 = {
 	.speed_cap_mask = 0,
 	.req_duplex = 0,
 	.rsrv = 0,
-	.config_init = (config_init_t) elink_8481_config_init,
-	.read_status = (read_status_t) elink_848xx_read_status,
-	.link_reset = (link_reset_t) elink_8481_link_reset,
-	.config_loopback = (config_loopback_t) NULL,
-	.format_fw_ver = (format_fw_ver_t) elink_848xx_format_ver,
-	.hw_reset = (hw_reset_t) elink_8481_hw_reset,
-	.set_link_led = (set_link_led_t) elink_848xx_set_link_led,
-	.phy_specific_func = (phy_specific_func_t) NULL
+	.config_init = elink_8481_config_init,
+	.read_status = elink_848xx_read_status,
+	.link_reset = elink_8481_link_reset,
+	.config_loopback = NULL,
+	.format_fw_ver = elink_848xx_format_ver,
+	.hw_reset = elink_8481_hw_reset,
+	.set_link_led = elink_848xx_set_link_led,
+	.phy_specific_func = NULL
 };
 
 static const struct elink_phy phy_84823 = {
@@ -11031,14 +11033,14 @@ static const struct elink_phy phy_84823 = {
 	.speed_cap_mask = 0,
 	.req_duplex = 0,
 	.rsrv = 0,
-	.config_init = (config_init_t) elink_848x3_config_init,
-	.read_status = (read_status_t) elink_848xx_read_status,
-	.link_reset = (link_reset_t) elink_848x3_link_reset,
-	.config_loopback = (config_loopback_t) NULL,
-	.format_fw_ver = (format_fw_ver_t) elink_848xx_format_ver,
-	.hw_reset = (hw_reset_t) NULL,
-	.set_link_led = (set_link_led_t) elink_848xx_set_link_led,
-	.phy_specific_func = (phy_specific_func_t) elink_848xx_specific_func
+	.config_init = elink_848x3_config_init,
+	.read_status = elink_848xx_read_status,
+	.link_reset = elink_848x3_link_reset,
+	.config_loopback = NULL,
+	.format_fw_ver = elink_848xx_format_ver,
+	.hw_reset = NULL,
+	.set_link_led = elink_848xx_set_link_led,
+	.phy_specific_func = elink_848xx_specific_func
 };
 
 static const struct elink_phy phy_84833 = {
@@ -11065,14 +11067,14 @@ static const struct elink_phy phy_84833 = {
 	.speed_cap_mask = 0,
 	.req_duplex = 0,
 	.rsrv = 0,
-	.config_init = (config_init_t) elink_848x3_config_init,
-	.read_status = (read_status_t) elink_848xx_read_status,
-	.link_reset = (link_reset_t) elink_848x3_link_reset,
-	.config_loopback = (config_loopback_t) NULL,
-	.format_fw_ver = (format_fw_ver_t) elink_848xx_format_ver,
-	.hw_reset = (hw_reset_t) elink_84833_hw_reset_phy,
-	.set_link_led = (set_link_led_t) elink_848xx_set_link_led,
-	.phy_specific_func = (phy_specific_func_t) elink_848xx_specific_func
+	.config_init = elink_848x3_config_init,
+	.read_status = elink_848xx_read_status,
+	.link_reset = elink_848x3_link_reset,
+	.config_loopback = NULL,
+	.format_fw_ver = elink_848xx_format_ver,
+	.hw_reset = elink_84833_hw_reset_phy,
+	.set_link_led = elink_848xx_set_link_led,
+	.phy_specific_func = elink_848xx_specific_func
 };
 
 static const struct elink_phy phy_84834 = {
@@ -11098,14 +11100,14 @@ static const struct elink_phy phy_84834 = {
 	.speed_cap_mask = 0,
 	.req_duplex = 0,
 	.rsrv = 0,
-	.config_init = (config_init_t) elink_848x3_config_init,
-	.read_status = (read_status_t) elink_848xx_read_status,
-	.link_reset = (link_reset_t) elink_848x3_link_reset,
-	.config_loopback = (config_loopback_t) NULL,
-	.format_fw_ver = (format_fw_ver_t) elink_848xx_format_ver,
-	.hw_reset = (hw_reset_t) elink_84833_hw_reset_phy,
-	.set_link_led = (set_link_led_t) elink_848xx_set_link_led,
-	.phy_specific_func = (phy_specific_func_t) elink_848xx_specific_func
+	.config_init = elink_848x3_config_init,
+	.read_status = elink_848xx_read_status,
+	.link_reset = elink_848x3_link_reset,
+	.config_loopback = NULL,
+	.format_fw_ver = elink_848xx_format_ver,
+	.hw_reset = elink_84833_hw_reset_phy,
+	.set_link_led = elink_848xx_set_link_led,
+	.phy_specific_func = elink_848xx_specific_func
 };
 
 static const struct elink_phy phy_54618se = {
@@ -11131,14 +11133,14 @@ static const struct elink_phy phy_54618se = {
 	.speed_cap_mask = 0,
 	/* req_duplex = */ 0,
 	/* rsrv = */ 0,
-	.config_init = (config_init_t) elink_54618se_config_init,
-	.read_status = (read_status_t) elink_54618se_read_status,
-	.link_reset = (link_reset_t) elink_54618se_link_reset,
-	.config_loopback = (config_loopback_t) elink_54618se_config_loopback,
-	.format_fw_ver = (format_fw_ver_t) NULL,
-	.hw_reset = (hw_reset_t) NULL,
-	.set_link_led = (set_link_led_t) elink_5461x_set_link_led,
-	.phy_specific_func = (phy_specific_func_t) elink_54618se_specific_func
+	.config_init = elink_54618se_config_init,
+	.read_status = elink_54618se_read_status,
+	.link_reset = elink_54618se_link_reset,
+	.config_loopback = elink_54618se_config_loopback,
+	.format_fw_ver = NULL,
+	.hw_reset = NULL,
+	.set_link_led = elink_5461x_set_link_led,
+	.phy_specific_func = elink_54618se_specific_func
 };
 
 /*****************************************************************/
@@ -12919,7 +12921,7 @@ static void elink_check_kr2_wa(struct elink_params *params,
 	 */
 	not_kr2_device = (((base_page & 0x8000) == 0) ||
			  (((base_page & 0x8000) &&
-			    ((next_page & 0xe0) == 0x2))));
+			    ((next_page & 0xe0) == 0x20))));
 
 	/* In case KR2 is already disabled, check if we need to re-enable it */
 	if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
diff --git a/drivers/net/bnx2x/elink.h b/drivers/net/bnx2x/elink.h
index 9401b7cd..236f9367 100644
--- a/drivers/net/bnx2x/elink.h
+++ b/drivers/net/bnx2x/elink.h
@@ -1,14 +1,14 @@
 /*
- * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2007-2013 Cavium Inc. All rights reserved.
 *
 * Eric Davis
 * David Christensen
 * Gary Zambrano
 *
 * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
- * Copyright (c) 2015 QLogic Corporation.
+ * Copyright (c) 2015-2018 Cavium Inc.
 * All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
 *
 * See LICENSE.bnx2x_pmd for copyright and licensing details.
 */
diff --git a/drivers/net/bnxt/Makefile b/drivers/net/bnxt/Makefile
index 2aa04411..fd0cb523 100644
--- a/drivers/net/bnxt/Makefile
+++ b/drivers/net/bnxt/Makefile
@@ -1,35 +1,8 @@
-# BSD LICENSE
-#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# Copyright(c) 2014 6WIND S.A.
-# Copyright(c) Broadcom Limited.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2010-2014 Intel Corporation.
+# Copyright(c) 2014 6WIND S.A.
+# Copyright(c) Broadcom Limited.
+# All rights reserved.
 
 include $(RTE_SDK)/mk/rte.vars.mk
 
diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h
index b5a0badf..afaaf8c4 100644
--- a/drivers/net/bnxt/bnxt.h
+++ b/drivers/net/bnxt/bnxt.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) Broadcom Limited.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *    * Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *    * Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in
- *      the documentation and/or other materials provided with the
- *      distribution.
- *    * Neither the name of Broadcom Corporation nor the names of its
- *      contributors may be used to endorse or promote products derived
- *      from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
 */
 
 #ifndef _BNXT_H_
@@ -51,6 +23,7 @@
 #define BNXT_MAX_MTU		9500
 #define VLAN_TAG_SIZE		4
 #define BNXT_MAX_LED		4
+#define BNXT_NUM_VLANS		2
 
 struct bnxt_led_info {
 	uint8_t	     led_id;
@@ -236,6 +209,7 @@ struct bnxt {
 	struct rte_eth_dev	*eth_dev;
 	struct rte_eth_rss_conf	rss_conf;
 	struct rte_pci_device	*pdev;
+	void			*doorbell_base;
 
 	uint32_t		flags;
 #define BNXT_FLAG_REGISTERED	(1 << 0)
@@ -246,6 +220,7 @@ struct bnxt {
 #define BNXT_FLAG_UPDATE_HASH	(1 << 5)
 #define BNXT_FLAG_PTP_SUPPORTED	(1 << 6)
 #define BNXT_FLAG_MULTI_HOST	(1 << 7)
+#define BNXT_FLAG_NEW_RM	(1 << 30)
 #define BNXT_FLAG_INIT_DONE	(1 << 31)
 #define BNXT_PF(bp)		(!((bp)->flags & BNXT_FLAG_VF))
 #define BNXT_VF(bp)		((bp)->flags & BNXT_FLAG_VF)
@@ -300,6 +275,7 @@ struct bnxt {
 	struct bnxt_link_info	link_info;
 	struct bnxt_cos_queue_info	cos_queue[BNXT_COS_QUEUE_COUNT];
 
+	uint8_t			tx_cosq_id;
 	uint16_t		fw_fid;
 	uint8_t			dflt_mac_addr[ETHER_ADDR_LEN];
 
@@ -321,7 +297,7 @@ struct bnxt {
 	uint16_t		vxlan_fw_dst_port_id;
 	uint16_t		geneve_fw_dst_port_id;
 	uint32_t		fw_ver;
-	rte_atomic64_t		rx_mbuf_alloc_fail;
+	uint32_t		hwrm_spec_code;
 
 	struct bnxt_led_info	leds[BNXT_MAX_LED];
 	uint8_t			num_leds;
@@ -331,8 +307,6 @@ struct bnxt {
 int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete);
 int bnxt_rcv_msg_from_vf(struct bnxt *bp, uint16_t vf_id, void *msg);
 
-#define RX_PROD_AGG_BD_TYPE_RX_PROD_AGG		0x6
-
 bool is_bnxt_supported(struct rte_eth_dev *dev);
 extern const struct rte_flow_ops bnxt_flow_ops;
 
diff --git a/drivers/net/bnxt/bnxt_cpr.c b/drivers/net/bnxt/bnxt_cpr.c
index 737bb060..ff20b6fd 100644
--- a/drivers/net/bnxt/bnxt_cpr.c
+++ b/drivers/net/bnxt/bnxt_cpr.c
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) Broadcom Limited.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *    * Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *    * Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in
- *      the documentation and/or other materials provided with the
- *      distribution.
- *    * Neither the name of Broadcom Corporation nor the names of its
- *      contributors may be used to endorse or promote products derived
- *      from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
 */
 
 #include
 
@@ -55,6 +27,7 @@ void bnxt_handle_async_event(struct bnxt *bp,
 	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
 	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
 	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE:
+		/* FALLTHROUGH */
 		bnxt_link_update_op(bp->eth_dev, 1);
 		break;
 	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
@@ -159,69 +132,31 @@ reject:
 	return;
 }
 
-/* For the default completion ring only */
-int bnxt_alloc_def_cp_ring(struct bnxt *bp)
+int bnxt_event_hwrm_resp_handler(struct bnxt *bp, struct cmpl_base *cmp)
 {
-	struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
-	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
-	int rc;
-
-	rc = bnxt_hwrm_ring_alloc(bp, cp_ring,
-				  HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
-				  0, HWRM_NA_SIGNATURE,
-				  HWRM_NA_SIGNATURE);
-	if (rc)
-		goto err_out;
-	cpr->cp_doorbell = bp->pdev->mem_resource[2].addr;
-	B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
-	if (BNXT_PF(bp))
-		rc = bnxt_hwrm_func_cfg_def_cp(bp);
-	else
-		rc = bnxt_hwrm_vf_func_cfg_def_cp(bp);
-
-err_out:
-	return rc;
-}
+	bool evt = 0;
 
-void bnxt_free_def_cp_ring(struct bnxt *bp)
-{
-	struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
-
-	if (cpr == NULL)
-		return;
+	if (bp == NULL || cmp == NULL) {
+		PMD_DRV_LOG(ERR, "invalid NULL argument\n");
+		return evt;
+	}
 
-	bnxt_free_ring(cpr->cp_ring_struct);
-	cpr->cp_ring_struct = NULL;
-	rte_free(cpr->cp_ring_struct);
-	rte_free(cpr);
-	bp->def_cp_ring = NULL;
-}
+	switch (CMP_TYPE(cmp)) {
+	case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
+		/* Handle any async event */
+		bnxt_handle_async_event(bp, cmp);
+		evt = 1;
+		break;
+	case CMPL_BASE_TYPE_HWRM_FWD_RESP:
+		/* Handle HWRM forwarded responses */
+		bnxt_handle_fwd_req(bp, cmp);
+		evt = 1;
+		break;
+	default:
+		/* Ignore any other events */
+		PMD_DRV_LOG(INFO, "Ignoring %02x completion\n", CMP_TYPE(cmp));
+		break;
+	}
 
-/* For the default completion ring only */
-int bnxt_init_def_ring_struct(struct bnxt *bp, unsigned int socket_id)
-{
-	struct bnxt_cp_ring_info *cpr;
-	struct bnxt_ring *ring;
-
-	cpr = rte_zmalloc_socket("cpr",
-				 sizeof(struct bnxt_cp_ring_info),
-				 RTE_CACHE_LINE_SIZE, socket_id);
-	if (cpr == NULL)
-		return -ENOMEM;
-	bp->def_cp_ring = cpr;
-
-	ring = rte_zmalloc_socket("bnxt_cp_ring_struct",
-				  sizeof(struct bnxt_ring),
-				  RTE_CACHE_LINE_SIZE, socket_id);
-	if (ring == NULL)
-		return -ENOMEM;
-	cpr->cp_ring_struct = ring;
-	ring->bd = (void *)cpr->cp_desc_ring;
-	ring->bd_dma = cpr->cp_desc_mapping;
-	ring->ring_size = rte_align32pow2(DEFAULT_CP_RING_SIZE);
-	ring->ring_mask = ring->ring_size - 1;
-	ring->vmem_size = 0;
-	ring->vmem = NULL;
-
-	return 0;
+	return evt;
 }
diff --git a/drivers/net/bnxt/bnxt_cpr.h b/drivers/net/bnxt/bnxt_cpr.h
index ce2b0cb8..6c1e6d2b 100644
--- a/drivers/net/bnxt/bnxt_cpr.h
+++ b/drivers/net/bnxt/bnxt_cpr.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) Broadcom Limited.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *    * Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *    * Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in
- *      the documentation and/or other materials provided with the
- *      distribution.
- *    * Neither the name of Broadcom Corporation nor the names of its
- *      contributors may be used to endorse or promote products derived
- *      from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
 */
 
 #ifndef _BNXT_CPR_H_
@@ -100,12 +72,9 @@ struct bnxt_cp_ring_info {
 #define RX_CMP_L2_ERRORS \
 	(RX_PKT_CMPL_ERRORS_BUFFER_ERROR_MASK | RX_PKT_CMPL_ERRORS_CRC_ERROR)
 
-
 struct bnxt;
-int bnxt_alloc_def_cp_ring(struct bnxt *bp);
-void bnxt_free_def_cp_ring(struct bnxt *bp);
-int bnxt_init_def_ring_struct(struct bnxt *bp, unsigned int socket_id);
 void bnxt_handle_async_event(struct bnxt *bp, struct cmpl_base *cmp);
 void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmp);
+int bnxt_event_hwrm_resp_handler(struct bnxt *bp, struct cmpl_base *cmp);
 
 #endif
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index 21c46f83..6e56bfd3 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) Broadcom Limited.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *    * Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *    * Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in
- *      the documentation and/or other materials provided with the
- *      distribution.
- *    * Neither the name of Broadcom Corporation nor the names of its
- *      contributors may be used to endorse or promote products derived
- *      from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
 */
 
 #include
 
@@ -57,12 +29,13 @@
 
 #define DRV_MODULE_NAME		"bnxt"
 static const char bnxt_version[] =
-	"Broadcom Cumulus driver " DRV_MODULE_NAME "\n";
+	"Broadcom NetXtreme driver " DRV_MODULE_NAME "\n";
 int bnxt_logtype_driver;
 
 #define PCI_VENDOR_ID_BROADCOM 0x14E4
 
-#define BROADCOM_DEV_ID_STRATUS_NIC_VF 0x1609
+#define BROADCOM_DEV_ID_STRATUS_NIC_VF1 0x1606
+#define BROADCOM_DEV_ID_STRATUS_NIC_VF2 0x1609
 #define BROADCOM_DEV_ID_STRATUS_NIC 0x1614
 #define BROADCOM_DEV_ID_57414_VF 0x16c1
 #define BROADCOM_DEV_ID_57301 0x16c8
@@ -97,10 +70,15 @@ int bnxt_logtype_driver;
 #define BROADCOM_DEV_ID_57407_MF 0x16ea
 #define BROADCOM_DEV_ID_57414_MF 0x16ec
 #define BROADCOM_DEV_ID_57416_MF 0x16ee
+#define BROADCOM_DEV_ID_58802 0xd802
+#define BROADCOM_DEV_ID_58804 0xd804
+#define BROADCOM_DEV_ID_58808 0x16f0
 
 static const struct rte_pci_id bnxt_pci_id_map[] = {
 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM,
-			 BROADCOM_DEV_ID_STRATUS_NIC_VF) },
+			 BROADCOM_DEV_ID_STRATUS_NIC_VF1) },
+	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM,
+			 BROADCOM_DEV_ID_STRATUS_NIC_VF2) },
 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_STRATUS_NIC) },
 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_VF) },
 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57301) },
@@ -135,6 +113,9 @@ static const struct rte_pci_id bnxt_pci_id_map[] = {
 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_SFP) },
 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_MF) },
 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_MF) },
+	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802) },
+	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58804) },
+	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58808) },
 	{ .vendor_id = 0, /* sentinel */ },
 };
 
@@ -146,8 +127,31 @@ static const struct rte_pci_id bnxt_pci_id_map[] = {
 				     ETH_RSS_NONFRAG_IPV6_TCP |	\
 				     ETH_RSS_NONFRAG_IPV6_UDP)
 
+#define BNXT_DEV_TX_OFFLOAD_SUPPORT (DEV_TX_OFFLOAD_VLAN_INSERT | \
+				     DEV_TX_OFFLOAD_IPV4_CKSUM | \
+				     DEV_TX_OFFLOAD_TCP_CKSUM | \
+				     DEV_TX_OFFLOAD_UDP_CKSUM | \
+				     DEV_TX_OFFLOAD_TCP_TSO | \
+				     DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
+				     DEV_TX_OFFLOAD_VXLAN_TNL_TSO | \
+				     DEV_TX_OFFLOAD_GRE_TNL_TSO | \
+				     DEV_TX_OFFLOAD_IPIP_TNL_TSO | \
+				     DEV_TX_OFFLOAD_GENEVE_TNL_TSO | \
+				     DEV_TX_OFFLOAD_MULTI_SEGS)
+
+#define BNXT_DEV_RX_OFFLOAD_SUPPORT (DEV_RX_OFFLOAD_VLAN_FILTER | \
+				     DEV_RX_OFFLOAD_VLAN_STRIP | \
+				     DEV_RX_OFFLOAD_IPV4_CKSUM | \
+				     DEV_RX_OFFLOAD_UDP_CKSUM | \
+				     DEV_RX_OFFLOAD_TCP_CKSUM | \
+				     DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
+				     DEV_RX_OFFLOAD_JUMBO_FRAME | \
+				     DEV_RX_OFFLOAD_CRC_STRIP | \
+				     DEV_RX_OFFLOAD_TCP_LRO)
+
 static int bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask);
 static void bnxt_print_link_info(struct rte_eth_dev *eth_dev);
+static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu);
 
 /***********************/
@@ -164,23 +168,12 @@ static void bnxt_free_mem(struct bnxt *bp)
 	bnxt_free_stats(bp);
 	bnxt_free_tx_rings(bp);
 	bnxt_free_rx_rings(bp);
-	bnxt_free_def_cp_ring(bp);
 }
 
 static int bnxt_alloc_mem(struct bnxt *bp)
 {
 	int rc;
 
-	/* Default completion ring */
-	rc = bnxt_init_def_ring_struct(bp, SOCKET_ID_ANY);
-	if (rc)
-		goto alloc_mem_err;
-
-	rc = bnxt_alloc_rings(bp, 0, NULL, NULL,
-			      bp->def_cp_ring, "def_cp");
-	if (rc)
-		goto alloc_mem_err;
-
 	rc = bnxt_alloc_vnic_mem(bp);
 	if (rc)
 		goto alloc_mem_err;
@@ -215,10 +208,12 @@ static int bnxt_init_chip(struct bnxt *bp)
 	rte_intr_disable(intr_handle);
 
 	if (bp->eth_dev->data->mtu > ETHER_MTU) {
-		bp->eth_dev->data->dev_conf.rxmode.jumbo_frame = 1;
+		bp->eth_dev->data->dev_conf.rxmode.offloads |=
+			DEV_RX_OFFLOAD_JUMBO_FRAME;
 		bp->flags |= BNXT_FLAG_JUMBO;
 	} else {
-		bp->eth_dev->data->dev_conf.rxmode.jumbo_frame = 0;
+		bp->eth_dev->data->dev_conf.rxmode.offloads &=
+			~DEV_RX_OFFLOAD_JUMBO_FRAME;
 		bp->flags &= ~BNXT_FLAG_JUMBO;
 	}
 
@@ -289,7 +284,8 @@ static int bnxt_init_chip(struct bnxt *bp)
 
 		bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
 
-		if (bp->eth_dev->data->dev_conf.rxmode.enable_lro)
+		if (bp->eth_dev->data->dev_conf.rxmode.offloads &
+		    DEV_RX_OFFLOAD_TCP_LRO)
 			bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 1);
 		else
 			bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 0);
@@ -389,10 +385,6 @@ static int bnxt_init_nic(struct bnxt *bp)
 	bnxt_init_vnics(bp);
 	bnxt_init_filters(bp);
 
-	rc = bnxt_init_chip(bp);
-	if (rc)
-		return rc;
-
 	return 0;
 }
 
@@ -407,8 +399,6 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
 	uint16_t max_vnics, i, j, vpool, vrxq;
 	unsigned int max_rx_rings;
 
-	dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
-
 	/* MAC Specifics */
 	dev_info->max_mac_addrs = bp->max_l2_ctx;
 	dev_info->max_hash_mac_addrs = 0;
@@ -416,9 +406,7 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
 	/* PF/VF specifics */
 	if (BNXT_PF(bp))
 		dev_info->max_vfs = bp->pdev->max_vfs;
-	max_rx_rings = RTE_MIN(bp->max_vnics, RTE_MIN(bp->max_l2_ctx,
-						RTE_MIN(bp->max_rsscos_ctx,
-						bp->max_stat_ctx)));
+	max_rx_rings = RTE_MIN(bp->max_vnics, bp->max_stat_ctx);
 	/* For the sake of symmetry, max_rx_queues = max_tx_queues */
 	dev_info->max_rx_queues = max_rx_rings;
 	dev_info->max_tx_queues = max_rx_rings;
@@ -430,21 +418,12 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
 	dev_info->min_rx_bufsize = 1;
 	dev_info->max_rx_pktlen = BNXT_MAX_MTU + ETHER_HDR_LEN + ETHER_CRC_LEN
 				  + VLAN_TAG_SIZE;
-	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
-					DEV_RX_OFFLOAD_IPV4_CKSUM |
-					DEV_RX_OFFLOAD_UDP_CKSUM |
-					DEV_RX_OFFLOAD_TCP_CKSUM |
-					DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
-	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
-					DEV_TX_OFFLOAD_IPV4_CKSUM |
-					DEV_TX_OFFLOAD_TCP_CKSUM |
-					DEV_TX_OFFLOAD_UDP_CKSUM |
-					DEV_TX_OFFLOAD_TCP_TSO |
-					DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-					DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-					DEV_TX_OFFLOAD_GRE_TNL_TSO |
-					DEV_TX_OFFLOAD_IPIP_TNL_TSO |
-					DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
+
+	dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
+	if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
+		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
+	dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT;
+
 	dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;
 
 	/* *INDENT-OFF* */
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
@@ -454,7 +433,8 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
 			.wthresh = 0,
 		},
 		.rx_free_thresh = 32,
-		.rx_drop_en = 0,
+		/* If no descriptors available, pkts are dropped by default */
+		.rx_drop_en = 1,
 	};
 
 	dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -465,8 +445,6 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
 		},
 		.tx_free_thresh = 32,
 		.tx_rs_thresh = 32,
-		.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
-			     ETH_TXQ_FLAGS_NOOFFLOADS,
 	};
 	eth_dev->data->dev_conf.intr_conf.lsc = 1;
 
@@ -510,18 +488,38 @@ found:
 static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
 {
 	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+	uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
 
 	bp->rx_queues = (void *)eth_dev->data->rx_queues;
 	bp->tx_queues = (void *)eth_dev->data->tx_queues;
+	bp->tx_nr_rings = eth_dev->data->nb_tx_queues;
+	bp->rx_nr_rings = eth_dev->data->nb_rx_queues;
+
+	if (BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)) {
+		int rc;
+
+		rc = bnxt_hwrm_func_reserve_vf_resc(bp);
+		if (rc) {
+			PMD_DRV_LOG(ERR, "HWRM resource alloc fail:%x\n", rc);
+			return -ENOSPC;
+		}
+
+		/* legacy driver needs to get updated values */
+		rc = bnxt_hwrm_func_qcaps(bp);
+		if (rc) {
+			PMD_DRV_LOG(ERR, "hwrm func qcaps fail:%d\n", rc);
+			return -ENOSPC;
+		}
+	}
 
 	/* Inherit new configurations */
 	if (eth_dev->data->nb_rx_queues > bp->max_rx_rings ||
 	    eth_dev->data->nb_tx_queues > bp->max_tx_rings ||
-	    eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues + 1 >
+	    eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues >
 	    bp->max_cp_rings ||
 	    eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues >
 	    bp->max_stat_ctx ||
-	    (uint32_t)(eth_dev->data->nb_rx_queues + 1) > bp->max_ring_grps) {
+	    (uint32_t)(eth_dev->data->nb_rx_queues) > bp->max_ring_grps) {
 		PMD_DRV_LOG(ERR,
 			"Insufficient resources to support requested config\n");
 		PMD_DRV_LOG(ERR,
@@ -535,15 +533,16 @@ static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
 		return -ENOSPC;
 	}
 
-	bp->rx_nr_rings = eth_dev->data->nb_rx_queues;
-	bp->tx_nr_rings = eth_dev->data->nb_tx_queues;
 	bp->rx_cp_nr_rings = bp->rx_nr_rings;
 	bp->tx_cp_nr_rings = bp->tx_nr_rings;
 
-	if (eth_dev->data->dev_conf.rxmode.jumbo_frame)
+	if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
 		eth_dev->data->mtu =
 			eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
-			ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE;
+			ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE *
+			BNXT_NUM_VLANS;
+		bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu);
+	}
 	return 0;
 }
 
@@ -571,6 +570,7 @@ static int bnxt_dev_lsc_intr_setup(struct rte_eth_dev *eth_dev)
 static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
 {
 	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+	uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
 	int vlan_mask = 0;
 	int rc;
 
@@ -581,15 +581,15 @@ static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
 	}
 	bp->dev_stopped = 0;
 
-	rc = bnxt_init_nic(bp);
+	rc = bnxt_init_chip(bp);
 	if (rc)
 		goto error;
 
 	bnxt_link_update_op(eth_dev, 1);
 
-	if (eth_dev->data->dev_conf.rxmode.hw_vlan_filter)
+	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
 		vlan_mask |= ETH_VLAN_FILTER_MASK;
-	if (eth_dev->data->dev_conf.rxmode.hw_vlan_strip)
+	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
 		vlan_mask |= ETH_VLAN_STRIP_MASK;
 	rc = bnxt_vlan_offload_set_op(eth_dev, vlan_mask);
 	if (rc)
@@ -635,13 +635,15 @@ static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
 {
 	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
 
+	bp->flags &= ~BNXT_FLAG_INIT_DONE;
 	if (bp->eth_dev->data->dev_started) {
 		/* TBD: STOP HW queues DMA */
 		eth_dev->data->dev_link.link_status = 0;
 	}
 	bnxt_set_hwrm_link_config(bp, false);
 	bnxt_hwrm_port_clr_stats(bp);
-	bp->flags &= ~BNXT_FLAG_INIT_DONE;
+	bnxt_free_tx_mbufs(bp);
+	bnxt_free_rx_mbufs(bp);
 	bnxt_shutdown_nic(bp);
 	bp->dev_stopped = 1;
 }
@@ -653,8 +655,6 @@ static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
 	if (bp->dev_stopped == 0)
 		bnxt_dev_stop_op(eth_dev);
 
-	bnxt_free_tx_mbufs(bp);
-	bnxt_free_rx_mbufs(bp);
 	bnxt_free_mem(bp);
 	if (eth_dev->data->mac_addrs != NULL) {
 		rte_free(eth_dev->data->mac_addrs);
@@ -771,6 +771,11 @@ out:
 	    new.link_speed != eth_dev->data->dev_link.link_speed) {
 		memcpy(&eth_dev->data->dev_link, &new,
 		       sizeof(struct rte_eth_link));
+
+		_rte_eth_dev_callback_process(eth_dev,
+					      RTE_ETH_EVENT_INTR_LSC,
+					      NULL);
+
 		bnxt_print_link_info(eth_dev);
 	}
 
@@ -1366,30 +1371,31 @@ bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask)
 {
 	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
 	unsigned int i;
 
 	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (!dev->data->dev_conf.rxmode.hw_vlan_filter) {
+		if (!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)) {
 			/* Remove any VLAN filters programmed */
 			for (i = 0; i < 4095; i++)
 				bnxt_del_vlan_filter(bp, i);
 		}
 		PMD_DRV_LOG(DEBUG, "VLAN Filtering: %d\n",
-			dev->data->dev_conf.rxmode.hw_vlan_filter);
+			!!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER));
 	}
 
 	if (mask & ETH_VLAN_STRIP_MASK) {
 		/* Enable or disable VLAN stripping */
 		for (i = 0; i < bp->nr_vnics; i++) {
 			struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
-			if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+			if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
 				vnic->vlan_strip = true;
 			else
 				vnic->vlan_strip = false;
 			bnxt_hwrm_vnic_cfg(bp, vnic);
 		}
 		PMD_DRV_LOG(DEBUG, "VLAN Strip Offload: %d\n",
-			dev->data->dev_conf.rxmode.hw_vlan_strip);
+			!!(rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP));
 	}
 
 	if (mask & ETH_VLAN_EXTEND_MASK)
@@ -1398,7 +1404,7 @@ bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask)
 	return 0;
 }
 
-static void
+static int
 bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, struct ether_addr *addr)
 {
 	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
@@ -1408,7 +1414,7 @@ bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, struct ether_addr *addr)
 	int rc;
 
 	if (BNXT_VF(bp))
-		return;
+		return -EPERM;
 
 	memcpy(bp->mac_addr, addr, sizeof(bp->mac_addr));
 
@@ -1418,7 +1424,7 @@ bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, struct ether_addr *addr)
 			continue;
 		rc = bnxt_hwrm_clear_l2_filter(bp, filter);
 		if (rc)
-			break;
+			return rc;
 		memcpy(filter->l2_addr, bp->mac_addr, ETHER_ADDR_LEN);
 		memset(filter->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
 		filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
@@ -1427,10 +1433,12 @@ bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, struct ether_addr *addr)
 			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK;
 		rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
 		if (rc)
-			break;
+			return rc;
 		filter->mac_index = 0;
 		PMD_DRV_LOG(DEBUG, "Set MAC addr\n");
 	}
+
+	return 0;
 }
 
 static int
@@ -1515,7 +1523,6 @@ bnxt_txq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id,
 
 	qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
 	qinfo->conf.tx_rs_thresh = 0;
-	qinfo->conf.txq_flags = txq->txq_flags;
 	qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
 }
 
@@ -1540,9 +1547,11 @@ static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
 
 	if (new_mtu > ETHER_MTU) {
 		bp->flags |= BNXT_FLAG_JUMBO;
-		eth_dev->data->dev_conf.rxmode.jumbo_frame = 1;
+		bp->eth_dev->data->dev_conf.rxmode.offloads |=
+			DEV_RX_OFFLOAD_JUMBO_FRAME;
 	} else {
-		eth_dev->data->dev_conf.rxmode.jumbo_frame = 0;
+		bp->eth_dev->data->dev_conf.rxmode.offloads &=
+			~DEV_RX_OFFLOAD_JUMBO_FRAME;
 		bp->flags &= ~BNXT_FLAG_JUMBO;
 	}
 
@@ -2358,7 +2367,8 @@ bnxt_parse_fdir_filter(struct bnxt *bp,
 }
 
 static struct bnxt_filter_info *
-bnxt_match_fdir(struct bnxt *bp, struct bnxt_filter_info *nf)
+bnxt_match_fdir(struct bnxt *bp, struct bnxt_filter_info *nf,
+		struct bnxt_vnic_info **mvnic)
 {
 	struct bnxt_filter_info *mf = NULL;
 	int i;
@@ -2396,8 +2406,11 @@ bnxt_match_fdir(struct bnxt *bp, struct bnxt_filter_info *nf)
 			    !memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
 				    sizeof(nf->dst_ipaddr)) &&
 			    !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
-				    sizeof(nf->dst_ipaddr_mask)))
+				    sizeof(nf->dst_ipaddr_mask))) {
+				if (mvnic)
+					*mvnic = vnic;
 				return mf;
+			}
 		}
 	}
 	return NULL;
@@ -2411,7 +2424,7 @@ bnxt_fdir_filter(struct rte_eth_dev *dev,
 	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
 	struct rte_eth_fdir_filter *fdir  = (struct rte_eth_fdir_filter *)arg;
 	struct bnxt_filter_info *filter, *match;
-	struct bnxt_vnic_info *vnic;
+	struct bnxt_vnic_info *vnic, *mvnic;
 	int ret = 0, i;
 
 	if (filter_op == RTE_ETH_FILTER_NOP)
@@ -2436,11 +2449,31 @@ bnxt_fdir_filter(struct rte_eth_dev *dev,
 			goto free_filter;
 		filter->filter_type = HWRM_CFA_NTUPLE_FILTER;
 
-		match = bnxt_match_fdir(bp, filter);
+		if (fdir->action.behavior == RTE_ETH_FDIR_REJECT)
+			vnic = STAILQ_FIRST(&bp->ff_pool[0]);
+		else
+			vnic = STAILQ_FIRST(&bp->ff_pool[fdir->action.rx_queue]);
+
+		match = bnxt_match_fdir(bp, filter, &mvnic);
 		if (match != NULL && filter_op == RTE_ETH_FILTER_ADD) {
-			PMD_DRV_LOG(ERR, "Flow already exists.\n");
-			ret = -EEXIST;
-			goto free_filter;
+			if (match->dst_id == vnic->fw_vnic_id) {
+				PMD_DRV_LOG(ERR, "Flow already exists.\n");
+				ret = -EEXIST;
+				goto free_filter;
+			} else {
+				match->dst_id = vnic->fw_vnic_id;
+				ret = bnxt_hwrm_set_ntuple_filter(bp,
+								  match->dst_id,
+								  match);
+				STAILQ_REMOVE(&mvnic->filter, match,
+					      bnxt_filter_info, next);
+				STAILQ_INSERT_TAIL(&vnic->filter, match, next);
+				PMD_DRV_LOG(ERR,
+					"Filter with matching pattern exist\n");
+				PMD_DRV_LOG(ERR,
+					"Updated it to new destination q\n");
+				goto free_filter;
+			}
 		}
 		if (match == NULL && filter_op == RTE_ETH_FILTER_DELETE) {
 			PMD_DRV_LOG(ERR, "Flow does not exist.\n");
@@ -2448,12 +2481,6 @@ bnxt_fdir_filter(struct rte_eth_dev *dev,
 			goto free_filter;
 		}
 
-		if (fdir->action.behavior == RTE_ETH_FDIR_REJECT)
-			vnic = STAILQ_FIRST(&bp->ff_pool[0]);
-		else
-			vnic =
-			STAILQ_FIRST(&bp->ff_pool[fdir->action.rx_queue]);
-
 		if (filter_op == RTE_ETH_FILTER_ADD) {
 			ret = bnxt_hwrm_set_ntuple_filter(bp,
 							  filter->dst_id,
@@ -2489,7 +2516,6 @@ bnxt_fdir_filter(struct rte_eth_dev *dev,
 	case RTE_ETH_FILTER_UPDATE:
 	case RTE_ETH_FILTER_STATS:
 	case RTE_ETH_FILTER_INFO:
-		/* FALLTHROUGH */
 		PMD_DRV_LOG(ERR, "operation %u not implemented", filter_op);
 		break;
 	default:
@@ -2876,6 +2902,7 @@ static bool bnxt_dir_type_is_ape_bin_format(uint16_t dir_type)
 	case BNX_DIR_TYPE_KONG_PATCH:
 	case BNX_DIR_TYPE_BONO_FW:
 	case BNX_DIR_TYPE_BONO_PATCH:
+		/* FALLTHROUGH */
 		return true;
 	}
 
@@ -2894,6 +2921,7 @@ static bool bnxt_dir_type_is_other_exec_format(uint16_t dir_type)
 	case BNX_DIR_TYPE_ISCSI_BOOT:
 	case BNX_DIR_TYPE_ISCSI_BOOT_IPV6:
 	case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6:
+		/* FALLTHROUGH */
 		return true;
 	}
 
@@ -3032,7 +3060,8 @@ static bool bnxt_vf_pciid(uint16_t id)
 	    id == BROADCOM_DEV_ID_5731X_VF ||
 	    id == BROADCOM_DEV_ID_5741X_VF ||
 	    id == BROADCOM_DEV_ID_57414_VF ||
-	    id == BROADCOM_DEV_ID_STRATUS_NIC_VF)
+	    id == BROADCOM_DEV_ID_STRATUS_NIC_VF1 ||
+	    id == BROADCOM_DEV_ID_STRATUS_NIC_VF2)
 		return true;
 	return false;
 }
@@ -3060,11 +3089,23 @@ static int bnxt_init_board(struct rte_eth_dev *eth_dev)
 		rc = -ENOMEM;
 		goto init_err_release;
 	}
+
+	if (!pci_dev->mem_resource[2].addr) {
+		PMD_DRV_LOG(ERR,
+			    "Cannot find PCI device BAR 2 address, aborting\n");
+		rc = -ENODEV;
+		goto init_err_release;
+	} else {
+		bp->doorbell_base = (void *)pci_dev->mem_resource[2].addr;
+	}
+
 	return 0;
 
 init_err_release:
 	if (bp->bar0)
 		bp->bar0 = NULL;
+	if (bp->doorbell_base)
+		bp->doorbell_base = NULL;
 
 init_err_disable:
 
@@ -3098,7 +3139,6 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
 
 	bp = eth_dev->data->dev_private;
 
-	rte_atomic64_init(&bp->rx_mbuf_alloc_fail);
 	bp->dev_stopped = 1;
 
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
@@ -3115,12 +3155,12 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
 	}
 skip_init:
 	eth_dev->dev_ops = &bnxt_dev_ops;
-	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
-		return 0;
 	eth_dev->rx_pkt_burst = &bnxt_recv_pkts;
 	eth_dev->tx_pkt_burst = &bnxt_xmit_pkts;
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
 
-	if (BNXT_PF(bp) && pci_dev->id.device_id != BROADCOM_DEV_ID_NS2) {
+	if (pci_dev->id.device_id != BROADCOM_DEV_ID_NS2) {
 		snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
 			 "bnxt_%04x:%02x:%02x:%02x-%s", pci_dev->addr.domain,
 			 pci_dev->addr.bus, pci_dev->addr.devid,
@@ -3131,9 +3171,10 @@ skip_init:
 				sizeof(struct rx_port_stats) + 512);
 		if (!mz) {
 			mz = rte_memzone_reserve(mz_name, total_alloc_len,
-						 SOCKET_ID_ANY,
-						 RTE_MEMZONE_2MB |
-						 RTE_MEMZONE_SIZE_HINT_ONLY);
+					SOCKET_ID_ANY,
+					RTE_MEMZONE_2MB |
+					RTE_MEMZONE_SIZE_HINT_ONLY |
+					RTE_MEMZONE_IOVA_CONTIG);
 			if (mz == NULL)
 				return -ENOMEM;
 		}
@@ -3165,10 +3206,12 @@ skip_init:
 		total_alloc_len = RTE_CACHE_LINE_ROUNDUP(
 				sizeof(struct tx_port_stats) + 512);
 		if (!mz) {
-			mz = rte_memzone_reserve(mz_name, total_alloc_len,
-						 SOCKET_ID_ANY,
-						 RTE_MEMZONE_2MB |
-						 RTE_MEMZONE_SIZE_HINT_ONLY);
+			mz = rte_memzone_reserve(mz_name,
+					total_alloc_len,
+					SOCKET_ID_ANY,
+					RTE_MEMZONE_2MB |
+					RTE_MEMZONE_SIZE_HINT_ONLY |
+					RTE_MEMZONE_IOVA_CONTIG);
 			if (mz == NULL)
 				return -ENOMEM;
 		}
@@ -3236,7 +3279,7 @@ skip_init:
 		goto error_free;
 	}
 
-	if (check_zero_bytes(bp->dflt_mac_addr, ETHER_ADDR_LEN)) {
+	if (bnxt_check_zero_bytes(bp->dflt_mac_addr, ETHER_ADDR_LEN)) {
 		PMD_DRV_LOG(ERR,
 			    "Invalid MAC addr %02X:%02X:%02X:%02X:%02X:%02X\n",
 			    bp->dflt_mac_addr[0], bp->dflt_mac_addr[1],
@@ -3344,17 +3387,13 @@ skip_init:
 	if (rc)
 		goto error_free_int;
 
-	rc = bnxt_alloc_def_cp_ring(bp);
-	if (rc)
-		goto error_free_int;
-
 	bnxt_enable_int(bp);
+	bnxt_init_nic(bp);
 
 	return 0;
 
 error_free_int:
 	bnxt_disable_int(bp);
-	bnxt_free_def_cp_ring(bp);
 	bnxt_hwrm_func_buf_unrgtr(bp);
 	bnxt_free_int(bp);
 	bnxt_free_mem(bp);
@@ -3438,7 +3477,7 @@ bnxt_init_log(void)
 {
 	bnxt_logtype_driver = rte_log_register("pmd.bnxt.driver");
 	if (bnxt_logtype_driver >= 0)
-		rte_log_set_level(bnxt_logtype_driver, RTE_LOG_NOTICE);
+		rte_log_set_level(bnxt_logtype_driver, RTE_LOG_INFO);
 }
 
 RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd);
diff --git a/drivers/net/bnxt/bnxt_filter.c b/drivers/net/bnxt/bnxt_filter.c
index 032e8eed..e36da997 100644
--- a/drivers/net/bnxt/bnxt_filter.c
+++ b/drivers/net/bnxt/bnxt_filter.c
@@ -1,38 +1,11 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) Broadcom Limited.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *    * Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *    * Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in
- *      the documentation and/or other materials provided with the
- *      distribution.
- *    * Neither the name of Broadcom Corporation nor the names of its
- *      contributors may be used to endorse or promote products derived
- *      from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
 */
 
 #include
+#include
 #include
 #include
 #include
@@ -96,9 +69,9 @@ void bnxt_init_filters(struct bnxt *bp)
 	STAILQ_INIT(&bp->free_filter_list);
 	for (i = 0; i < max_filters; i++) {
 		filter = &bp->filter_info[i];
-		filter->fw_l2_filter_id = -1;
-		filter->fw_em_filter_id = -1;
-		filter->fw_ntuple_filter_id = -1;
+		filter->fw_l2_filter_id = UINT64_MAX;
+		filter->fw_em_filter_id = UINT64_MAX;
+		filter->fw_ntuple_filter_id = UINT64_MAX;
 		STAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next);
 	}
 }
@@ -159,6 +132,14 @@ void bnxt_free_filter_mem(struct bnxt *bp)
 
 	rte_free(bp->filter_info);
 	bp->filter_info = NULL;
+
+	for (i = 0; i < bp->pf.max_vfs; i++) {
+		STAILQ_FOREACH(filter, &bp->pf.vf_info[i].filter, next) {
+			rte_free(filter);
+			STAILQ_REMOVE(&bp->pf.vf_info[i].filter, filter,
+				      bnxt_filter_info, next);
+		}
+	}
 }
 
 int bnxt_alloc_filter_mem(struct bnxt *bp)
@@ -250,7 +231,7 @@ nxt_non_void_action(const struct rte_flow_action *cur)
 	}
 }
 
-int check_zero_bytes(const uint8_t *bytes, int len)
+int bnxt_check_zero_bytes(const uint8_t *bytes, int len)
 {
 	int i;
 	for (i = 0; i < len; i++)
@@ -302,6 +283,7 @@ bnxt_filter_type_check(const struct rte_flow_item pattern[],
 
 static int
 bnxt_validate_and_parse_flow_type(struct bnxt *bp,
+				  const struct rte_flow_attr *attr,
 				  const struct rte_flow_item pattern[],
 				  struct rte_flow_error *error,
 				  struct bnxt_filter_info *filter)
@@ -326,6 +308,7 @@ bnxt_validate_and_parse_flow_type(struct bnxt *bp,
 	uint32_t vf = 0;
 	int use_ntuple;
 	uint32_t en = 0;
+	uint32_t en_ethertype;
 	int dflt_vnic;
 
 	use_ntuple = bnxt_filter_type_check(pattern, error);
@@ -335,6 +318,9 @@ bnxt_validate_and_parse_flow_type(struct bnxt *bp,
 
 	filter->filter_type = use_ntuple ?
 		HWRM_CFA_NTUPLE_FILTER : HWRM_CFA_EM_FILTER;
+	en_ethertype = use_ntuple ?
+		NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE :
+		EM_FLOW_ALLOC_INPUT_EN_ETHERTYPE;
 
 	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
 		if (item->last) {
@@ -354,8 +340,8 @@ bnxt_validate_and_parse_flow_type(struct bnxt *bp,
 		}
 		switch (item->type) {
 		case RTE_FLOW_ITEM_TYPE_ETH:
-			eth_spec = (const struct rte_flow_item_eth *)item->spec;
-			eth_mask = (const struct rte_flow_item_eth *)item->mask;
+			eth_spec = item->spec;
+			eth_mask = item->mask;
 
 			/* Source MAC address mask cannot be partially set.
 			 * Should be All 0's or all 1's.
@@ -374,7 +360,8 @@ bnxt_validate_and_parse_flow_type(struct bnxt *bp,
 			}
 
 			/* Mask is not allowed. Only exact matches are */
-			if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
+			if (eth_mask->type &&
+			    eth_mask->type != RTE_BE16(0xffff)) {
 				rte_flow_error_set(error, EINVAL,
 						   RTE_FLOW_ERROR_TYPE_ITEM,
 						   item,
@@ -400,41 +387,58 @@ bnxt_validate_and_parse_flow_type(struct bnxt *bp,
 			 *	RTE_LOG(ERR, PMD, "Handle this condition\n");
 			 * }
 			 */
-			if (eth_spec->type) {
+			if (eth_mask->type) {
 				filter->ethertype =
 					rte_be_to_cpu_16(eth_spec->type);
-				en |= use_ntuple ?
-					NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE :
-					EM_FLOW_ALLOC_INPUT_EN_ETHERTYPE;
+				en |= en_ethertype;
 			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_VLAN:
-			vlan_spec =
-				(const struct rte_flow_item_vlan *)item->spec;
-			vlan_mask =
-				(const struct rte_flow_item_vlan *)item->mask;
-			if (vlan_mask->tci & 0xFFFF && !vlan_mask->tpid) {
+			vlan_spec = item->spec;
+			vlan_mask = item->mask;
+			if (en & en_ethertype) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "VLAN TPID matching is not"
+						   " supported");
+				return -rte_errno;
+			}
+			if (vlan_mask->tci &&
+			    vlan_mask->tci == RTE_BE16(0x0fff)) {
 				/* Only the VLAN ID can be matched. */
 				filter->l2_ovlan =
 					rte_be_to_cpu_16(vlan_spec->tci &
-							 0xFFF);
+							 RTE_BE16(0x0fff));
 				en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;
-			} else {
+			} else if (vlan_mask->tci) {
 				rte_flow_error_set(error, EINVAL,
 						   RTE_FLOW_ERROR_TYPE_ITEM,
 						   item,
 						   "VLAN mask is invalid");
 				return -rte_errno;
 			}
+			if (vlan_mask->inner_type &&
+			    vlan_mask->inner_type != RTE_BE16(0xffff)) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "inner ethertype mask not"
+						   " valid");
+				return -rte_errno;
+			}
+			if (vlan_mask->inner_type) {
+				filter->ethertype =
+					rte_be_to_cpu_16(vlan_spec->inner_type);
+				en |= en_ethertype;
+			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_IPV4:
 			/* If mask is not involved, we could use EM filters. */
-			ipv4_spec =
-				(const struct rte_flow_item_ipv4 *)item->spec;
-			ipv4_mask =
-				(const struct rte_flow_item_ipv4 *)item->mask;
+			ipv4_spec = item->spec;
+			ipv4_mask = item->mask;
 			/* Only IP DST and SRC fields are maskable. */
 			if (ipv4_mask->hdr.version_ihl ||
 			    ipv4_mask->hdr.type_of_service ||
@@ -483,10 +487,8 @@ bnxt_validate_and_parse_flow_type(struct bnxt *bp,
 			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_IPV6:
-			ipv6_spec =
-				(const struct rte_flow_item_ipv6 *)item->spec;
-			ipv6_mask =
-				(const struct rte_flow_item_ipv6 *)item->mask;
+			ipv6_spec = item->spec;
+			ipv6_mask = item->mask;
 
 			/* Only IP DST and SRC fields are maskable. */
 			if (ipv6_mask->hdr.vtc_flow ||
@@ -510,13 +512,15 @@ bnxt_validate_and_parse_flow_type(struct bnxt *bp,
 				   ipv6_spec->hdr.src_addr, 16);
 			rte_memcpy(filter->dst_ipaddr,
 				   ipv6_spec->hdr.dst_addr, 16);
-			if (!check_zero_bytes(ipv6_mask->hdr.src_addr, 16)) {
+			if (!bnxt_check_zero_bytes(ipv6_mask->hdr.src_addr,
+						   16)) {
 				rte_memcpy(filter->src_ipaddr_mask,
 					   ipv6_mask->hdr.src_addr, 16);
 				en |= !use_ntuple ? 0 :
 				    NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
 			}
-			if (!check_zero_bytes(ipv6_mask->hdr.dst_addr, 16)) {
+			if (!bnxt_check_zero_bytes(ipv6_mask->hdr.dst_addr,
+						   16)) {
 				rte_memcpy(filter->dst_ipaddr_mask,
 					   ipv6_mask->hdr.dst_addr, 16);
 				en |= !use_ntuple ? 0 :
@@ -527,8 +531,8 @@ bnxt_validate_and_parse_flow_type(struct bnxt *bp,
 			    EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
 			break;
 		case RTE_FLOW_ITEM_TYPE_TCP:
-			tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
-			tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+			tcp_spec = item->spec;
+			tcp_mask = item->mask;
 
 			/* Check TCP mask. Only DST & SRC ports are maskable */
 			if (tcp_mask->hdr.sent_seq ||
@@ -564,8 +568,8 @@ bnxt_validate_and_parse_flow_type(struct bnxt *bp,
 			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_UDP:
-			udp_spec = (const struct rte_flow_item_udp *)item->spec;
-			udp_mask = (const struct rte_flow_item_udp *)item->mask;
+			udp_spec = item->spec;
+			udp_mask = item->mask;
 
 			if (udp_mask->hdr.dgram_len ||
 			    udp_mask->hdr.dgram_cksum) {
@@ -597,10 +601,8 @@ bnxt_validate_and_parse_flow_type(struct bnxt *bp,
 			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_VXLAN:
-			vxlan_spec =
-				(const struct rte_flow_item_vxlan *)item->spec;
-			vxlan_mask =
-				(const struct rte_flow_item_vxlan *)item->mask;
+			vxlan_spec = item->spec;
+			vxlan_mask = item->mask;
 			/* Check if VXLAN item is used to describe protocol.
 			 * If yes, both spec and mask should be NULL.
 			 * If no, both spec and mask shouldn't be NULL.
@@ -646,10 +648,8 @@ bnxt_validate_and_parse_flow_type(struct bnxt *bp,
 			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_NVGRE:
-			nvgre_spec =
-				(const struct rte_flow_item_nvgre *)item->spec;
-			nvgre_mask =
-				(const struct rte_flow_item_nvgre *)item->mask;
+			nvgre_spec = item->spec;
+			nvgre_mask = item->mask;
 			/* Check if NVGRE item is used to describe protocol.
 			 * If yes, both spec and mask should be NULL.
 			 * If no, both spec and mask shouldn't be NULL.
@@ -692,7 +692,7 @@ bnxt_validate_and_parse_flow_type(struct bnxt *bp,
 			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_VF:
-			vf_spec = (const struct rte_flow_item_vf *)item->spec;
+			vf_spec = item->spec;
 			vf = vf_spec->id;
 			if (!BNXT_PF(bp)) {
 				rte_flow_error_set(error, EINVAL,
@@ -710,6 +710,16 @@ bnxt_validate_and_parse_flow_type(struct bnxt *bp,
 				return -rte_errno;
 			}
 
+			if (!attr->transfer) {
+				rte_flow_error_set(error, ENOTSUP,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Matching VF traffic without"
+						   " affecting it (transfer attribute)"
+						   " is unsupported");
+				return -rte_errno;
+			}
+
 			filter->mirror_vnic_id =
 			dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
 			if (dflt_vnic < 0) {
@@ -836,7 +846,8 @@ bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
 		goto ret;
 	}
 
-	rc = bnxt_validate_and_parse_flow_type(bp, pattern, error, filter);
+	rc = bnxt_validate_and_parse_flow_type(bp, attr, pattern, error,
+					       filter);
 	if (rc != 0)
 		goto ret;
 
@@ -844,7 +855,8 @@ bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
 	if (rc != 0)
 		goto ret;
 
 	//Since we support ingress attribute only - right now.
-	filter->flags = HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX;
+	if (filter->filter_type == HWRM_CFA_EM_FILTER)
+		filter->flags = HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX;
 
 	switch (act->type) {
 	case RTE_FLOW_ACTION_TYPE_QUEUE:
@@ -956,11 +968,6 @@ bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
 		goto ret;
 	}
 
-	if (filter1) {
-		bnxt_free_filter(bp, filter1);
-		filter1->fw_l2_filter_id = -1;
-	}
-
 	act = nxt_non_void_action(++act);
 	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
 		rte_flow_error_set(error, EINVAL,
@@ -997,7 +1004,7 @@ bnxt_flow_validate(struct rte_eth_dev *dev,
 	ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
 					   error, filter);
 	/* No need to hold on to this filter if we are just validating flow */
-	filter->fw_l2_filter_id = -1;
+	filter->fw_l2_filter_id = UINT64_MAX;
 	bnxt_free_filter(bp, filter);
 
 	return ret;
@@ -1186,8 +1193,8 @@ bnxt_flow_destroy(struct rte_eth_dev *dev,
 		ret = bnxt_hwrm_clear_em_filter(bp, filter);
 	if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
 		ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);
-
-	bnxt_hwrm_clear_l2_filter(bp, filter);
+	else
+		ret = bnxt_hwrm_clear_l2_filter(bp, filter);
 	if (!ret) {
 		STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
 		rte_free(flow);
diff --git a/drivers/net/bnxt/bnxt_filter.h b/drivers/net/bnxt/bnxt_filter.h
index a3c702df..d27be703 100644
--- a/drivers/net/bnxt/bnxt_filter.h
+++ b/drivers/net/bnxt/bnxt_filter.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) Broadcom Limited.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *    * Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *    * Redistributions in binary form must reproduce the above copyright
- *      notice, this list of conditions and the following disclaimer in
- *      the documentation and/or other materials provided with the
- *      distribution.
- *    * Neither the name of Broadcom Corporation nor the names of its
- *      contributors may be used to endorse or promote products derived
- *      from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
*/ #ifndef _BNXT_FILTER_H_ @@ -97,7 +69,7 @@ struct bnxt_filter_info *bnxt_get_unused_filter(struct bnxt *bp); void bnxt_free_filter(struct bnxt *bp, struct bnxt_filter_info *filter); struct bnxt_filter_info *bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf, struct bnxt_vnic_info *vnic); -int check_zero_bytes(const uint8_t *bytes, int len); +int bnxt_check_zero_bytes(const uint8_t *bytes, int len); #define NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR \ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c index b7843afe..d6fdc1b8 100644 --- a/drivers/net/bnxt/bnxt_hwrm.c +++ b/drivers/net/bnxt/bnxt_hwrm.c @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) Broadcom Limited. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Broadcom Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. 
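[Annotation] The check_zero_bytes rename above only adds the bnxt_ driver prefix so the symbol cannot clash with another PMD in a statically linked application; the prototype is otherwise unchanged. The body is not part of this hunk, so the following is only a plausible sketch of what a helper with that prototype does:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative only: returns 1 if all len bytes are zero, else 0. */
    static int bnxt_check_zero_bytes(const uint8_t *bytes, int len)
    {
        int i;

        for (i = 0; i < len; i++)
            if (bytes[i] != 0)
                return 0;
        return 1;
    }

    int main(void)
    {
        uint8_t mac[6] = { 0 };
        printf("all-zero: %d\n", bnxt_check_zero_bytes(mac, 6));
        return 0;
    }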
*/ #include @@ -55,6 +27,8 @@ #include #define HWRM_CMD_TIMEOUT 10000 +#define HWRM_SPEC_CODE_1_8_3 0x10803 +#define HWRM_VERSION_1_9_1 0x10901 struct bnxt_plcmodes_cfg { uint32_t flags; @@ -115,7 +89,7 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg, short_input.req_type = rte_cpu_to_le_16(req->req_type); short_input.signature = rte_cpu_to_le_16( - HWRM_SHORT_REQ_SIGNATURE_SHORT_CMD); + HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD); short_input.size = rte_cpu_to_le_16(msg_len); short_input.req_addr = rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr); @@ -248,6 +222,9 @@ int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr; uint32_t mask = 0; + if (vnic->fw_vnic_id == INVALID_HW_RING_ID) + return rc; + HWRM_PREP(req, CFA_L2_SET_RX_MASK); req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id); @@ -344,7 +321,7 @@ int bnxt_hwrm_clear_l2_filter(struct bnxt *bp, HWRM_CHECK_RESULT(); HWRM_UNLOCK(); - filter->fw_l2_filter_id = -1; + filter->fw_l2_filter_id = UINT64_MAX; return 0; } @@ -436,16 +413,18 @@ int bnxt_hwrm_ptp_cfg(struct bnxt *bp) HWRM_PREP(req, PORT_MAC_CFG); if (ptp->rx_filter) - flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE; + flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_ENABLE; else - flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE; + flags |= + HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_DISABLE; if (ptp->tx_tstamp_en) - flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE; + flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_ENABLE; else - flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE; + flags |= + HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_DISABLE; req.flags = rte_cpu_to_le_32(flags); - req.enables = - rte_cpu_to_le_32(PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE); + req.enables = rte_cpu_to_le_32 + (HWRM_PORT_MAC_CFG_INPUT_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE); req.rx_ts_capture_ptp_msg_type = rte_cpu_to_le_16(ptp->rxctl); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); @@ -473,7 +452,7 @@ static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp) HWRM_CHECK_RESULT(); - if (!(resp->flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS)) + if (!(resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS)) return 0; ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0); @@ -505,7 +484,7 @@ static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp) return 0; } -int bnxt_hwrm_func_qcaps(struct bnxt *bp) +static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) { int rc = 0; struct hwrm_func_qcaps_input req = {.req_type = 0 }; @@ -595,6 +574,20 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp) return rc; } +int bnxt_hwrm_func_qcaps(struct bnxt *bp) +{ + int rc; + + rc = __bnxt_hwrm_func_qcaps(bp); + if (!rc && bp->hwrm_spec_code >= HWRM_SPEC_CODE_1_8_3) { + rc = bnxt_hwrm_func_resc_qcaps(bp); + if (!rc) + bp->flags |= BNXT_FLAG_NEW_RM; + } + + return rc; +} + int bnxt_hwrm_func_reset(struct bnxt *bp) { int rc = 0; @@ -631,10 +624,19 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp) if (BNXT_PF(bp)) { req.enables |= rte_cpu_to_le_32( - HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_INPUT_FWD); + HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD); memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd, RTE_MIN(sizeof(req.vf_req_fwd), sizeof(bp->pf.vf_req_fwd))); + + /* + * PF can sniff HWRM API issued by VF. This can be set up by + * linux driver and inherited by the DPDK PF driver. Clear + * this HWRM sniffer list in FW because DPDK PF driver does + * not support this. 
+ */ + req.flags = + rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE); } req.async_event_fwd[0] |= @@ -655,6 +657,64 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp) return rc; } +int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp) +{ + int rc; + struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_func_vf_cfg_input req = {0}; + + HWRM_PREP(req, FUNC_VF_CFG); + + req.enables = rte_cpu_to_le_32 + (HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS | + HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS | + HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_STAT_CTXS | + HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_CMPL_RINGS | + HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS); + + req.num_tx_rings = rte_cpu_to_le_16(bp->tx_nr_rings); + req.num_rx_rings = rte_cpu_to_le_16(bp->rx_nr_rings * + AGG_RING_MULTIPLIER); + req.num_stat_ctxs = rte_cpu_to_le_16(bp->rx_nr_rings + bp->tx_nr_rings); + req.num_cmpl_rings = rte_cpu_to_le_16(bp->rx_nr_rings + + bp->tx_nr_rings); + req.num_hw_ring_grps = rte_cpu_to_le_16(bp->rx_nr_rings); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + return rc; +} + +int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp) +{ + int rc; + struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_func_resource_qcaps_input req = {0}; + + HWRM_PREP(req, FUNC_RESOURCE_QCAPS); + req.fid = rte_cpu_to_le_16(0xffff); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + + HWRM_CHECK_RESULT(); + + if (BNXT_VF(bp)) { + bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx); + bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings); + bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings); + bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings); + bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps); + bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs); + bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics); + bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx); + } + + HWRM_UNLOCK(); + return rc; +} + int bnxt_hwrm_ver_get(struct bnxt *bp) { int rc = 0; @@ -678,11 +738,13 @@ int bnxt_hwrm_ver_get(struct bnxt *bp) HWRM_CHECK_RESULT(); PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d\n", - resp->hwrm_intf_maj, resp->hwrm_intf_min, - resp->hwrm_intf_upd, - resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld); - bp->fw_ver = (resp->hwrm_fw_maj << 24) | (resp->hwrm_fw_min << 16) | - (resp->hwrm_fw_bld << 8) | resp->hwrm_fw_rsvd; + resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b, + resp->hwrm_intf_upd_8b, resp->hwrm_fw_maj_8b, + resp->hwrm_fw_min_8b, resp->hwrm_fw_bld_8b); + bp->fw_ver = (resp->hwrm_fw_maj_8b << 24) | + (resp->hwrm_fw_min_8b << 16) | + (resp->hwrm_fw_bld_8b << 8) | + resp->hwrm_fw_rsvd_8b; PMD_DRV_LOG(INFO, "Driver HWRM version: %d.%d.%d\n", HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE); @@ -690,11 +752,12 @@ int bnxt_hwrm_ver_get(struct bnxt *bp) my_version |= HWRM_VERSION_MINOR << 8; my_version |= HWRM_VERSION_UPDATE; - fw_version = resp->hwrm_intf_maj << 16; - fw_version |= resp->hwrm_intf_min << 8; - fw_version |= resp->hwrm_intf_upd; + fw_version = resp->hwrm_intf_maj_8b << 16; + fw_version |= resp->hwrm_intf_min_8b << 8; + fw_version |= resp->hwrm_intf_upd_8b; + bp->hwrm_spec_code = fw_version; - if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) { + if (resp->hwrm_intf_maj_8b != HWRM_VERSION_MAJOR) { PMD_DRV_LOG(ERR, "Unsupported firmware API version\n"); rc = -EINVAL; goto error; @@ -750,7 +813,7 @@ int bnxt_hwrm_ver_get(struct bnxt *bp) if ((dev_caps_cfg & 
HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) && (dev_caps_cfg & - HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_INPUTUIRED)) { + HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) { PMD_DRV_LOG(DEBUG, "Short command supported\n"); rte_free(bp->hwrm_short_cmd_req_addr); @@ -919,9 +982,15 @@ int bnxt_hwrm_queue_qportcfg(struct bnxt *bp) int rc = 0; struct hwrm_queue_qportcfg_input req = {.req_type = 0 }; struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr; + int i; HWRM_PREP(req, QUEUE_QPORTCFG); + req.flags = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX; + /* HWRM Version >= 1.9.1 */ + if (bp->hwrm_spec_code >= HWRM_VERSION_1_9_1) + req.drv_qmap_cap = + HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED; rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); HWRM_CHECK_RESULT(); @@ -941,6 +1010,20 @@ int bnxt_hwrm_queue_qportcfg(struct bnxt *bp) HWRM_UNLOCK(); + if (bp->hwrm_spec_code < HWRM_VERSION_1_9_1) { + bp->tx_cosq_id = bp->cos_queue[0].id; + } else { + /* iterate and find the COSq profile to use for Tx */ + for (i = 0; i < BNXT_COS_QUEUE_COUNT; i++) { + if (bp->cos_queue[i].profile == + HWRM_QUEUE_SERVICE_PROFILE_LOSSY) { + bp->tx_cosq_id = bp->cos_queue[i].id; + break; + } + } + } + PMD_DRV_LOG(DEBUG, "Tx Cos Queue to use: %d\n", bp->tx_cosq_id); + return rc; } @@ -964,7 +1047,7 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp, switch (ring_type) { case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX: - req.queue_id = bp->cos_queue[0].id; + req.queue_id = rte_cpu_to_le_16(bp->tx_cosq_id); /* FALLTHROUGH */ case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX: req.ring_type = ring_type; @@ -1193,7 +1276,8 @@ int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic) HWRM_PREP(req, VNIC_ALLOC); if (vnic->func_default) - req.flags = HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT; + req.flags = + rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); HWRM_CHECK_RESULT(); @@ -1214,7 +1298,7 @@ static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp, HWRM_PREP(req, VNIC_PLCMODES_QCFG); - req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id); + req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); @@ -1242,7 +1326,7 @@ static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp, HWRM_PREP(req, VNIC_PLCMODES_CFG); - req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id); + req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id); req.flags = rte_cpu_to_le_32(pmode->flags); req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh); req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset); @@ -1451,6 +1535,7 @@ int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp, HWRM_PREP(req, VNIC_RSS_CFG); req.hash_type = rte_cpu_to_le_32(vnic->hash_type); + req.hash_mode_flags = vnic->hash_mode; req.ring_grp_tbl_addr = rte_cpu_to_le_64(vnic->rss_table_dma_addr); @@ -1486,7 +1571,7 @@ int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp, size -= RTE_PKTMBUF_HEADROOM; req.jumbo_thresh = rte_cpu_to_le_16(size); - req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id); + req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); @@ -1517,12 +1602,12 @@ int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp, HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO | HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN | HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ); - req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id); req.max_agg_segs = rte_cpu_to_le_16(5); req.max_aggs = rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX); req.min_agg_len = rte_cpu_to_le_32(512); } + 
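[Annotation] The new HWRM_SPEC_CODE_1_8_3 (0x10803) and HWRM_VERSION_1_9_1 (0x10901) constants match the hwrm_spec_code packing the driver now derives in bnxt_hwrm_ver_get(): major<<16 | minor<<8 | update. Feature gates (resource-manager qcaps, Tx CoS queue selection by service profile) then become plain integer comparisons. A worked example:

    #include <stdint.h>
    #include <stdio.h>

    #define HWRM_SPEC_CODE_1_8_3 0x10803
    #define HWRM_VERSION_1_9_1   0x10901

    /* Same packing used for bp->hwrm_spec_code in the hunks above. */
    static uint32_t spec_code(uint8_t maj, uint8_t min, uint8_t upd)
    {
        return ((uint32_t)maj << 16) | ((uint32_t)min << 8) | upd;
    }

    int main(void)
    {
        uint32_t fw = spec_code(1, 9, 2);

        printf("1.8.3 packs to 0x%x\n", spec_code(1, 8, 3)); /* 0x10803 */
        if (fw >= HWRM_SPEC_CODE_1_8_3)
            printf("query resource caps\n");
        if (fw >= HWRM_VERSION_1_9_1)
            printf("select Tx CoS queue by service profile\n");
        return 0;
    }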
req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); @@ -1607,10 +1692,9 @@ int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid, stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes); stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes); - stats->ierrors = rte_le_to_cpu_64(resp->rx_err_pkts); - stats->oerrors = rte_le_to_cpu_64(resp->tx_err_pkts); - - stats->imissed = rte_le_to_cpu_64(resp->rx_drop_pkts); + stats->imissed = rte_le_to_cpu_64(resp->rx_discard_pkts); + stats->ierrors = rte_le_to_cpu_64(resp->rx_drop_pkts); + stats->oerrors = rte_le_to_cpu_64(resp->tx_discard_pkts); HWRM_UNLOCK(); @@ -1755,7 +1839,7 @@ int bnxt_free_all_hwrm_rings(struct bnxt *bp) struct bnxt_tx_ring_info *txr = txq->tx_ring; struct bnxt_ring *ring = txr->tx_ring_struct; struct bnxt_cp_ring_info *cpr = txq->cp_ring; - unsigned int idx = bp->rx_cp_nr_rings + i + 1; + unsigned int idx = bp->rx_cp_nr_rings + i; if (ring->fw_ring_id != INVALID_HW_RING_ID) { bnxt_hwrm_ring_free(bp, ring, @@ -1781,13 +1865,12 @@ int bnxt_free_all_hwrm_rings(struct bnxt *bp) struct bnxt_rx_ring_info *rxr = rxq->rx_ring; struct bnxt_ring *ring = rxr->rx_ring_struct; struct bnxt_cp_ring_info *cpr = rxq->cp_ring; - unsigned int idx = i + 1; if (ring->fw_ring_id != INVALID_HW_RING_ID) { bnxt_hwrm_ring_free(bp, ring, HWRM_RING_FREE_INPUT_RING_TYPE_RX); ring->fw_ring_id = INVALID_HW_RING_ID; - bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID; + bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID; memset(rxr->rx_desc_ring, 0, rxr->rx_ring_struct->ring_size * sizeof(*rxr->rx_desc_ring)); @@ -1808,7 +1891,7 @@ int bnxt_free_all_hwrm_rings(struct bnxt *bp) bp->grp_info[i].ag_fw_ring_id = INVALID_HW_RING_ID; } if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) { - bnxt_free_cp_ring(bp, cpr, idx); + bnxt_free_cp_ring(bp, cpr, i); bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID; cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID; } @@ -1992,6 +2075,7 @@ static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed) switch (conf_link_speed) { case ETH_LINK_SPEED_10M_HD: case ETH_LINK_SPEED_100M_HD: + /* FALLTHROUGH */ return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF; } return hw_link_duplex; @@ -2012,6 +2096,7 @@ static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed) switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) { case ETH_LINK_SPEED_100M: case ETH_LINK_SPEED_100M_HD: + /* FALLTHROUGH */ eth_link_speed = HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB; break; @@ -2176,6 +2261,7 @@ static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex) switch (hw_link_duplex) { case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH: case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL: + /* FALLTHROUGH */ eth_link_duplex = ETH_LINK_FULL_DUPLEX; break; case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF: @@ -2305,6 +2391,7 @@ int bnxt_hwrm_func_qcfg(struct bnxt *bp) case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0: case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5: case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0: + /* FALLTHROUGH */ bp->port_partition_type = resp->port_partition_type; break; default: @@ -2362,7 +2449,8 @@ static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings) req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags); req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU); req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN + - ETHER_CRC_LEN + VLAN_TAG_SIZE); + ETHER_CRC_LEN + VLAN_TAG_SIZE * + BNXT_NUM_VLANS); req.num_rsscos_ctxs = 
rte_cpu_to_le_16(bp->max_rsscos_ctx); req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx); req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings); @@ -2399,9 +2487,11 @@ static void populate_vf_func_cfg_req(struct bnxt *bp, HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS); req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN + - ETHER_CRC_LEN + VLAN_TAG_SIZE); + ETHER_CRC_LEN + VLAN_TAG_SIZE * + BNXT_NUM_VLANS); req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN + - ETHER_CRC_LEN + VLAN_TAG_SIZE); + ETHER_CRC_LEN + VLAN_TAG_SIZE * + BNXT_NUM_VLANS); req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx / (num_vfs + 1)); req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1)); @@ -2766,9 +2856,9 @@ int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp) req.req_buf_page_size = rte_cpu_to_le_16( page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN)); req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN); - req.req_buf_page_addr[0] = + req.req_buf_page_addr0 = rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf)); - if (req.req_buf_page_addr[0] == 0) { + if (req.req_buf_page_addr0 == 0) { PMD_DRV_LOG(ERR, "unable to map buffer address to physical memory\n"); return -ENOMEM; @@ -2915,6 +3005,18 @@ int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf) return rc; } +int bnxt_hwrm_set_async_event_cr(struct bnxt *bp) +{ + int rc; + + if (BNXT_PF(bp)) + rc = bnxt_hwrm_func_cfg_def_cp(bp); + else + rc = bnxt_hwrm_vf_func_cfg_def_cp(bp); + + return rc; +} + int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id, void *encaped, size_t ec_size) { @@ -3029,9 +3131,6 @@ int bnxt_hwrm_port_qstats(struct bnxt *bp) struct bnxt_pf_info *pf = &bp->pf; int rc; - if (!(bp->flags & BNXT_FLAG_PORT_STATS)) - return 0; - HWRM_PREP(req, PORT_QSTATS); req.port_id = rte_cpu_to_le_16(pf->port_id); @@ -3588,8 +3687,8 @@ int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter) HWRM_CHECK_RESULT(); HWRM_UNLOCK(); - filter->fw_em_filter_id = -1; - filter->fw_l2_filter_id = -1; + filter->fw_em_filter_id = UINT64_MAX; + filter->fw_l2_filter_id = UINT64_MAX; return 0; } @@ -3700,7 +3799,8 @@ int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp, HWRM_CHECK_RESULT(); HWRM_UNLOCK(); - filter->fw_ntuple_filter_id = -1; + filter->fw_ntuple_filter_id = UINT64_MAX; + filter->fw_l2_filter_id = UINT64_MAX; return 0; } diff --git a/drivers/net/bnxt/bnxt_hwrm.h b/drivers/net/bnxt/bnxt_hwrm.h index f11e72a3..60a4ab16 100644 --- a/drivers/net/bnxt/bnxt_hwrm.h +++ b/drivers/net/bnxt/bnxt_hwrm.h @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) Broadcom Limited. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Broadcom Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. 
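[Annotation] The MRU hunks above reserve space for more than one VLAN tag. The arithmetic, with BNXT_NUM_VLANS assumed to be 2 (outer plus inner tag, defined elsewhere in the driver):

    #include <stdio.h>

    #define ETHER_HDR_LEN  14
    #define ETHER_CRC_LEN   4
    #define VLAN_TAG_SIZE   4
    #define BNXT_NUM_VLANS  2   /* assumed: room for a double-tagged frame */

    int main(void)
    {
        int mtu = 1500;
        /* Old formula budgeted a single tag; the new one also covers
         * QinQ: 1500 + 14 + 4 + 4*2 = 1526. */
        int mru = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
                  VLAN_TAG_SIZE * BNXT_NUM_VLANS;
        printf("mru = %d\n", mru);
        return 0;
    }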
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. */ #ifndef _BNXT_HWRM_H_ @@ -54,6 +26,9 @@ struct bnxt_cp_ring_info; #define ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE \ (1 << (HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE - 32)) +#define HWRM_QUEUE_SERVICE_PROFILE_LOSSY \ + HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSY + int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic); int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic, @@ -88,6 +63,7 @@ int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp); int bnxt_hwrm_queue_qportcfg(struct bnxt *bp); +int bnxt_hwrm_set_async_event_cr(struct bnxt *bp); int bnxt_hwrm_ring_alloc(struct bnxt *bp, struct bnxt_ring *ring, uint32_t ring_type, uint32_t map_index, @@ -135,6 +111,8 @@ int bnxt_alloc_hwrm_resources(struct bnxt *bp); int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link); int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up); int bnxt_hwrm_func_qcfg(struct bnxt *bp); +int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp); +int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp); int bnxt_hwrm_allocate_pf_only(struct bnxt *bp); int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs); int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, diff --git a/drivers/net/bnxt/bnxt_irq.c b/drivers/net/bnxt/bnxt_irq.c index 8ab98693..7ef7023e 100644 --- a/drivers/net/bnxt/bnxt_irq.c +++ b/drivers/net/bnxt/bnxt_irq.c @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2014-2015 Broadcom Corporation. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Broadcom Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. */ #include @@ -68,30 +40,10 @@ static void bnxt_int_handler(void *param) if (!CMP_VALID(cmp, raw_cons, cpr->cp_ring_struct)) break; - switch (CMP_TYPE(cmp)) { - case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT: - /* Handle any async event */ - bnxt_handle_async_event(bp, cmp); - break; - case CMPL_BASE_TYPE_HWRM_FWD_REQ: - /* Handle HWRM forwarded responses */ - bnxt_handle_fwd_req(bp, cmp); - break; - default: - /* Ignore any other events */ - if (cmp->type & rte_cpu_to_le_16(0x01)) { - if (!CMP_VALID(cmp, raw_cons, - cpr->cp_ring_struct)) - goto no_more; - } - PMD_DRV_LOG(INFO, - "Ignoring %02x completion\n", CMP_TYPE(cmp)); - break; - } + bnxt_event_hwrm_resp_handler(bp, cmp); raw_cons = NEXT_RAW_CMP(raw_cons); - }; -no_more: + cpr->cp_raw_cons = raw_cons; B_CP_DB_REARM(cpr, cpr->cp_raw_cons); } @@ -127,7 +79,9 @@ void bnxt_enable_int(struct bnxt *bp) { struct bnxt_cp_ring_info *cpr = bp->def_cp_ring; - B_CP_DB_ARM(cpr); + /* Only the default completion ring */ + if (cpr != NULL && cpr->cp_doorbell != NULL) + B_CP_DB_ARM(cpr); } int bnxt_setup_int(struct bnxt *bp) diff --git a/drivers/net/bnxt/bnxt_irq.h b/drivers/net/bnxt/bnxt_irq.h index 4d2f7af9..75ba2135 100644 --- a/drivers/net/bnxt/bnxt_irq.h +++ b/drivers/net/bnxt/bnxt_irq.h @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2014-2015 Broadcom Corporation. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Broadcom Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
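[Annotation] The interrupt-loop hunk above collapses the per-completion-type switch into one call to bnxt_event_hwrm_resp_handler(), so the Rx burst path can reuse the same entry point for stray event completions. A minimal sketch of the dispatch shape (the type codes here are hypothetical, not the real HWRM values):

    #include <stdio.h>

    #define CMPL_TYPE_ASYNC_EVENT 0x2e   /* hypothetical code */
    #define CMPL_TYPE_FWD_REQ     0x22   /* hypothetical code */

    /* Returns nonzero if the completion was an event and was consumed. */
    static int event_handler(unsigned int type)
    {
        switch (type) {
        case CMPL_TYPE_ASYNC_EVENT:
            printf("async event\n");
            return 1;
        case CMPL_TYPE_FWD_REQ:
            printf("forwarded VF request\n");
            return 1;
        default:
            return 0;    /* not an event completion */
        }
    }

    int main(void)
    {
        printf("%d %d\n", event_handler(CMPL_TYPE_ASYNC_EVENT),
               event_handler(0x11));
        return 0;
    }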
+/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. */ #ifndef _BNXT_IRQ_H_ diff --git a/drivers/net/bnxt/bnxt_nvm_defs.h b/drivers/net/bnxt/bnxt_nvm_defs.h index c5ccc9bc..ea9d4a9d 100644 --- a/drivers/net/bnxt/bnxt_nvm_defs.h +++ b/drivers/net/bnxt/bnxt_nvm_defs.h @@ -1,11 +1,6 @@ -/* Broadcom NetXtreme-C/E network driver. - * - * Copyright (c) 2014-2016 Broadcom Corporation - * Copyright (c) 2016-2017 Broadcom Limited - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation. +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. */ #ifndef _BNXT_NVM_DEFS_H_ diff --git a/drivers/net/bnxt/bnxt_ring.c b/drivers/net/bnxt/bnxt_ring.c index 8fb89721..bb9f6d1c 100644 --- a/drivers/net/bnxt/bnxt_ring.c +++ b/drivers/net/bnxt/bnxt_ring.c @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) Broadcom Limited. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Broadcom Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. */ #include @@ -52,11 +24,14 @@ void bnxt_free_ring(struct bnxt_ring *ring) { + if (!ring) + return; + if (ring->vmem_size && *ring->vmem) { memset((char *)*ring->vmem, 0, ring->vmem_size); *ring->vmem = NULL; } - rte_memzone_free((const struct rte_memzone *)ring->mem_zone); + ring->mem_zone = NULL; } /* @@ -89,22 +64,26 @@ int bnxt_init_ring_grps(struct bnxt *bp) * rx bd ring - Only non-zero length if rx_ring_info is not NULL */ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx, - struct bnxt_tx_ring_info *tx_ring_info, - struct bnxt_rx_ring_info *rx_ring_info, + struct bnxt_tx_queue *txq, + struct bnxt_rx_queue *rxq, struct bnxt_cp_ring_info *cp_ring_info, const char *suffix) { struct bnxt_ring *cp_ring = cp_ring_info->cp_ring_struct; + struct bnxt_rx_ring_info *rx_ring_info = rxq ? 
rxq->rx_ring : NULL; + struct bnxt_tx_ring_info *tx_ring_info = txq ? txq->tx_ring : NULL; struct bnxt_ring *tx_ring; struct bnxt_ring *rx_ring; struct rte_pci_device *pdev = bp->pdev; + uint64_t rx_offloads = bp->eth_dev->data->dev_conf.rxmode.offloads; const struct rte_memzone *mz = NULL; char mz_name[RTE_MEMZONE_NAMESIZE]; rte_iova_t mz_phys_addr; int sz; int stats_len = (tx_ring_info || rx_ring_info) ? - RTE_CACHE_LINE_ROUNDUP(sizeof(struct ctx_hw_stats64)) : 0; + RTE_CACHE_LINE_ROUNDUP(sizeof(struct hwrm_stat_ctx_query_output) - + sizeof (struct hwrm_resp_hdr)) : 0; int cp_vmem_start = stats_len; int cp_vmem_len = RTE_CACHE_LINE_ROUNDUP(cp_ring->vmem_size); @@ -155,7 +134,7 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx, sizeof(struct bnxt_tpa_info)) : 0; int total_alloc_len = tpa_info_start; - if (bp->eth_dev->data->dev_conf.rxmode.enable_lro) + if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) total_alloc_len += tpa_info_len; snprintf(mz_name, RTE_MEMZONE_NAMESIZE, @@ -166,10 +145,11 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx, mz = rte_memzone_lookup(mz_name); if (!mz) { mz = rte_memzone_reserve_aligned(mz_name, total_alloc_len, - SOCKET_ID_ANY, - RTE_MEMZONE_2MB | - RTE_MEMZONE_SIZE_HINT_ONLY, - getpagesize()); + SOCKET_ID_ANY, + RTE_MEMZONE_2MB | + RTE_MEMZONE_SIZE_HINT_ONLY | + RTE_MEMZONE_IOVA_CONTIG, + getpagesize()); if (mz == NULL) return -ENOMEM; } @@ -191,6 +171,7 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx, } if (tx_ring_info) { + txq->mz = mz; tx_ring = tx_ring_info->tx_ring_struct; tx_ring->bd = ((char *)mz->addr + tx_ring_start); @@ -210,6 +191,7 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx, } if (rx_ring_info) { + rxq->mz = mz; rx_ring = rx_ring_info->rx_ring_struct; rx_ring->bd = ((char *)mz->addr + rx_ring_start); @@ -252,7 +234,7 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx, ag_bitmap_start, ag_bitmap_len); /* TPA info */ - if (bp->eth_dev->data->dev_conf.rxmode.enable_lro) + if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) rx_ring_info->tpa_info = ((struct bnxt_tpa_info *)((char *)mz->addr + tpa_info_start)); @@ -283,7 +265,6 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx, */ int bnxt_alloc_hwrm_rings(struct bnxt *bp) { - struct rte_pci_device *pci_dev = bp->pdev; unsigned int i; int rc = 0; @@ -293,33 +274,48 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp) struct bnxt_ring *cp_ring = cpr->cp_ring_struct; struct bnxt_rx_ring_info *rxr = rxq->rx_ring; struct bnxt_ring *ring = rxr->rx_ring_struct; - unsigned int idx = i + 1; - unsigned int map_idx = idx + bp->rx_cp_nr_rings; + unsigned int map_idx = i + bp->rx_cp_nr_rings; bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id; /* Rx cmpl */ - rc = bnxt_hwrm_ring_alloc(bp, cp_ring, - HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL, - idx, HWRM_NA_SIGNATURE, - HWRM_NA_SIGNATURE); + rc = bnxt_hwrm_ring_alloc + (bp, + cp_ring, + HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL, + i, + HWRM_NA_SIGNATURE, + HWRM_NA_SIGNATURE); if (rc) goto err_out; - cpr->cp_doorbell = (char *)pci_dev->mem_resource[2].addr + - idx * 0x80; + cpr->cp_doorbell = (char *)bp->doorbell_base + i * 0x80; bp->grp_info[i].cp_fw_ring_id = cp_ring->fw_ring_id; B_CP_DIS_DB(cpr, cpr->cp_raw_cons); + if (!i) { + /* + * In order to save completion resource, use the first + * completion ring from PF or VF as the default + * completion ring for async event & HWRM + * forward response handling. 
+ */ + bp->def_cp_ring = cpr; + rc = bnxt_hwrm_set_async_event_cr(bp); + if (rc) + goto err_out; + } + /* Rx ring */ - rc = bnxt_hwrm_ring_alloc(bp, ring, - HWRM_RING_ALLOC_INPUT_RING_TYPE_RX, - idx, cpr->hw_stats_ctx_id, - cp_ring->fw_ring_id); + rc = bnxt_hwrm_ring_alloc(bp, + ring, + HWRM_RING_ALLOC_INPUT_RING_TYPE_RX, + i, + cpr->hw_stats_ctx_id, + cp_ring->fw_ring_id); if (rc) goto err_out; rxr->rx_prod = 0; - rxr->rx_doorbell = (char *)pci_dev->mem_resource[2].addr + - idx * 0x80; + rxr->rx_doorbell = (char *)bp->doorbell_base + i * 0x80; bp->grp_info[i].rx_fw_ring_id = ring->fw_ring_id; B_RX_DB(rxr->rx_doorbell, rxr->rx_prod); @@ -338,9 +334,7 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp) goto err_out; PMD_DRV_LOG(DEBUG, "Alloc AGG Done!\n"); rxr->ag_prod = 0; - rxr->ag_doorbell = - (char *)pci_dev->mem_resource[2].addr + - map_idx * 0x80; + rxr->ag_doorbell = (char *)bp->doorbell_base + map_idx * 0x80; bp->grp_info[i].ag_fw_ring_id = ring->fw_ring_id; B_RX_DB(rxr->ag_doorbell, rxr->ag_prod); @@ -353,7 +347,7 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp) } B_RX_DB(rxr->rx_doorbell, rxr->rx_prod); B_RX_DB(rxr->ag_doorbell, rxr->ag_prod); - rxq->index = idx; + rxq->index = i; } for (i = 0; i < bp->tx_cp_nr_rings; i++) { @@ -362,7 +356,7 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp) struct bnxt_ring *cp_ring = cpr->cp_ring_struct; struct bnxt_tx_ring_info *txr = txq->tx_ring; struct bnxt_ring *ring = txr->tx_ring_struct; - unsigned int idx = i + 1 + bp->rx_cp_nr_rings; + unsigned int idx = i + bp->rx_cp_nr_rings; /* Tx cmpl */ rc = bnxt_hwrm_ring_alloc(bp, cp_ring, @@ -372,8 +366,7 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp) if (rc) goto err_out; - cpr->cp_doorbell = (char *)pci_dev->mem_resource[2].addr + - idx * 0x80; + cpr->cp_doorbell = (char *)bp->doorbell_base + idx * 0x80; B_CP_DIS_DB(cpr, cpr->cp_raw_cons); /* Tx ring */ @@ -384,8 +377,7 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp) if (rc) goto err_out; - txr->tx_doorbell = (char *)pci_dev->mem_resource[2].addr + - idx * 0x80; + txr->tx_doorbell = (char *)bp->doorbell_base + idx * 0x80; txq->index = idx; } diff --git a/drivers/net/bnxt/bnxt_ring.h b/drivers/net/bnxt/bnxt_ring.h index ebf7228e..65bf3e2f 100644 --- a/drivers/net/bnxt/bnxt_ring.h +++ b/drivers/net/bnxt/bnxt_ring.h @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) Broadcom Limited. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Broadcom Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. */ #ifndef _BNXT_RING_H_ @@ -56,6 +28,7 @@ #define BNXT_TPA_MAX 64 #define AGG_RING_SIZE_FACTOR 2 +#define AGG_RING_MULTIPLIER 2 /* These assume 4k pages */ #define MAX_RX_DESC_CNT (8 * 1024) @@ -93,8 +66,8 @@ struct bnxt_cp_ring_info; void bnxt_free_ring(struct bnxt_ring *ring); int bnxt_init_ring_grps(struct bnxt *bp); int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx, - struct bnxt_tx_ring_info *tx_ring_info, - struct bnxt_rx_ring_info *rx_ring_info, + struct bnxt_tx_queue *txq, + struct bnxt_rx_queue *rxq, struct bnxt_cp_ring_info *cp_ring_info, const char *suffix); int bnxt_alloc_hwrm_rings(struct bnxt *bp); diff --git a/drivers/net/bnxt/bnxt_rxq.c b/drivers/net/bnxt/bnxt_rxq.c index d49f3546..c55ddec4 100644 --- a/drivers/net/bnxt/bnxt_rxq.c +++ b/drivers/net/bnxt/bnxt_rxq.c @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) Broadcom Limited. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Broadcom Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. 
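[Annotation] The AGG_RING_MULTIPLIER just defined above is what sizes the FUNC_VF_CFG reservation earlier in this patch: every Rx ring also consumes an aggregation ring, while completion rings and stat contexts are sized for Rx plus Tx. Worked numbers:

    #include <stdio.h>

    #define AGG_RING_MULTIPLIER 2   /* as defined in the hunk above */

    int main(void)
    {
        int rx_nr_rings = 4, tx_nr_rings = 4;

        printf("rx rings:      %d\n", rx_nr_rings * AGG_RING_MULTIPLIER);
        printf("cmpl rings:    %d\n", rx_nr_rings + tx_nr_rings);
        printf("stat contexts: %d\n", rx_nr_rings + tx_nr_rings);
        printf("ring groups:   %d\n", rx_nr_rings);
        return 0;
    }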
*/ #include @@ -51,10 +23,8 @@ void bnxt_free_rxq_stats(struct bnxt_rx_queue *rxq) { - struct bnxt_cp_ring_info *cpr = rxq->cp_ring; - - if (cpr->hw_stats) - cpr->hw_stats = NULL; + if (rxq && rxq->cp_ring && rxq->cp_ring->hw_stats) + rxq->cp_ring->hw_stats = NULL; } int bnxt_mq_rx_configure(struct bnxt *bp) @@ -107,6 +77,7 @@ int bnxt_mq_rx_configure(struct bnxt *bp) switch (dev_conf->rxmode.mq_mode) { case ETH_MQ_RX_VMDQ_RSS: case ETH_MQ_RX_VMDQ_ONLY: + /* FALLTHROUGH */ /* ETH_8/64_POOLs */ pools = conf->nb_queue_pools; /* For each pool, allocate MACVLAN CFA rule & VNIC */ @@ -237,7 +208,8 @@ static void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq) if (rxq) { sw_ring = rxq->rx_ring->rx_buf_ring; if (sw_ring) { - for (i = 0; i < rxq->nb_rx_desc; i++) { + for (i = 0; + i < rxq->rx_ring->rx_ring_struct->ring_size; i++) { if (sw_ring[i].mbuf) { rte_pktmbuf_free_seg(sw_ring[i].mbuf); sw_ring[i].mbuf = NULL; @@ -247,7 +219,8 @@ static void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq) /* Free up mbufs in Agg ring */ sw_ring = rxq->rx_ring->ag_buf_ring; if (sw_ring) { - for (i = 0; i < rxq->nb_rx_desc; i++) { + for (i = 0; + i < rxq->rx_ring->ag_ring_struct->ring_size; i++) { if (sw_ring[i].mbuf) { rte_pktmbuf_free_seg(sw_ring[i].mbuf); sw_ring[i].mbuf = NULL; @@ -295,6 +268,8 @@ void bnxt_rx_queue_release_op(void *rx_queue) bnxt_free_ring(rxq->cp_ring->cp_ring_struct); bnxt_free_rxq_stats(rxq); + rte_memzone_free(rxq->mz); + rxq->mz = NULL; rte_free(rxq); } @@ -308,6 +283,7 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev, struct rte_mempool *mp) { struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; + uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads; struct bnxt_rx_queue *rxq; int rc = 0; @@ -315,7 +291,7 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev, PMD_DRV_LOG(ERR, "Cannot create Rx ring %d. Only %d rings available\n", queue_idx, bp->max_rx_rings); - return -ENOSPC; + return -EINVAL; } if (!nb_desc || nb_desc > MAX_RX_DESC_CNT) { @@ -350,12 +326,12 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev, rxq->queue_id = queue_idx; rxq->port_id = eth_dev->data->port_id; - rxq->crc_len = (uint8_t)((eth_dev->data->dev_conf.rxmode.hw_strip_crc) ? - 0 : ETHER_CRC_LEN); + rxq->crc_len = rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP ? + 0 : ETHER_CRC_LEN; eth_dev->data->rx_queues[queue_idx] = rxq; /* Allocate RX ring hardware descriptors */ - if (bnxt_alloc_rings(bp, queue_idx, NULL, rxq->rx_ring, rxq->cp_ring, + if (bnxt_alloc_rings(bp, queue_idx, NULL, rxq, rxq->cp_ring, "rxr")) { PMD_DRV_LOG(ERR, "ring_dma_zone_reserve for rx_ring failed!\n"); @@ -363,6 +339,7 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev, rc = -ENOMEM; goto out; } + rte_atomic64_init(&rxq->rx_mbuf_alloc_fail); out: return rc; diff --git a/drivers/net/bnxt/bnxt_rxq.h b/drivers/net/bnxt/bnxt_rxq.h index c7acaa75..8307f603 100644 --- a/drivers/net/bnxt/bnxt_rxq.h +++ b/drivers/net/bnxt/bnxt_rxq.h @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) Broadcom Limited. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
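[Annotation] The Rx queue setup hunk below replaces the old rxmode.hw_strip_crc boolean with a test of the per-port offload bitmask; the CRC length then folds into one ternary. A sketch with a stand-in bit value (not the real rte_ethdev constant):

    #include <stdint.h>
    #include <stdio.h>

    #define RX_OFFLOAD_CRC_STRIP 0x1000ULL  /* stand-in for the ethdev bit */
    #define ETHER_CRC_LEN 4

    int main(void)
    {
        uint64_t rx_offloads = RX_OFFLOAD_CRC_STRIP;
        uint8_t crc_len = (rx_offloads & RX_OFFLOAD_CRC_STRIP) ?
                          0 : ETHER_CRC_LEN;
        printf("crc_len = %u\n", crc_len);
        return 0;
    }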
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Broadcom Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. */ #ifndef _BNXT_RQX_H_ @@ -60,6 +32,8 @@ struct bnxt_rx_queue { uint32_t rx_buf_use_size; /* useable size */ struct bnxt_rx_ring_info *rx_ring; struct bnxt_cp_ring_info *cp_ring; + rte_atomic64_t rx_mbuf_alloc_fail; + const struct rte_memzone *mz; }; void bnxt_free_rxq_stats(struct bnxt_rx_queue *rxq); diff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c index aae9a635..9d884292 100644 --- a/drivers/net/bnxt/bnxt_rxr.c +++ b/drivers/net/bnxt/bnxt_rxr.c @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) Broadcom Limited. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Broadcom Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. 
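[Annotation] The rx_mbuf_alloc_fail field added to struct bnxt_rx_queue above moves the allocation-failure counter from one device-global atomic to one per queue. Its lifecycle in the surrounding hunks, as a minimal sketch using the DPDK atomic helpers:

    #include <stdio.h>
    #include <rte_atomic.h>

    int main(void)
    {
        rte_atomic64_t rx_mbuf_alloc_fail;

        rte_atomic64_init(&rx_mbuf_alloc_fail);     /* queue setup */
        rte_atomic64_inc(&rx_mbuf_alloc_fail);      /* one failed alloc */
        printf("fails: %ld\n",
               (long)rte_atomic64_read(&rx_mbuf_alloc_fail));
        rte_atomic64_clear(&rx_mbuf_alloc_fail);    /* stats reset */
        return 0;
    }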
*/ #include @@ -69,13 +41,14 @@ static inline int bnxt_alloc_rx_data(struct bnxt_rx_queue *rxq, mbuf = __bnxt_alloc_rx_data(rxq->mb_pool); if (!mbuf) { - rte_atomic64_inc(&rxq->bp->rx_mbuf_alloc_fail); + rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail); return -ENOMEM; } rx_buf->mbuf = mbuf; + mbuf->data_off = RTE_PKTMBUF_HEADROOM; - rxbd->addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf)); + rxbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf)); return 0; } @@ -90,7 +63,7 @@ static inline int bnxt_alloc_ag_data(struct bnxt_rx_queue *rxq, mbuf = __bnxt_alloc_rx_data(rxq->mb_pool); if (!mbuf) { - rte_atomic64_inc(&rxq->bp->rx_mbuf_alloc_fail); + rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail); return -ENOMEM; } @@ -101,8 +74,9 @@ static inline int bnxt_alloc_ag_data(struct bnxt_rx_queue *rxq, rx_buf->mbuf = mbuf; + mbuf->data_off = RTE_PKTMBUF_HEADROOM; - rxbd->addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf)); + rxbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf)); return 0; } @@ -123,7 +97,7 @@ static inline void bnxt_reuse_rx_mbuf(struct bnxt_rx_ring_info *rxr, prod_bd = &rxr->rx_desc_ring[prod]; - prod_bd->addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf)); + prod_bd->address = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf)); rxr->rx_prod = prod; } @@ -143,7 +117,7 @@ static void bnxt_reuse_ag_mbuf(struct bnxt_rx_ring_info *rxr, uint16_t cons, prod_bd = &rxr->ag_desc_ring[prod]; cons_bd = &rxr->ag_desc_ring[cons]; - prod_bd->addr = cons_bd->addr; + prod_bd->address = cons_bd->addr; } #endif @@ -327,7 +301,7 @@ static inline struct rte_mbuf *bnxt_tpa_end( struct rte_mbuf *new_data = __bnxt_alloc_rx_data(rxq->mb_pool); RTE_ASSERT(new_data != NULL); if (!new_data) { - rte_atomic64_inc(&rxq->bp->rx_mbuf_alloc_fail); + rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail); return NULL; } tpa_info->mbuf = new_data; @@ -490,11 +464,15 @@ static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt, if (likely(RX_CMP_IP_CS_OK(rxcmp1))) mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD; + else if (likely(RX_CMP_IP_CS_UNKNOWN(rxcmp1))) + mbuf->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN; else mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD; if (likely(RX_CMP_L4_CS_OK(rxcmp1))) mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD; + else if (likely(RX_CMP_L4_CS_UNKNOWN(rxcmp1))) + mbuf->ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN; else mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD; @@ -560,6 +538,7 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t prod = rxr->rx_prod; uint16_t ag_prod = rxr->ag_prod; int rc = 0; + bool evt = false; /* If Rx Q was stopped return */ if (rxq->rx_deferred_start) @@ -584,14 +563,19 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, nb_rx_pkts++; if (rc == -EBUSY) /* partial completion */ break; + } else { + evt = + bnxt_event_hwrm_resp_handler(rxq->bp, + (struct cmpl_base *)rxcmp); } + raw_cons = NEXT_RAW_CMP(raw_cons); - if (nb_rx_pkts == nb_pkts) + if (nb_rx_pkts == nb_pkts || evt) break; } cpr->cp_raw_cons = raw_cons; - if (prod == rxr->rx_prod && ag_prod == rxr->ag_prod) { + if ((prod == rxr->rx_prod && ag_prod == rxr->ag_prod) && !evt) { /* * For PMD, there is no need to keep on pushing to REARM * the doorbell if there are no new completions @@ -600,9 +584,12 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, } B_CP_DIS_DB(cpr, cpr->cp_raw_cons); - B_RX_DB(rxr->rx_doorbell, rxr->rx_prod); + if (prod != rxr->rx_prod) + B_RX_DB(rxr->rx_doorbell, rxr->rx_prod); + /* Ring the AGG ring DB */ - B_RX_DB(rxr->ag_doorbell, rxr->ag_prod); + if 
(ag_prod != rxr->ag_prod) + B_RX_DB(rxr->ag_doorbell, rxr->ag_prod); /* Attempt to alloc Rx buf in case of a previous allocation failure. */ if (rc == -ENOMEM) { @@ -755,7 +742,7 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq) if (rxq->rx_buf_use_size <= size) size = rxq->rx_buf_use_size; - type = RX_PROD_PKT_BD_TYPE_RX_PROD_PKT; + type = RX_PROD_PKT_BD_TYPE_RX_PROD_PKT | RX_PROD_PKT_BD_FLAGS_EOP_PAD; rxr = rxq->rx_ring; ring = rxr->rx_ring_struct; @@ -795,7 +782,7 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq) rxr->tpa_info[i].mbuf = __bnxt_alloc_rx_data(rxq->mb_pool); if (!rxr->tpa_info[i].mbuf) { - rte_atomic64_inc(&rxq->bp->rx_mbuf_alloc_fail); + rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail); return -ENOMEM; } } diff --git a/drivers/net/bnxt/bnxt_rxr.h b/drivers/net/bnxt/bnxt_rxr.h index f3ed49bd..5b28f032 100644 --- a/drivers/net/bnxt/bnxt_rxr.h +++ b/drivers/net/bnxt/bnxt_rxr.h @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) Broadcom Limited. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Broadcom Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. 
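[Annotation] The Rx burst hunk above now rings each doorbell only when its producer index actually advanced, avoiding needless uncached MMIO writes on idle polls. The pattern in isolation:

    #include <stdint.h>
    #include <stdio.h>

    /* Stands in for B_RX_DB(); a real doorbell is a device register. */
    static void ring_rx_db(volatile uint32_t *db, uint32_t old_prod,
                           uint32_t new_prod)
    {
        if (old_prod != new_prod)
            *db = new_prod;
    }

    int main(void)
    {
        uint32_t fake_db = 0;

        ring_rx_db(&fake_db, 5, 5);  /* skipped: nothing new posted */
        ring_rx_db(&fake_db, 5, 9);  /* written */
        printf("db = %u\n", fake_db);
        return 0;
    }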
*/ #ifndef _BNXT_RXR_H_ @@ -52,22 +24,36 @@ #define BNXT_TPA_OUTER_L3_OFF(hdr_info) \ ((hdr_info) & 0x1ff) -#define RX_CMP_L4_CS_BITS rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_L4_CS_CALC) +#define RX_CMP_L4_CS_BITS \ + rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_L4_CS_CALC | \ + RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC) -#define RX_CMP_L4_CS_ERR_BITS rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_L4_CS_ERROR) +#define RX_CMP_L4_CS_ERR_BITS \ + rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_L4_CS_ERROR | \ + RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR) #define RX_CMP_L4_CS_OK(rxcmp1) \ (((rxcmp1)->flags2 & RX_CMP_L4_CS_BITS) && \ !((rxcmp1)->errors_v2 & RX_CMP_L4_CS_ERR_BITS)) -#define RX_CMP_IP_CS_ERR_BITS rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_IP_CS_ERROR) +#define RX_CMP_L4_CS_UNKNOWN(rxcmp1) \ + !((rxcmp1)->flags2 & RX_CMP_L4_CS_BITS) -#define RX_CMP_IP_CS_BITS rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_CS_CALC) +#define RX_CMP_IP_CS_ERR_BITS \ + rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_IP_CS_ERROR | \ + RX_PKT_CMPL_ERRORS_T_IP_CS_ERROR) + +#define RX_CMP_IP_CS_BITS \ + rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_CS_CALC | \ + RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC) #define RX_CMP_IP_CS_OK(rxcmp1) \ (((rxcmp1)->flags2 & RX_CMP_IP_CS_BITS) && \ !((rxcmp1)->errors_v2 & RX_CMP_IP_CS_ERR_BITS)) +#define RX_CMP_IP_CS_UNKNOWN(rxcmp1) \ + !((rxcmp1)->flags2 & RX_CMP_IP_CS_BITS) + enum pkt_hash_types { PKT_HASH_TYPE_NONE, /* Undefined type */ PKT_HASH_TYPE_L2, /* Input: src_MAC, dest_MAC */ diff --git a/drivers/net/bnxt/bnxt_stats.c b/drivers/net/bnxt/bnxt_stats.c index bd93cc83..bbd4e78b 100644 --- a/drivers/net/bnxt/bnxt_stats.c +++ b/drivers/net/bnxt/bnxt_stats.c @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) Broadcom Limited. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Broadcom Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. 
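[Annotation] The RX_CMP_*_CS_UNKNOWN macros above add a third outcome to checksum reporting: when the NIC never computed a checksum (CS_CALC bits clear, e.g. an unrecognised tunnel), the packet is reported UNKNOWN instead of falsely BAD. The decision tree, with hypothetical flag bits:

    #include <stdint.h>
    #include <stdio.h>

    #define CS_CALC  0x1  /* hypothetical "checksum was computed" bit */
    #define CS_ERROR 0x2  /* hypothetical "checksum failed" bit */

    static const char *cksum_status(uint32_t flags)
    {
        if (!(flags & CS_CALC))
            return "UNKNOWN";
        return (flags & CS_ERROR) ? "BAD" : "GOOD";
    }

    int main(void)
    {
        printf("%s %s %s\n", cksum_status(0), cksum_status(CS_CALC),
               cksum_status(CS_CALC | CS_ERROR));
        return 0;
    }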
*/ #include @@ -172,8 +144,8 @@ static const struct bnxt_xstats_name_off bnxt_func_stats_strings[] = { tx_mcast_pkts)}, {"tx_bcast_pkts", offsetof(struct hwrm_func_qstats_output, tx_bcast_pkts)}, - {"tx_err_pkts", offsetof(struct hwrm_func_qstats_output, - tx_err_pkts)}, + {"tx_discard_pkts", offsetof(struct hwrm_func_qstats_output, + tx_discard_pkts)}, {"tx_drop_pkts", offsetof(struct hwrm_func_qstats_output, tx_drop_pkts)}, {"tx_ucast_bytes", offsetof(struct hwrm_func_qstats_output, @@ -188,8 +160,8 @@ static const struct bnxt_xstats_name_off bnxt_func_stats_strings[] = { rx_mcast_pkts)}, {"rx_bcast_pkts", offsetof(struct hwrm_func_qstats_output, rx_bcast_pkts)}, - {"rx_err_pkts", offsetof(struct hwrm_func_qstats_output, - rx_err_pkts)}, + {"rx_discard_pkts", offsetof(struct hwrm_func_qstats_output, + rx_discard_pkts)}, {"rx_drop_pkts", offsetof(struct hwrm_func_qstats_output, rx_drop_pkts)}, {"rx_ucast_bytes", offsetof(struct hwrm_func_qstats_output, @@ -238,7 +210,7 @@ int bnxt_stats_get_op(struct rte_eth_dev *eth_dev, memset(bnxt_stats, 0, sizeof(*bnxt_stats)); if (!(bp->flags & BNXT_FLAG_INIT_DONE)) { PMD_DRV_LOG(ERR, "Device Initialization not complete!\n"); - return 0; + return -1; } for (i = 0; i < bp->rx_cp_nr_rings; i++) { @@ -249,6 +221,8 @@ int bnxt_stats_get_op(struct rte_eth_dev *eth_dev, bnxt_stats, 1); if (unlikely(rc)) return rc; + bnxt_stats->rx_nombuf += + rte_atomic64_read(&rxq->rx_mbuf_alloc_fail); } for (i = 0; i < bp->tx_cp_nr_rings; i++) { @@ -263,13 +237,13 @@ int bnxt_stats_get_op(struct rte_eth_dev *eth_dev, rc = bnxt_hwrm_func_qstats(bp, 0xffff, bnxt_stats); if (unlikely(rc)) return rc; - bnxt_stats->rx_nombuf = rte_atomic64_read(&bp->rx_mbuf_alloc_fail); return rc; } void bnxt_stats_reset_op(struct rte_eth_dev *eth_dev) { struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; + unsigned int i; if (!(bp->flags & BNXT_FLAG_INIT_DONE)) { PMD_DRV_LOG(ERR, "Device Initialization not complete!\n"); @@ -277,7 +251,11 @@ void bnxt_stats_reset_op(struct rte_eth_dev *eth_dev) } bnxt_clear_all_hwrm_stat_ctxs(bp); - rte_atomic64_clear(&bp->rx_mbuf_alloc_fail); + for (i = 0; i < bp->rx_cp_nr_rings; i++) { + struct bnxt_rx_queue *rxq = bp->rx_queues[i]; + + rte_atomic64_clear(&rxq->rx_mbuf_alloc_fail); + } } int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev, @@ -288,11 +266,6 @@ int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev, unsigned int count, i; uint64_t tx_drop_pkts; - if (!(bp->flags & BNXT_FLAG_PORT_STATS)) { - PMD_DRV_LOG(ERR, "xstats not supported for VF\n"); - return 0; - } - bnxt_hwrm_port_qstats(bp); bnxt_hwrm_func_qstats_tx_drop(bp, 0xffff, &tx_drop_pkts); diff --git a/drivers/net/bnxt/bnxt_stats.h b/drivers/net/bnxt/bnxt_stats.h index c1c83d57..b0f135a5 100644 --- a/drivers/net/bnxt/bnxt_stats.h +++ b/drivers/net/bnxt/bnxt_stats.h @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2014-2015 Broadcom Corporation. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. 
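[Annotation] With the counter now per queue, the stats hunks above accumulate rx_nombuf additively across queues and clear each counter on reset. Illustrative numbers only:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t per_queue_fail[4] = { 3, 0, 7, 1 };
        uint64_t rx_nombuf = 0;
        unsigned int i;

        for (i = 0; i < 4; i++)
            rx_nombuf += per_queue_fail[i];
        printf("rx_nombuf = %llu\n", (unsigned long long)rx_nombuf); /* 11 */

        for (i = 0; i < 4; i++)        /* stats reset */
            per_queue_fail[i] = 0;
        return 0;
    }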
- * * Neither the name of Broadcom Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. */ #ifndef _BNXT_STATS_H_ diff --git a/drivers/net/bnxt/bnxt_txq.c b/drivers/net/bnxt/bnxt_txq.c index 53524346..b9b975e4 100644 --- a/drivers/net/bnxt/bnxt_txq.c +++ b/drivers/net/bnxt/bnxt_txq.c @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) Broadcom Limited. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Broadcom Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. 
*/ #include @@ -47,10 +19,8 @@ void bnxt_free_txq_stats(struct bnxt_tx_queue *txq) { - struct bnxt_cp_ring_info *cpr = txq->cp_ring; - - if (cpr->hw_stats) - cpr->hw_stats = NULL; + if (txq && txq->cp_ring && txq->cp_ring->hw_stats) + txq->cp_ring->hw_stats = NULL; } static void bnxt_tx_queue_release_mbufs(struct bnxt_tx_queue *txq) @@ -58,6 +28,9 @@ static void bnxt_tx_queue_release_mbufs(struct bnxt_tx_queue *txq) struct bnxt_sw_tx_bd *sw_ring; uint16_t i; + if (!txq) + return; + sw_ring = txq->tx_ring->tx_buf_ring; if (sw_ring) { for (i = 0; i < txq->tx_ring->tx_ring_struct->ring_size; i++) { @@ -93,6 +66,8 @@ void bnxt_tx_queue_release_op(void *tx_queue) bnxt_free_ring(txq->cp_ring->cp_ring_struct); bnxt_free_txq_stats(txq); + rte_memzone_free(txq->mz); + txq->mz = NULL; rte_free(txq); } @@ -112,7 +87,7 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev, PMD_DRV_LOG(ERR, "Cannot create Tx ring %d. Only %d rings available\n", queue_idx, bp->max_tx_rings); - return -ENOSPC; + return -EINVAL; } if (!nb_desc || nb_desc > MAX_TX_DESC_CNT) { @@ -147,7 +122,7 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev, txq->port_id = eth_dev->data->port_id; /* Allocate TX ring hardware descriptors */ - if (bnxt_alloc_rings(bp, queue_idx, txq->tx_ring, NULL, txq->cp_ring, + if (bnxt_alloc_rings(bp, queue_idx, txq, NULL, txq->cp_ring, "txr")) { PMD_DRV_LOG(ERR, "ring_dma_zone_reserve for tx_ring failed!"); bnxt_tx_queue_release_op(txq); diff --git a/drivers/net/bnxt/bnxt_txq.h b/drivers/net/bnxt/bnxt_txq.h index e27c34fa..720ca90c 100644 --- a/drivers/net/bnxt/bnxt_txq.h +++ b/drivers/net/bnxt/bnxt_txq.h @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) Broadcom Limited. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Broadcom Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. 
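
The release-path hunks above add NULL guards (the ethdev layer can invoke release on a queue whose setup failed partway) and free the descriptor memzone that the reworked bnxt_alloc_rings() now attaches to the queue. Pulled together, the patched teardown order reads roughly as the following sketch; it leans on the driver helpers named in the hunks:

#include <rte_memzone.h>
#include <rte_malloc.h>

/* Sketch: defensive TX queue teardown, tolerating partial setup. */
static void
txq_release(struct bnxt_tx_queue *txq)
{
	if (txq == NULL)
		return;

	bnxt_tx_queue_release_mbufs(txq);             /* 1. drop queued mbufs */
	bnxt_free_ring(txq->tx_ring->tx_ring_struct); /* 2. ring bookkeeping */
	bnxt_free_ring(txq->cp_ring->cp_ring_struct);
	bnxt_free_txq_stats(txq);
	rte_memzone_free(txq->mz);                    /* 3. descriptor memory */
	txq->mz = NULL;
	rte_free(txq);                                /* 4. the queue itself */
}
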
*/ #ifndef _BNXT_TXQ_H_ @@ -50,7 +22,6 @@ struct bnxt_tx_queue { uint8_t pthresh; /* Prefetch threshold register */ uint8_t hthresh; /* Host threshold register */ uint8_t wthresh; /* Write-back threshold reg */ - uint32_t txq_flags; /* Holds flags for this TXq */ uint32_t ctx_curr; /* Hardware context states */ uint8_t tx_deferred_start; /* not in global dev start */ @@ -61,6 +32,7 @@ struct bnxt_tx_queue { unsigned int cp_nr_rings; struct bnxt_cp_ring_info *cp_ring; + const struct rte_memzone *mz; }; void bnxt_free_txq_stats(struct bnxt_tx_queue *txq); diff --git a/drivers/net/bnxt/bnxt_txr.c b/drivers/net/bnxt/bnxt_txr.c index 2c81a37c..470fddd5 100644 --- a/drivers/net/bnxt/bnxt_txr.c +++ b/drivers/net/bnxt/bnxt_txr.c @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) Broadcom Limited. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Broadcom Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. */ #include @@ -181,7 +153,7 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt, txbd->flags_type |= TX_BD_LONG_FLAGS_LHINT_GTE2K; else txbd->flags_type |= lhint_arr[txbd->len >> 9]; - txbd->addr = rte_cpu_to_le_32(rte_mbuf_data_iova(tx_buf->mbuf)); + txbd->address = rte_cpu_to_le_32(rte_mbuf_data_iova(tx_buf->mbuf)); if (long_bd) { txbd->flags_type |= TX_BD_LONG_TYPE_TX_BD_LONG; @@ -262,7 +234,7 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt, tx_buf = &txr->tx_buf_ring[txr->tx_prod]; txbd = &txr->tx_desc_ring[txr->tx_prod]; - txbd->addr = rte_cpu_to_le_32(rte_mbuf_data_iova(m_seg)); + txbd->address = rte_cpu_to_le_32(rte_mbuf_data_iova(m_seg)); txbd->flags_type = TX_BD_SHORT_TYPE_TX_BD_SHORT; txbd->len = m_seg->data_len; diff --git a/drivers/net/bnxt/bnxt_txr.h b/drivers/net/bnxt/bnxt_txr.h index d88b15ab..15c7e5a0 100644 --- a/drivers/net/bnxt/bnxt_txr.h +++ b/drivers/net/bnxt/bnxt_txr.h @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) Broadcom Limited. - * All rights reserved. 
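
One detail worth flagging in the bnxt_txr.c hunk above: the descriptor's address field is 8 bytes wide (see the uint64_t addr member of the TX BD layouts quoted later in this patch), yet the IOVA is still narrowed through rte_cpu_to_le_32(), which drops the upper 32 bits of the bus address on hosts whose IOVAs exceed 4 GB. A sketch of the full-width store, assuming a 64-bit descriptor field as the layout suggests:

#include <rte_byteorder.h>
#include <rte_mbuf.h>

/* Sketch: program the complete 64-bit IOVA into the TX descriptor. */
static inline void
tx_bd_set_addr(struct tx_bd_long *txbd, const struct rte_mbuf *m)
{
	/* rte_mbuf_data_iova() returns rte_iova_t (64-bit); keep all of it. */
	txbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova(m));
}
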
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Broadcom Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. */ #ifndef _BNXT_TXR_H_ diff --git a/drivers/net/bnxt/bnxt_vnic.c b/drivers/net/bnxt/bnxt_vnic.c index d4aeb4ca..19d06af5 100644 --- a/drivers/net/bnxt/bnxt_vnic.c +++ b/drivers/net/bnxt/bnxt_vnic.c @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2014-2015 Broadcom Corporation. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Broadcom Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. */ #include @@ -77,6 +49,8 @@ void bnxt_init_vnics(struct bnxt *bp) vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE; vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE; vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE; + vnic->hash_mode = + HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_DEFAULT; for (j = 0; j < MAX_QUEUES_PER_VNIC; j++) vnic->fw_grp_ids[j] = (uint16_t)HWRM_NA_SIGNATURE; @@ -185,10 +159,10 @@ int bnxt_alloc_vnic_attributes(struct bnxt *bp) mz = rte_memzone_lookup(mz_name); if (!mz) { mz = rte_memzone_reserve(mz_name, - entry_length * max_vnics, - SOCKET_ID_ANY, - RTE_MEMZONE_2MB | - RTE_MEMZONE_SIZE_HINT_ONLY); + entry_length * max_vnics, SOCKET_ID_ANY, + RTE_MEMZONE_2MB | + RTE_MEMZONE_SIZE_HINT_ONLY | + RTE_MEMZONE_IOVA_CONTIG); if (!mz) return -ENOMEM; } diff --git a/drivers/net/bnxt/bnxt_vnic.h b/drivers/net/bnxt/bnxt_vnic.h index d8d35c7d..c521d7e5 100644 --- a/drivers/net/bnxt/bnxt_vnic.h +++ b/drivers/net/bnxt/bnxt_vnic.h @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2014-2015 Broadcom Corporation. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Broadcom Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. */ #ifndef _BNXT_VNIC_H_ @@ -53,6 +25,7 @@ struct bnxt_vnic_info { uint16_t dflt_ring_grp; uint16_t mru; uint16_t hash_type; + uint8_t hash_mode; rte_iova_t rss_table_dma_addr; uint16_t *rss_table; rte_iova_t rss_hash_key_dma_addr; diff --git a/drivers/net/bnxt/hsi_struct_def_dpdk.h b/drivers/net/bnxt/hsi_struct_def_dpdk.h index 1e9c39f4..fd6d8807 100644 --- a/drivers/net/bnxt/hsi_struct_def_dpdk.h +++ b/drivers/net/bnxt/hsi_struct_def_dpdk.h @@ -1,12516 +1,28102 @@ -/*- - * BSD LICENSE +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2014-2018 Broadcom Limited + * All rights reserved. * - * Copyright(c) 2001-2017 Broadcom Limited. - * All rights reserved. 
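
The bnxt_alloc_vnic_attributes() hunk above adds RTE_MEMZONE_IOVA_CONTIG to the reservation flags. Under the dynamic memory model introduced in this release, a memzone is only guaranteed to be IOVA-contiguous when the caller asks for it, and DMA'd tables such as the RSS indirection and hash-key areas need that guarantee. A minimal reservation sketch with a hypothetical zone name:

#include <rte_memzone.h>

/* Sketch: reserve (or re-find) IOVA-contiguous memory for DMA tables. */
static const struct rte_memzone *
reserve_dma_zone(const char *name, size_t len)
{
	const struct rte_memzone *mz = rte_memzone_lookup(name);

	if (mz != NULL)           /* reuse a zone left over from a restart */
		return mz;

	return rte_memzone_reserve(name, len, SOCKET_ID_ANY,
				   RTE_MEMZONE_2MB |
				   RTE_MEMZONE_SIZE_HINT_ONLY |
				   RTE_MEMZONE_IOVA_CONTIG);
}
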
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Broadcom Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * DO NOT MODIFY!!! This file is automatically generated. */ -#ifndef _HSI_STRUCT_DEF_DPDK_ -#define _HSI_STRUCT_DEF_DPDK_ -/* HSI and HWRM Specification 1.8.2 */ -#define HWRM_VERSION_MAJOR 1 -#define HWRM_VERSION_MINOR 8 -#define HWRM_VERSION_UPDATE 2 +#ifndef _HSI_STRUCT_DEF_DPDK_H_ +#define _HSI_STRUCT_DEF_DPDK_H_ -#define HWRM_VERSION_RSVD 0 /* non-zero means beta version */ +/* This is the HWRM command header. */ +/* hwrm_cmd_hdr (size:128b/16B) */ +struct hwrm_cmd_hdr { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; +} __attribute__((packed)); -#define HWRM_VERSION_STR "1.8.2.0" -/* - * Following is the signature for HWRM message field that indicates not - * applicable (All F's). Need to cast it the size of the field if needed. - */ -#define HWRM_NA_SIGNATURE ((uint32_t)(-1)) -#define HWRM_MAX_REQ_LEN (128) /* hwrm_func_buf_rgtr */ -#define HWRM_MAX_RESP_LEN (280) /* hwrm_selftest_qlist */ -#define HW_HASH_INDEX_SIZE 0x80 /* 7 bit indirection table index. 
*/ -#define HW_HASH_KEY_SIZE 40 -#define HWRM_RESP_VALID_KEY 1 /* valid key for HWRM response */ -#define HWRM_ROCE_SP_HSI_VERSION_MAJOR 1 -#define HWRM_ROCE_SP_HSI_VERSION_MINOR 8 -#define HWRM_ROCE_SP_HSI_VERSION_UPDATE 2 +/* This is the HWRM response header. */ +/* hwrm_resp_hdr (size:64b/8B) */ +struct hwrm_resp_hdr { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; +} __attribute__((packed)); /* - * Request types + * TLV encapsulated message. Use the TLV type field of the + * TLV to determine the type of message encapsulated. */ -#define HWRM_VER_GET (UINT32_C(0x0)) -#define HWRM_FUNC_BUF_UNRGTR (UINT32_C(0xe)) -#define HWRM_FUNC_VF_CFG (UINT32_C(0xf)) - /* Reserved for future use */ -#define RESERVED1 (UINT32_C(0x10)) -#define HWRM_FUNC_RESET (UINT32_C(0x11)) -#define HWRM_FUNC_GETFID (UINT32_C(0x12)) -#define HWRM_FUNC_VF_ALLOC (UINT32_C(0x13)) -#define HWRM_FUNC_VF_FREE (UINT32_C(0x14)) -#define HWRM_FUNC_QCAPS (UINT32_C(0x15)) -#define HWRM_FUNC_QCFG (UINT32_C(0x16)) -#define HWRM_FUNC_CFG (UINT32_C(0x17)) -#define HWRM_FUNC_QSTATS (UINT32_C(0x18)) -#define HWRM_FUNC_CLR_STATS (UINT32_C(0x19)) -#define HWRM_FUNC_DRV_UNRGTR (UINT32_C(0x1a)) -#define HWRM_FUNC_VF_RESC_FREE (UINT32_C(0x1b)) -#define HWRM_FUNC_VF_VNIC_IDS_QUERY (UINT32_C(0x1c)) -#define HWRM_FUNC_DRV_RGTR (UINT32_C(0x1d)) -#define HWRM_FUNC_DRV_QVER (UINT32_C(0x1e)) -#define HWRM_FUNC_BUF_RGTR (UINT32_C(0x1f)) -#define HWRM_PORT_PHY_CFG (UINT32_C(0x20)) -#define HWRM_PORT_MAC_CFG (UINT32_C(0x21)) -#define HWRM_PORT_QSTATS (UINT32_C(0x23)) -#define HWRM_PORT_LPBK_QSTATS (UINT32_C(0x24)) -#define HWRM_PORT_CLR_STATS (UINT32_C(0x25)) -#define HWRM_PORT_PHY_QCFG (UINT32_C(0x27)) -#define HWRM_PORT_MAC_QCFG (UINT32_C(0x28)) -#define HWRM_PORT_MAC_PTP_QCFG (UINT32_C(0x29)) -#define HWRM_PORT_PHY_QCAPS (UINT32_C(0x2a)) -#define HWRM_PORT_LED_CFG (UINT32_C(0x2d)) -#define HWRM_PORT_LED_QCFG (UINT32_C(0x2e)) -#define HWRM_PORT_LED_QCAPS (UINT32_C(0x2f)) -#define HWRM_QUEUE_QPORTCFG (UINT32_C(0x30)) -#define HWRM_QUEUE_QCFG (UINT32_C(0x31)) -#define HWRM_QUEUE_CFG (UINT32_C(0x32)) -#define HWRM_FUNC_VLAN_CFG (UINT32_C(0x33)) -#define HWRM_FUNC_VLAN_QCFG (UINT32_C(0x34)) -#define HWRM_QUEUE_PFCENABLE_QCFG (UINT32_C(0x35)) -#define HWRM_QUEUE_PFCENABLE_CFG (UINT32_C(0x36)) -#define HWRM_QUEUE_PRI2COS_QCFG (UINT32_C(0x37)) -#define HWRM_QUEUE_PRI2COS_CFG (UINT32_C(0x38)) -#define HWRM_QUEUE_COS2BW_QCFG (UINT32_C(0x39)) -#define HWRM_QUEUE_COS2BW_CFG (UINT32_C(0x3a)) -#define HWRM_VNIC_ALLOC (UINT32_C(0x40)) -#define HWRM_VNIC_ALLOC (UINT32_C(0x40)) -#define HWRM_VNIC_FREE (UINT32_C(0x41)) -#define HWRM_VNIC_CFG (UINT32_C(0x42)) -#define HWRM_VNIC_QCFG (UINT32_C(0x43)) -#define HWRM_VNIC_TPA_CFG (UINT32_C(0x44)) -#define HWRM_VNIC_RSS_CFG (UINT32_C(0x46)) -#define HWRM_VNIC_RSS_QCFG (UINT32_C(0x47)) -#define HWRM_VNIC_PLCMODES_CFG (UINT32_C(0x48)) -#define HWRM_VNIC_PLCMODES_QCFG (UINT32_C(0x49)) -#define HWRM_VNIC_QCAPS (UINT32_C(0x4a)) -#define HWRM_RING_ALLOC (UINT32_C(0x50)) -#define HWRM_RING_FREE (UINT32_C(0x51)) -#define HWRM_RING_CMPL_RING_QAGGINT_PARAMS (UINT32_C(0x52)) -#define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS (UINT32_C(0x53)) -#define HWRM_RING_RESET (UINT32_C(0x5e)) -#define HWRM_RING_GRP_ALLOC (UINT32_C(0x60)) -#define HWRM_RING_GRP_FREE (UINT32_C(0x61)) -#define 
HWRM_VNIC_RSS_COS_LB_CTX_ALLOC (UINT32_C(0x70)) -#define HWRM_VNIC_RSS_COS_LB_CTX_FREE (UINT32_C(0x71)) -#define HWRM_CFA_L2_FILTER_ALLOC (UINT32_C(0x90)) -#define HWRM_CFA_L2_FILTER_FREE (UINT32_C(0x91)) -#define HWRM_CFA_L2_FILTER_CFG (UINT32_C(0x92)) -#define HWRM_CFA_L2_SET_RX_MASK (UINT32_C(0x93)) - /* Reserved for future use */ -#define HWRM_CFA_VLAN_ANTISPOOF_CFG (UINT32_C(0x94)) -#define HWRM_CFA_TUNNEL_FILTER_ALLOC (UINT32_C(0x95)) -#define HWRM_CFA_TUNNEL_FILTER_FREE (UINT32_C(0x96)) -#define HWRM_CFA_NTUPLE_FILTER_ALLOC (UINT32_C(0x99)) -#define HWRM_CFA_NTUPLE_FILTER_FREE (UINT32_C(0x9a)) -#define HWRM_CFA_NTUPLE_FILTER_CFG (UINT32_C(0x9b)) -#define HWRM_CFA_EM_FLOW_ALLOC (UINT32_C(0x9c)) -#define HWRM_CFA_EM_FLOW_FREE (UINT32_C(0x9d)) -#define HWRM_CFA_EM_FLOW_CFG (UINT32_C(0x9e)) -#define HWRM_TUNNEL_DST_PORT_QUERY (UINT32_C(0xa0)) -#define HWRM_TUNNEL_DST_PORT_ALLOC (UINT32_C(0xa1)) -#define HWRM_TUNNEL_DST_PORT_FREE (UINT32_C(0xa2)) -#define HWRM_STAT_CTX_ALLOC (UINT32_C(0xb0)) -#define HWRM_STAT_CTX_FREE (UINT32_C(0xb1)) -#define HWRM_STAT_CTX_QUERY (UINT32_C(0xb2)) -#define HWRM_STAT_CTX_CLR_STATS (UINT32_C(0xb3)) -#define HWRM_FW_RESET (UINT32_C(0xc0)) -#define HWRM_FW_QSTATUS (UINT32_C(0xc1)) -#define HWRM_EXEC_FWD_RESP (UINT32_C(0xd0)) -#define HWRM_REJECT_FWD_RESP (UINT32_C(0xd1)) -#define HWRM_FWD_RESP (UINT32_C(0xd2)) -#define HWRM_FWD_ASYNC_EVENT_CMPL (UINT32_C(0xd3)) -#define HWRM_TEMP_MONITOR_QUERY (UINT32_C(0xe0)) -#define HWRM_WOL_FILTER_ALLOC (UINT32_C(0xf0)) -#define HWRM_WOL_FILTER_FREE (UINT32_C(0xf1)) -#define HWRM_WOL_FILTER_QCFG (UINT32_C(0xf2)) -#define HWRM_WOL_REASON_QCFG (UINT32_C(0xf3)) -#define HWRM_DBG_DUMP (UINT32_C(0xff14)) -#define HWRM_NVM_VALIDATE_OPTION (UINT32_C(0xffef)) -#define HWRM_NVM_FLUSH (UINT32_C(0xfff0)) -#define HWRM_NVM_GET_VARIABLE (UINT32_C(0xfff1)) -#define HWRM_NVM_SET_VARIABLE (UINT32_C(0xfff2)) -#define HWRM_NVM_INSTALL_UPDATE (UINT32_C(0xfff3)) -#define HWRM_NVM_MODIFY (UINT32_C(0xfff4)) -#define HWRM_NVM_VERIFY_UPDATE (UINT32_C(0xfff5)) -#define HWRM_NVM_GET_DEV_INFO (UINT32_C(0xfff6)) -#define HWRM_NVM_ERASE_DIR_ENTRY (UINT32_C(0xfff7)) -#define HWRM_NVM_MOD_DIR_ENTRY (UINT32_C(0xfff8)) -#define HWRM_NVM_FIND_DIR_ENTRY (UINT32_C(0xfff9)) -#define HWRM_NVM_GET_DIR_ENTRIES (UINT32_C(0xfffa)) -#define HWRM_NVM_GET_DIR_INFO (UINT32_C(0xfffb)) -#define HWRM_NVM_RAW_DUMP (UINT32_C(0xfffc)) -#define HWRM_NVM_READ (UINT32_C(0xfffd)) -#define HWRM_NVM_WRITE (UINT32_C(0xfffe)) -#define HWRM_NVM_RAW_WRITE_BLK (UINT32_C(0xffff)) +#define CMD_DISCR_TLV_ENCAP UINT32_C(0x8000) +#define CMD_DISCR_LAST CMD_DISCR_TLV_ENCAP -/* - * Note: The Host Software Interface (HSI) and Hardware Resource Manager (HWRM) - * specification describes the data structures used in Ethernet packet or RDMA - * message data transfers as well as an abstract interface for managing Ethernet - * NIC hardware resources. - */ -/* Ethernet Data path Host Structures */ -/* - * Description: The following three sections document the host structures used - * between device and software drivers for communicating Ethernet packets. - */ -/* BD Ring Structures */ -/* - * Description: This structure is used to inform the NIC of a location for and - * an aggregation buffer that will be used for packet data that is received. An - * aggregation buffer creates a different kind of completion operation for a - * packet where a variable number of BDs may be used to place the packet in the - * host. 
RX Rings that have aggregation buffers are known as aggregation rings - * and must contain only aggregation buffers. - */ -/* Short TX BD (16 bytes) */ -struct tx_bd_short { - uint16_t flags_type; + +/* HWRM request message */ +#define TLV_TYPE_HWRM_REQUEST UINT32_C(0x1) +/* HWRM response message */ +#define TLV_TYPE_HWRM_RESPONSE UINT32_C(0x2) +/* RoCE slow path command */ +#define TLV_TYPE_ROCE_SP_COMMAND UINT32_C(0x3) +/* Engine CKV - The device's serial number. */ +#define TLV_TYPE_ENGINE_CKV_DEVICE_SERIAL_NUMBER UINT32_C(0x8001) +/* Engine CKV - Per-function random nonce data. */ +#define TLV_TYPE_ENGINE_CKV_NONCE UINT32_C(0x8002) +/* Engine CKV - Initialization vector. */ +#define TLV_TYPE_ENGINE_CKV_IV UINT32_C(0x8003) +/* Engine CKV - Authentication tag. */ +#define TLV_TYPE_ENGINE_CKV_AUTH_TAG UINT32_C(0x8004) +/* Engine CKV - The encrypted data. */ +#define TLV_TYPE_ENGINE_CKV_CIPHERTEXT UINT32_C(0x8005) +/* Engine CKV - Supported algorithms. */ +#define TLV_TYPE_ENGINE_CKV_ALGORITHMS UINT32_C(0x8006) +/* Engine CKV - The EC curve name and ECC public key information. */ +#define TLV_TYPE_ENGINE_CKV_ECC_PUBLIC_KEY UINT32_C(0x8007) +/* Engine CKV - The ECDSA signature. */ +#define TLV_TYPE_ENGINE_CKV_ECDSA_SIGNATURE UINT32_C(0x8008) +#define TLV_TYPE_LAST \ + TLV_TYPE_ENGINE_CKV_ECDSA_SIGNATURE + + +/* tlv (size:64b/8B) */ +struct tlv { + /* + * The command discriminator is used to differentiate between various + * types of HWRM messages. This includes legacy HWRM and RoCE slowpath + * command messages as well as newer TLV encapsulated HWRM commands. + * + * For TLV encapsulated messages this field must be 0x8000. + */ + uint16_t cmd_discr; + uint8_t reserved_8b; + uint8_t flags; + /* + * Indicates the presence of additional TLV encapsulated data + * follows this TLV. + */ + #define TLV_FLAGS_MORE UINT32_C(0x1) + /* Last TLV in a sequence of TLVs. */ + #define TLV_FLAGS_MORE_LAST UINT32_C(0x0) + /* More TLVs follow this TLV. */ + #define TLV_FLAGS_MORE_NOT_LAST UINT32_C(0x1) + /* + * When an HWRM receiver detects a TLV type that it does not + * support with the TLV required flag set, the receiver must + * reject the HWRM message with an error code indicating an + * unsupported TLV type. + */ + #define TLV_FLAGS_REQUIRED UINT32_C(0x2) + /* No */ + #define TLV_FLAGS_REQUIRED_NO (UINT32_C(0x0) << 1) + /* Yes */ + #define TLV_FLAGS_REQUIRED_YES (UINT32_C(0x1) << 1) + #define TLV_FLAGS_REQUIRED_LAST TLV_FLAGS_REQUIRED_YES + /* + * This field defines the TLV type value which is divided into + * two ranges to differentiate between global and local TLV types. + * Global TLV types must be unique across all defined TLV types. + * Local TLV types are valid only for extensions to a given + * HWRM message and may be repeated across different HWRM message + * types. There is a direct correlation of each HWRM message type + * to a single global TLV type value. + * + * Global TLV range: `0 - (63k-1)` + * + * Local TLV range: `63k - (64k-1)` + */ + uint16_t tlv_type; + /* + * Length of the message data encapsulated by this TLV in bytes. + * This length does not include the size of the TLV header itself + * and it must be an integer multiple of 8B. + */ + uint16_t length; +} __attribute__((packed)); + +/* Input */ +/* input (size:128b/16B) */ +struct input { /* - * All bits in this field must be valid on the first BD of a - * packet. Only the packet_end bit must be valid for the - * remaining BDs of a packet. + * This value indicates what type of request this is. 
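
Per the tlv header added above, each TLV's length counts only its payload (a multiple of 8 bytes, header excluded) and the MORE bit says whether another TLV follows. A hedged sketch of walking such a chain within a bounded buffer:

#include <stdint.h>

/* Sketch: step to the next TLV in a chain, or NULL at the end. */
static const struct tlv *
tlv_next(const struct tlv *cur, const uint8_t *buf_end)
{
	const uint8_t *next;

	if ((cur->flags & TLV_FLAGS_MORE) == TLV_FLAGS_MORE_LAST)
		return NULL;                  /* final TLV in the chain */

	/* length excludes the 8-byte header and is 8-byte aligned */
	next = (const uint8_t *)cur + sizeof(*cur) + cur->length;
	if (next + sizeof(*cur) > buf_end)
		return NULL;                  /* would run past the buffer */
	return (const struct tlv *)next;
}
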
The format + * for the rest of the command is determined by this field. */ - /* This value identifies the type of buffer descriptor. */ - #define TX_BD_SHORT_TYPE_MASK UINT32_C(0x3f) - #define TX_BD_SHORT_TYPE_SFT 0 + uint16_t req_type; /* - * Indicates that this BD is 16B long and is - * used for normal L2 packet transmission. + * This value indicates the what completion ring the request will + * be optionally completed on. If the value is -1, then no + * CR completion will be generated. Any other value must be a + * valid CR ring_id value for this function. */ - #define TX_BD_SHORT_TYPE_TX_BD_SHORT UINT32_C(0x0) + uint16_t cmpl_ring; + /* This value indicates the command sequence number. */ + uint16_t seq_id; /* - * If set to 1, the packet ends with the data in the buffer - * pointed to by this descriptor. This flag must be valid on - * every BD. + * Target ID of this command. + * + * 0x0 - 0xFFF8 - Used for function ids + * 0xFFF8 - 0xFFFE - Reserved for internal processors + * 0xFFFF - HWRM */ - #define TX_BD_SHORT_FLAGS_PACKET_END UINT32_C(0x40) + uint16_t target_id; /* - * If set to 1, the device will not generate a completion for - * this transmit packet unless there is an error in it's - * processing. If this bit is set to 0, then the packet will be - * completed normally. This bit must be valid only on the first - * BD of a packet. + * This is the host address where the response will be written + * when the request is complete. This area must be 16B aligned + * and must be cleared to zero before the request is made. */ - #define TX_BD_SHORT_FLAGS_NO_CMPL UINT32_C(0x80) - /* - * This value indicates how many 16B BD locations are consumed - * in the ring by this packet. A value of 1 indicates that this - * BD is the only BD (and that the it is a short BD). A value of - * 3 indicates either 3 short BDs or 1 long BD and one short BD - * in the packet. A value of 0 indicates that there are 32 BD - * locations in the packet (the maximum). This field is valid - * only on the first BD of a packet. - */ - #define TX_BD_SHORT_FLAGS_BD_CNT_MASK UINT32_C(0x1f00) - #define TX_BD_SHORT_FLAGS_BD_CNT_SFT 8 - /* - * This value is a hint for the length of the entire packet. It - * is used by the chip to optimize internal processing. The - * packet will be dropped if the hint is too short. This field - * is valid only on the first BD of a packet. - */ - #define TX_BD_SHORT_FLAGS_LHINT_MASK UINT32_C(0x6000) - #define TX_BD_SHORT_FLAGS_LHINT_SFT 13 - /* indicates packet length < 512B */ - #define TX_BD_SHORT_FLAGS_LHINT_LT512 (UINT32_C(0x0) << 13) - /* indicates 512 <= packet length < 1KB */ - #define TX_BD_SHORT_FLAGS_LHINT_LT1K (UINT32_C(0x1) << 13) - /* indicates 1KB <= packet length < 2KB */ - #define TX_BD_SHORT_FLAGS_LHINT_LT2K (UINT32_C(0x2) << 13) - /* indicates packet length >= 2KB */ - #define TX_BD_SHORT_FLAGS_LHINT_GTE2K (UINT32_C(0x3) << 13) - #define TX_BD_SHORT_FLAGS_LHINT_LAST \ - TX_BD_SHORT_FLAGS_LHINT_GTE2K + uint64_t resp_addr; +} __attribute__((packed)); + +/* Output */ +/* output (size:64b/8B) */ +struct output { /* - * If set to 1, the device immediately updates the Send Consumer - * Index after the buffer associated with this descriptor has - * been transferred via DMA to NIC memory from host memory. An - * interrupt may or may not be generated according to the state - * of the interrupt avoidance mechanisms. If this bit is set to - * 0, then the Consumer Index is only updated as soon as one of - * the host interrupt coalescing conditions has been met. 
This - * bit must be valid on the first BD of a packet. + * Pass/Fail or error type + * + * Note: receiver to verify the in parameters, and fail the call + * with an error when appropriate */ - #define TX_BD_SHORT_FLAGS_COAL_NOW UINT32_C(0x8000) + uint16_t error_code; + /* This field returns the type of original request. */ + uint16_t req_type; + /* This field provides original sequence number of the command. */ + uint16_t seq_id; /* - * All bits in this field must be valid on the first BD of a - * packet. Only the packet_end bit must be valid for the - * remaining BDs of a packet. + * This field is the length of the response in bytes. The + * last byte of the response is a valid flag that will read + * as '1' when the command has been completely written to + * memory. */ - #define TX_BD_SHORT_FLAGS_MASK UINT32_C(0xffc0) - #define TX_BD_SHORT_FLAGS_SFT 6 - uint16_t len; + uint16_t resp_len; +} __attribute__((packed)); + +/* Short Command Structure */ +/* hwrm_short_input (size:128b/16B) */ +struct hwrm_short_input { /* - * This is the length of the host physical buffer this BD - * describes in bytes. This field must be valid on all BDs of a - * packet. + * This field indicates the type of request in the request buffer. + * The format for the rest of the command (request) is determined + * by this field. */ - uint32_t opaque; + uint16_t req_type; /* - * The opaque data field is pass through to the completion and - * can be used for any data that the driver wants to associate - * with the transmit BD. This field must be valid on the first - * BD of a packet. + * This field indicates a signature that is used to identify short + * form of the command listed here. This field shall be set to + * 17185 (0x4321). */ - uint64_t addr; + uint16_t signature; + /* Signature indicating this is a short form of HWRM command */ + #define HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD UINT32_C(0x4321) + #define HWRM_SHORT_INPUT_SIGNATURE_LAST \ + HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD + /* Reserved for future use. */ + uint16_t unused_0; + /* This value indicates the length of the request. */ + uint16_t size; /* - * This is the host physical address for the portion of the - * packet described by this TX BD. This value must be valid on - * all BDs of a packet. + * This is the host address where the request was written. + * This area must be 16B aligned. */ + uint64_t req_addr; } __attribute__((packed)); -/* Long TX BD (32 bytes split to 2 16-byte struct) */ -struct tx_bd_long { - uint16_t flags_type; - /* - * All bits in this field must be valid on the first BD of a - * packet. Only the packet_end bit must be valid for the - * remaining BDs of a packet. +/* + * Command numbering + * # NOTE - definitions already in hwrm_req_type, in hwrm_types.yaml + * # So only structure definition is provided here. + */ +/* cmd_nums (size:64b/8B) */ +struct cmd_nums { + /* + * This version of the specification defines the commands listed in + * the table below. The following are general implementation + * requirements for these commands: + * + * # All commands listed below that are marked neither + * reserved nor experimental shall be implemented by the HWRM. + * # A HWRM client compliant to this specification should not use + * commands outside of the list below. + * # A HWRM client compliant to this specification should not use + * command numbers marked reserved below. + * # A command marked experimental below may not be implemented + * by the HWRM. 
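
hwrm_short_input above is the 16-byte wrapper used when the full request is staged in host memory and only its address is pushed to the device. A minimal sketch of filling it in; the little-endian conversions reflect the usual HWRM wire format, and req_buf_iova stands in for an already-mapped, 16B-aligned request buffer:

#include <string.h>
#include <rte_byteorder.h>

/* Sketch: wrap a prepared request buffer in a short command. */
static void
build_short_cmd(struct hwrm_short_input *sc, uint16_t req_type,
		uint16_t req_len, uint64_t req_buf_iova)
{
	memset(sc, 0, sizeof(*sc));
	sc->req_type  = rte_cpu_to_le_16(req_type);
	sc->signature = rte_cpu_to_le_16(HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD);
	sc->size      = rte_cpu_to_le_16(req_len);
	sc->req_addr  = rte_cpu_to_le_64(req_buf_iova);
}
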
+ * # A command marked experimental may change in the + * future version of the HWRM specification. + * # A command not listed below may be implemented by the HWRM. + * The behavior of commands that are not listed below is outside + * the scope of this specification. */ - /* This value identifies the type of buffer descriptor. */ - #define TX_BD_LONG_TYPE_MASK UINT32_C(0x3f) - #define TX_BD_LONG_TYPE_SFT 0 + uint16_t req_type; + #define HWRM_VER_GET UINT32_C(0x0) + #define HWRM_FUNC_BUF_UNRGTR UINT32_C(0xe) + #define HWRM_FUNC_VF_CFG UINT32_C(0xf) + /* Reserved for future use. */ + #define HWRM_RESERVED1 UINT32_C(0x10) + #define HWRM_FUNC_RESET UINT32_C(0x11) + #define HWRM_FUNC_GETFID UINT32_C(0x12) + #define HWRM_FUNC_VF_ALLOC UINT32_C(0x13) + #define HWRM_FUNC_VF_FREE UINT32_C(0x14) + #define HWRM_FUNC_QCAPS UINT32_C(0x15) + #define HWRM_FUNC_QCFG UINT32_C(0x16) + #define HWRM_FUNC_CFG UINT32_C(0x17) + #define HWRM_FUNC_QSTATS UINT32_C(0x18) + #define HWRM_FUNC_CLR_STATS UINT32_C(0x19) + #define HWRM_FUNC_DRV_UNRGTR UINT32_C(0x1a) + #define HWRM_FUNC_VF_RESC_FREE UINT32_C(0x1b) + #define HWRM_FUNC_VF_VNIC_IDS_QUERY UINT32_C(0x1c) + #define HWRM_FUNC_DRV_RGTR UINT32_C(0x1d) + #define HWRM_FUNC_DRV_QVER UINT32_C(0x1e) + #define HWRM_FUNC_BUF_RGTR UINT32_C(0x1f) + #define HWRM_PORT_PHY_CFG UINT32_C(0x20) + #define HWRM_PORT_MAC_CFG UINT32_C(0x21) + /* Experimental */ + #define HWRM_PORT_TS_QUERY UINT32_C(0x22) + #define HWRM_PORT_QSTATS UINT32_C(0x23) + #define HWRM_PORT_LPBK_QSTATS UINT32_C(0x24) + /* Experimental */ + #define HWRM_PORT_CLR_STATS UINT32_C(0x25) + /* Experimental */ + #define HWRM_PORT_LPBK_CLR_STATS UINT32_C(0x26) + #define HWRM_PORT_PHY_QCFG UINT32_C(0x27) + #define HWRM_PORT_MAC_QCFG UINT32_C(0x28) + /* Experimental */ + #define HWRM_PORT_MAC_PTP_QCFG UINT32_C(0x29) + #define HWRM_PORT_PHY_QCAPS UINT32_C(0x2a) + #define HWRM_PORT_PHY_I2C_WRITE UINT32_C(0x2b) + #define HWRM_PORT_PHY_I2C_READ UINT32_C(0x2c) + #define HWRM_PORT_LED_CFG UINT32_C(0x2d) + #define HWRM_PORT_LED_QCFG UINT32_C(0x2e) + #define HWRM_PORT_LED_QCAPS UINT32_C(0x2f) + #define HWRM_QUEUE_QPORTCFG UINT32_C(0x30) + #define HWRM_QUEUE_QCFG UINT32_C(0x31) + #define HWRM_QUEUE_CFG UINT32_C(0x32) + #define HWRM_FUNC_VLAN_CFG UINT32_C(0x33) + #define HWRM_FUNC_VLAN_QCFG UINT32_C(0x34) + #define HWRM_QUEUE_PFCENABLE_QCFG UINT32_C(0x35) + #define HWRM_QUEUE_PFCENABLE_CFG UINT32_C(0x36) + #define HWRM_QUEUE_PRI2COS_QCFG UINT32_C(0x37) + #define HWRM_QUEUE_PRI2COS_CFG UINT32_C(0x38) + #define HWRM_QUEUE_COS2BW_QCFG UINT32_C(0x39) + #define HWRM_QUEUE_COS2BW_CFG UINT32_C(0x3a) + /* Experimental */ + #define HWRM_QUEUE_DSCP_QCAPS UINT32_C(0x3b) + /* Experimental */ + #define HWRM_QUEUE_DSCP2PRI_QCFG UINT32_C(0x3c) + /* Experimental */ + #define HWRM_QUEUE_DSCP2PRI_CFG UINT32_C(0x3d) + #define HWRM_VNIC_ALLOC UINT32_C(0x40) + #define HWRM_VNIC_FREE UINT32_C(0x41) + #define HWRM_VNIC_CFG UINT32_C(0x42) + #define HWRM_VNIC_QCFG UINT32_C(0x43) + #define HWRM_VNIC_TPA_CFG UINT32_C(0x44) + /* Experimental */ + #define HWRM_VNIC_TPA_QCFG UINT32_C(0x45) + #define HWRM_VNIC_RSS_CFG UINT32_C(0x46) + #define HWRM_VNIC_RSS_QCFG UINT32_C(0x47) + #define HWRM_VNIC_PLCMODES_CFG UINT32_C(0x48) + #define HWRM_VNIC_PLCMODES_QCFG UINT32_C(0x49) + #define HWRM_VNIC_QCAPS UINT32_C(0x4a) + #define HWRM_RING_ALLOC UINT32_C(0x50) + #define HWRM_RING_FREE UINT32_C(0x51) + #define HWRM_RING_CMPL_RING_QAGGINT_PARAMS UINT32_C(0x52) + #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS UINT32_C(0x53) + #define HWRM_RING_RESET UINT32_C(0x5e) + 
#define HWRM_RING_GRP_ALLOC UINT32_C(0x60) + #define HWRM_RING_GRP_FREE UINT32_C(0x61) + /* Reserved for future use. */ + #define HWRM_RESERVED5 UINT32_C(0x64) + /* Reserved for future use. */ + #define HWRM_RESERVED6 UINT32_C(0x65) + #define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC UINT32_C(0x70) + #define HWRM_VNIC_RSS_COS_LB_CTX_FREE UINT32_C(0x71) + #define HWRM_CFA_L2_FILTER_ALLOC UINT32_C(0x90) + #define HWRM_CFA_L2_FILTER_FREE UINT32_C(0x91) + #define HWRM_CFA_L2_FILTER_CFG UINT32_C(0x92) + #define HWRM_CFA_L2_SET_RX_MASK UINT32_C(0x93) + #define HWRM_CFA_VLAN_ANTISPOOF_CFG UINT32_C(0x94) + #define HWRM_CFA_TUNNEL_FILTER_ALLOC UINT32_C(0x95) + #define HWRM_CFA_TUNNEL_FILTER_FREE UINT32_C(0x96) + /* Experimental */ + #define HWRM_CFA_ENCAP_RECORD_ALLOC UINT32_C(0x97) + /* Experimental */ + #define HWRM_CFA_ENCAP_RECORD_FREE UINT32_C(0x98) + #define HWRM_CFA_NTUPLE_FILTER_ALLOC UINT32_C(0x99) + #define HWRM_CFA_NTUPLE_FILTER_FREE UINT32_C(0x9a) + #define HWRM_CFA_NTUPLE_FILTER_CFG UINT32_C(0x9b) + /* Experimental */ + #define HWRM_CFA_EM_FLOW_ALLOC UINT32_C(0x9c) + /* Experimental */ + #define HWRM_CFA_EM_FLOW_FREE UINT32_C(0x9d) + /* Experimental */ + #define HWRM_CFA_EM_FLOW_CFG UINT32_C(0x9e) + #define HWRM_TUNNEL_DST_PORT_QUERY UINT32_C(0xa0) + #define HWRM_TUNNEL_DST_PORT_ALLOC UINT32_C(0xa1) + #define HWRM_TUNNEL_DST_PORT_FREE UINT32_C(0xa2) + #define HWRM_STAT_CTX_ALLOC UINT32_C(0xb0) + #define HWRM_STAT_CTX_FREE UINT32_C(0xb1) + #define HWRM_STAT_CTX_QUERY UINT32_C(0xb2) + #define HWRM_STAT_CTX_CLR_STATS UINT32_C(0xb3) + #define HWRM_PORT_QSTATS_EXT UINT32_C(0xb4) + #define HWRM_FW_RESET UINT32_C(0xc0) + #define HWRM_FW_QSTATUS UINT32_C(0xc1) + /* Experimental */ + #define HWRM_FW_SET_TIME UINT32_C(0xc8) + /* Experimental */ + #define HWRM_FW_GET_TIME UINT32_C(0xc9) + /* Experimental */ + #define HWRM_FW_SET_STRUCTURED_DATA UINT32_C(0xca) + /* Experimental */ + #define HWRM_FW_GET_STRUCTURED_DATA UINT32_C(0xcb) + /* Experimental */ + #define HWRM_FW_IPC_MAILBOX UINT32_C(0xcc) + #define HWRM_EXEC_FWD_RESP UINT32_C(0xd0) + #define HWRM_REJECT_FWD_RESP UINT32_C(0xd1) + #define HWRM_FWD_RESP UINT32_C(0xd2) + #define HWRM_FWD_ASYNC_EVENT_CMPL UINT32_C(0xd3) + #define HWRM_OEM_CMD UINT32_C(0xd4) + #define HWRM_TEMP_MONITOR_QUERY UINT32_C(0xe0) + #define HWRM_WOL_FILTER_ALLOC UINT32_C(0xf0) + #define HWRM_WOL_FILTER_FREE UINT32_C(0xf1) + #define HWRM_WOL_FILTER_QCFG UINT32_C(0xf2) + #define HWRM_WOL_REASON_QCFG UINT32_C(0xf3) + /* Experimental */ + #define HWRM_CFA_METER_PROFILE_ALLOC UINT32_C(0xf5) + /* Experimental */ + #define HWRM_CFA_METER_PROFILE_FREE UINT32_C(0xf6) + /* Experimental */ + #define HWRM_CFA_METER_PROFILE_CFG UINT32_C(0xf7) + /* Experimental */ + #define HWRM_CFA_METER_INSTANCE_ALLOC UINT32_C(0xf8) + /* Experimental */ + #define HWRM_CFA_METER_INSTANCE_FREE UINT32_C(0xf9) + /* Experimental */ + #define HWRM_CFA_VFR_ALLOC UINT32_C(0xfd) + /* Experimental */ + #define HWRM_CFA_VFR_FREE UINT32_C(0xfe) + /* Experimental */ + #define HWRM_CFA_VF_PAIR_ALLOC UINT32_C(0x100) + /* Experimental */ + #define HWRM_CFA_VF_PAIR_FREE UINT32_C(0x101) + /* Experimental */ + #define HWRM_CFA_VF_PAIR_INFO UINT32_C(0x102) + /* Experimental */ + #define HWRM_CFA_FLOW_ALLOC UINT32_C(0x103) + /* Experimental */ + #define HWRM_CFA_FLOW_FREE UINT32_C(0x104) + /* Experimental */ + #define HWRM_CFA_FLOW_FLUSH UINT32_C(0x105) + /* Experimental */ + #define HWRM_CFA_FLOW_STATS UINT32_C(0x106) + /* Experimental */ + #define HWRM_CFA_FLOW_INFO UINT32_C(0x107) + /* Experimental */ + #define 
HWRM_CFA_DECAP_FILTER_ALLOC UINT32_C(0x108) + /* Experimental */ + #define HWRM_CFA_DECAP_FILTER_FREE UINT32_C(0x109) + #define HWRM_CFA_VLAN_ANTISPOOF_QCFG UINT32_C(0x10a) + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC UINT32_C(0x10b) + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE UINT32_C(0x10c) + /* Experimental */ + #define HWRM_CFA_PAIR_ALLOC UINT32_C(0x10d) + /* Experimental */ + #define HWRM_CFA_PAIR_FREE UINT32_C(0x10e) + /* Experimental */ + #define HWRM_CFA_PAIR_INFO UINT32_C(0x10f) + /* Experimental */ + #define HWRM_FW_IPC_MSG UINT32_C(0x110) + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO UINT32_C(0x111) + /* Engine CKV - Ping the device and SRT firmware to get the public key. */ + #define HWRM_ENGINE_CKV_HELLO UINT32_C(0x12d) + /* Engine CKV - Get the current allocation status of keys provisioned in the key vault. */ + #define HWRM_ENGINE_CKV_STATUS UINT32_C(0x12e) + /* Engine CKV - Add a new CKEK used to encrypt keys. */ + #define HWRM_ENGINE_CKV_CKEK_ADD UINT32_C(0x12f) + /* Engine CKV - Delete a previously added CKEK. */ + #define HWRM_ENGINE_CKV_CKEK_DELETE UINT32_C(0x130) + /* Engine CKV - Add a new key to the key vault. */ + #define HWRM_ENGINE_CKV_KEY_ADD UINT32_C(0x131) + /* Engine CKV - Delete a key from the key vault. */ + #define HWRM_ENGINE_CKV_KEY_DELETE UINT32_C(0x132) + /* Engine CKV - Delete all keys from the key vault. */ + #define HWRM_ENGINE_CKV_FLUSH UINT32_C(0x133) + /* Engine CKV - Get random data. */ + #define HWRM_ENGINE_CKV_RNG_GET UINT32_C(0x134) + /* Engine CKV - Generate and encrypt a new AES key. */ + #define HWRM_ENGINE_CKV_KEY_GEN UINT32_C(0x135) + /* Engine - Query the available queue groups configuration. */ + #define HWRM_ENGINE_QG_CONFIG_QUERY UINT32_C(0x13c) + /* Engine - Query the queue groups assigned to a function. */ + #define HWRM_ENGINE_QG_QUERY UINT32_C(0x13d) + /* Engine - Query the available queue group meter profile configuration. */ + #define HWRM_ENGINE_QG_METER_PROFILE_CONFIG_QUERY UINT32_C(0x13e) + /* Engine - Query the configuration of a queue group meter profile. */ + #define HWRM_ENGINE_QG_METER_PROFILE_QUERY UINT32_C(0x13f) + /* Engine - Allocate a queue group meter profile. */ + #define HWRM_ENGINE_QG_METER_PROFILE_ALLOC UINT32_C(0x140) + /* Engine - Free a queue group meter profile. */ + #define HWRM_ENGINE_QG_METER_PROFILE_FREE UINT32_C(0x141) + /* Engine - Query the meters assigned to a queue group. */ + #define HWRM_ENGINE_QG_METER_QUERY UINT32_C(0x142) + /* Engine - Bind a queue group meter profile to a queue group. */ + #define HWRM_ENGINE_QG_METER_BIND UINT32_C(0x143) + /* Engine - Unbind a queue group meter profile from a queue group. */ + #define HWRM_ENGINE_QG_METER_UNBIND UINT32_C(0x144) + /* Engine - Bind a queue group to a function. */ + #define HWRM_ENGINE_QG_FUNC_BIND UINT32_C(0x145) + /* Engine - Query the scheduling group configuration. */ + #define HWRM_ENGINE_SG_CONFIG_QUERY UINT32_C(0x146) + /* Engine - Query the queue groups assigned to a scheduling group. */ + #define HWRM_ENGINE_SG_QUERY UINT32_C(0x147) + /* Engine - Query the configuration of a scheduling group's meter profiles. */ + #define HWRM_ENGINE_SG_METER_QUERY UINT32_C(0x148) + /* Engine - Configure a scheduling group's meter profiles. */ + #define HWRM_ENGINE_SG_METER_CONFIG UINT32_C(0x149) + /* Engine - Bind a queue group to a scheduling group. */ + #define HWRM_ENGINE_SG_QG_BIND UINT32_C(0x14a) + /* Engine - Unbind a queue group from its scheduling group. 
*/ + #define HWRM_ENGINE_QG_SG_UNBIND UINT32_C(0x14b) + /* Engine - Query the Engine configuration. */ + #define HWRM_ENGINE_CONFIG_QUERY UINT32_C(0x154) + /* Engine - Configure the statistics accumulator for an Engine. */ + #define HWRM_ENGINE_STATS_CONFIG UINT32_C(0x155) + /* Engine - Clear the statistics accumulator for an Engine. */ + #define HWRM_ENGINE_STATS_CLEAR UINT32_C(0x156) + /* Engine - Query the statistics accumulator for an Engine. */ + #define HWRM_ENGINE_STATS_QUERY UINT32_C(0x157) + /* Engine - Allocate an Engine RQ. */ + #define HWRM_ENGINE_RQ_ALLOC UINT32_C(0x15e) + /* Engine - Free an Engine RQ. */ + #define HWRM_ENGINE_RQ_FREE UINT32_C(0x15f) + /* Engine - Allocate an Engine CQ. */ + #define HWRM_ENGINE_CQ_ALLOC UINT32_C(0x160) + /* Engine - Free an Engine CQ. */ + #define HWRM_ENGINE_CQ_FREE UINT32_C(0x161) + /* Engine - Allocate an NQ. */ + #define HWRM_ENGINE_NQ_ALLOC UINT32_C(0x162) + /* Engine - Free an NQ. */ + #define HWRM_ENGINE_NQ_FREE UINT32_C(0x163) + /* Engine - Set the on-die RQE credit update location. */ + #define HWRM_ENGINE_ON_DIE_RQE_CREDITS UINT32_C(0x164) + /* Experimental */ + #define HWRM_FUNC_RESOURCE_QCAPS UINT32_C(0x190) + /* Experimental */ + #define HWRM_FUNC_VF_RESOURCE_CFG UINT32_C(0x191) + /* Experimental */ + #define HWRM_FUNC_BACKING_STORE_QCAPS UINT32_C(0x192) + /* Experimental */ + #define HWRM_FUNC_BACKING_STORE_CFG UINT32_C(0x193) + /* Experimental */ + #define HWRM_FUNC_BACKING_STORE_QCFG UINT32_C(0x194) + /* Experimental */ + #define HWRM_SELFTEST_QLIST UINT32_C(0x200) + /* Experimental */ + #define HWRM_SELFTEST_EXEC UINT32_C(0x201) + /* Experimental */ + #define HWRM_SELFTEST_IRQ UINT32_C(0x202) + /* Experimental */ + #define HWRM_SELFTEST_RETRIEVE_SERDES_DATA UINT32_C(0x203) + /* Experimental */ + #define HWRM_PCIE_QSTATS UINT32_C(0x204) + /* Experimental */ + #define HWRM_DBG_READ_DIRECT UINT32_C(0xff10) + /* Experimental */ + #define HWRM_DBG_READ_INDIRECT UINT32_C(0xff11) + /* Experimental */ + #define HWRM_DBG_WRITE_DIRECT UINT32_C(0xff12) + /* Experimental */ + #define HWRM_DBG_WRITE_INDIRECT UINT32_C(0xff13) + #define HWRM_DBG_DUMP UINT32_C(0xff14) + /* Experimental */ + #define HWRM_DBG_ERASE_NVM UINT32_C(0xff15) + /* Experimental */ + #define HWRM_DBG_CFG UINT32_C(0xff16) + /* Experimental */ + #define HWRM_DBG_COREDUMP_LIST UINT32_C(0xff17) + /* Experimental */ + #define HWRM_DBG_COREDUMP_INITIATE UINT32_C(0xff18) + /* Experimental */ + #define HWRM_DBG_COREDUMP_RETRIEVE UINT32_C(0xff19) + /* */ + #define HWRM_DBG_I2C_CMD UINT32_C(0xff1b) + /* Experimental */ + #define HWRM_NVM_FACTORY_DEFAULTS UINT32_C(0xffee) + #define HWRM_NVM_VALIDATE_OPTION UINT32_C(0xffef) + #define HWRM_NVM_FLUSH UINT32_C(0xfff0) + #define HWRM_NVM_GET_VARIABLE UINT32_C(0xfff1) + #define HWRM_NVM_SET_VARIABLE UINT32_C(0xfff2) + #define HWRM_NVM_INSTALL_UPDATE UINT32_C(0xfff3) + #define HWRM_NVM_MODIFY UINT32_C(0xfff4) + #define HWRM_NVM_VERIFY_UPDATE UINT32_C(0xfff5) + #define HWRM_NVM_GET_DEV_INFO UINT32_C(0xfff6) + #define HWRM_NVM_ERASE_DIR_ENTRY UINT32_C(0xfff7) + #define HWRM_NVM_MOD_DIR_ENTRY UINT32_C(0xfff8) + #define HWRM_NVM_FIND_DIR_ENTRY UINT32_C(0xfff9) + #define HWRM_NVM_GET_DIR_ENTRIES UINT32_C(0xfffa) + #define HWRM_NVM_GET_DIR_INFO UINT32_C(0xfffb) + #define HWRM_NVM_RAW_DUMP UINT32_C(0xfffc) + #define HWRM_NVM_READ UINT32_C(0xfffd) + #define HWRM_NVM_WRITE UINT32_C(0xfffe) + #define HWRM_NVM_RAW_WRITE_BLK UINT32_C(0xffff) + #define HWRM_LAST HWRM_NVM_RAW_WRITE_BLK + uint16_t unused_0[3]; +} __attribute__((packed)); + +/* Return 
Codes */ +/* ret_codes (size:64b/8B) */ +struct ret_codes { + uint16_t error_code; + /* Request was successfully executed by the HWRM. */ + #define HWRM_ERR_CODE_SUCCESS UINT32_C(0x0) + /* The HWRM failed to execute the request. */ + #define HWRM_ERR_CODE_FAIL UINT32_C(0x1) /* - * Indicates that this BD is 32B long and is - * used for normal L2 packet transmission. + * The request contains invalid argument(s) or input + * parameters. */ - #define TX_BD_LONG_TYPE_TX_BD_LONG UINT32_C(0x10) + #define HWRM_ERR_CODE_INVALID_PARAMS UINT32_C(0x2) /* - * If set to 1, the packet ends with the data in the buffer - * pointed to by this descriptor. This flag must be valid on - * every BD. + * The requester is not allowed to access the requested + * resource. This error code shall be provided in a + * response to a request to query or modify an existing + * resource that is not accessible by the requester. */ - #define TX_BD_LONG_FLAGS_PACKET_END UINT32_C(0x40) + #define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED UINT32_C(0x3) /* - * If set to 1, the device will not generate a completion for - * this transmit packet unless there is an error in it's - * processing. If this bit is set to 0, then the packet will be - * completed normally. This bit must be valid only on the first - * BD of a packet. + * The HWRM is unable to allocate the requested resource. + * This code only applies to requests for HWRM resource + * allocations. */ - #define TX_BD_LONG_FLAGS_NO_CMPL UINT32_C(0x80) - /* - * This value indicates how many 16B BD locations are consumed - * in the ring by this packet. A value of 1 indicates that this - * BD is the only BD (and that the it is a short BD). A value of - * 3 indicates either 3 short BDs or 1 long BD and one short BD - * in the packet. A value of 0 indicates that there are 32 BD - * locations in the packet (the maximum). This field is valid - * only on the first BD of a packet. - */ - #define TX_BD_LONG_FLAGS_BD_CNT_MASK UINT32_C(0x1f00) - #define TX_BD_LONG_FLAGS_BD_CNT_SFT 8 - /* - * This value is a hint for the length of the entire packet. It - * is used by the chip to optimize internal processing. The - * packet will be dropped if the hint is too short. This field - * is valid only on the first BD of a packet. - */ - #define TX_BD_LONG_FLAGS_LHINT_MASK UINT32_C(0x6000) - #define TX_BD_LONG_FLAGS_LHINT_SFT 13 - /* indicates packet length < 512B */ - #define TX_BD_LONG_FLAGS_LHINT_LT512 (UINT32_C(0x0) << 13) - /* indicates 512 <= packet length < 1KB */ - #define TX_BD_LONG_FLAGS_LHINT_LT1K (UINT32_C(0x1) << 13) - /* indicates 1KB <= packet length < 2KB */ - #define TX_BD_LONG_FLAGS_LHINT_LT2K (UINT32_C(0x2) << 13) - /* indicates packet length >= 2KB */ - #define TX_BD_LONG_FLAGS_LHINT_GTE2K (UINT32_C(0x3) << 13) - #define TX_BD_LONG_FLAGS_LHINT_LAST \ - TX_BD_LONG_FLAGS_LHINT_GTE2K + #define HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR UINT32_C(0x4) /* - * If set to 1, the device immediately updates the Send Consumer - * Index after the buffer associated with this descriptor has - * been transferred via DMA to NIC memory from host memory. An - * interrupt may or may not be generated according to the state - * of the interrupt avoidance mechanisms. If this bit is set to - * 0, then the Consumer Index is only updated as soon as one of - * the host interrupt coalescing conditions has been met. This - * bit must be valid on the first BD of a packet. + * Invalid combination of flags is specified in the + * request. 
*/ - #define TX_BD_LONG_FLAGS_COAL_NOW UINT32_C(0x8000) + #define HWRM_ERR_CODE_INVALID_FLAGS UINT32_C(0x5) /* - * All bits in this field must be valid on the first BD of a - * packet. Only the packet_end bit must be valid for the - * remaining BDs of a packet. + * Invalid combination of enables fields is specified in + * the request. */ - #define TX_BD_LONG_FLAGS_MASK UINT32_C(0xffc0) - #define TX_BD_LONG_FLAGS_SFT 6 - uint16_t len; + #define HWRM_ERR_CODE_INVALID_ENABLES UINT32_C(0x6) /* - * This is the length of the host physical buffer this BD - * describes in bytes. This field must be valid on all BDs of a - * packet. + * Request contains a required TLV that is not supported by + * the installed version of firmware. */ - uint32_t opaque; + #define HWRM_ERR_CODE_UNSUPPORTED_TLV UINT32_C(0x7) /* - * The opaque data field is pass through to the completion and - * can be used for any data that the driver wants to associate - * with the transmit BD. This field must be valid on the first - * BD of a packet. + * No firmware buffer available to accept the request. Driver + * should retry the request. */ - uint64_t addr; + #define HWRM_ERR_CODE_NO_BUFFER UINT32_C(0x8) /* - * This is the host physical address for the portion of the - * packet described by this TX BD. This value must be valid on - * all BDs of a packet. + * Generic HWRM execution error that represents an + * internal error. */ + #define HWRM_ERR_CODE_HWRM_ERROR UINT32_C(0xf) + /* Unknown error */ + #define HWRM_ERR_CODE_UNKNOWN_ERR UINT32_C(0xfffe) + /* Unsupported or invalid command */ + #define HWRM_ERR_CODE_CMD_NOT_SUPPORTED UINT32_C(0xffff) + #define HWRM_ERR_CODE_LAST \ + HWRM_ERR_CODE_CMD_NOT_SUPPORTED + uint16_t unused_0[3]; } __attribute__((packed)); -/* last 16 bytes of Long TX BD */ -struct tx_bd_long_hi { - uint16_t lflags; - /* - * All bits in this field must be valid on the first BD of a - * packet. Their value on other BDs of the packet will be - * ignored. - */ - /* - * If set to 1, the controller replaces the TCP/UPD checksum - * fields of normal TCP/UPD checksum, or the inner TCP/UDP - * checksum field of the encapsulated TCP/UDP packets with the - * hardware calculated TCP/UDP checksum for the packet - * associated with this descriptor. The flag is ignored if the - * LSO flag is set. This bit must be valid on the first BD of a - * packet. - */ - #define TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM UINT32_C(0x1) +/* Output */ +/* hwrm_err_output (size:128b/16B) */ +struct hwrm_err_output { /* - * If set to 1, the controller replaces the IP checksum of the - * normal packets, or the inner IP checksum of the encapsulated - * packets with the hardware calculated IP checksum for the - * packet associated with this descriptor. This bit must be - * valid on the first BD of a packet. + * Pass/Fail or error type + * + * Note: receiver to verify the in parameters, and fail the call + * with an error when appropriate */ - #define TX_BD_LONG_LFLAGS_IP_CHKSUM UINT32_C(0x2) + uint16_t error_code; + /* This field returns the type of original request. */ + uint16_t req_type; + /* This field provides original sequence number of the command. */ + uint16_t seq_id; /* - * If set to 1, the controller will not append an Ethernet CRC - * to the end of the frame. This bit must be valid on the first - * BD of a packet. Packet must be 64B or longer when this flag - * is set. It is not useful to use this bit with any form of TX - * offload such as CSO or LSO. 
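
The error codes above are what a driver inspects to turn firmware status into something the ethdev API can return. A sketch of one plausible mapping to negative errno values; the particular errno chosen per code is illustrative, not mandated by the specification:

#include <errno.h>
#include <stdint.h>

/* Sketch: translate an HWRM error_code into a negative errno. */
static int
hwrm_err_to_errno(uint16_t error_code)
{
	switch (error_code) {
	case HWRM_ERR_CODE_SUCCESS:
		return 0;
	case HWRM_ERR_CODE_INVALID_PARAMS:
	case HWRM_ERR_CODE_INVALID_FLAGS:
	case HWRM_ERR_CODE_INVALID_ENABLES:
		return -EINVAL;
	case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED:
		return -EACCES;
	case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR:
		return -ENOMEM;
	case HWRM_ERR_CODE_NO_BUFFER:
		return -EAGAIN;  /* the spec says the driver should retry */
	case HWRM_ERR_CODE_CMD_NOT_SUPPORTED:
		return -ENOTSUP;
	default:
		return -EIO;     /* FAIL, HWRM_ERROR, UNKNOWN_ERR, ... */
	}
}
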
-	 * The intent is that the packet
-	 * from the host already has a valid Ethernet CRC on the packet.
+	 * This field is the length of the response in bytes. The
+	 * last byte of the response is a valid flag that will read
+	 * as '1' when the command has been completely written to
+	 * memory.
 	 */
-	#define TX_BD_LONG_LFLAGS_NOCRC UINT32_C(0x4)
+	uint16_t resp_len;
+	/* debug info for this error response. */
+	uint32_t opaque_0;
+	/* debug info for this error response. */
+	uint16_t opaque_1;
 	/*
-	 * If set to 1, the device will record the time at which the
-	 * packet was actually transmitted at the TX MAC. This bit must
-	 * be valid on the first BD of a packet.
+	 * In the case of an error response, command specific error
+	 * code is returned in this field.
 	 */
-	#define TX_BD_LONG_LFLAGS_STAMP UINT32_C(0x8)
+	uint8_t cmd_err;
 	/*
-	 * If set to 1, The controller replaces the tunnel IP checksum
-	 * field with hardware calculated IP checksum for the IP header
-	 * of the packet associated with this descriptor. For outer UDP
-	 * checksum, global outer UDP checksum TE_NIC register needs to
-	 * be enabled. If the global outer UDP checksum TE_NIC register
-	 * bit is set, outer UDP checksum will be calculated for the
-	 * following cases: 1. Packets with tcp_udp_chksum flag set to
-	 * offload checksum for inner packet AND the inner packet is
-	 * TCP/UDP. If the inner packet is ICMP for example (non-
-	 * TCP/UDP), even if the tcp_udp_chksum is set, the outer UDP
-	 * checksum will not be calculated. 2. Packets with lso flag set
-	 * which implies inner TCP checksum calculation as part of LSO
-	 * operation.
-	 */
-	#define TX_BD_LONG_LFLAGS_T_IP_CHKSUM UINT32_C(0x10)
-	/*
-	 * If set to 1, the device will treat this packet with LSO(Large
-	 * Send Offload) processing for both normal or encapsulated
-	 * packets, which is a form of TCP segmentation. When this bit
-	 * is 1, the hdr_size and mss fields must be valid. The driver
-	 * doesn't need to set t_ip_chksum, ip_chksum, and
-	 * tcp_udp_chksum flags since the controller will replace the
-	 * appropriate checksum fields for segmented packets. When this
-	 * bit is 1, the hdr_size and mss fields must be valid.
+	 * This field is used in Output records to indicate that the output
+	 * is completely written to RAM. This field should be read as '1'
+	 * to indicate that the output has been completely written.
+	 * When writing a command completion or response to an internal processor,
+	 * the order of writes has to be such that this field is written last.
 	 */
-	#define TX_BD_LONG_LFLAGS_LSO UINT32_C(0x20)
+	uint8_t valid;
+} __attribute__((packed));
+/*
+ * Following is the signature for HWRM message field that indicates not
+ * applicable (All F's). Need to cast it to the size of the field if needed.
+ */
+#define HWRM_NA_SIGNATURE ((uint32_t)(-1))
+/* hwrm_func_buf_rgtr */
+#define HWRM_MAX_REQ_LEN 128
+/* hwrm_selftest_qlist */
+#define HWRM_MAX_RESP_LEN 280
+/* 7 bit indirection table index. */
+#define HW_HASH_INDEX_SIZE 0x80
+#define HW_HASH_KEY_SIZE 40
+/* valid key for HWRM response */
+#define HWRM_RESP_VALID_KEY 1
+#define HWRM_VERSION_MAJOR 1
+#define HWRM_VERSION_MINOR 9
+#define HWRM_VERSION_UPDATE 2
+/* non-zero means beta version */
+#define HWRM_VERSION_RSVD 6
+#define HWRM_VERSION_STR "1.9.2.6"
+
+/****************
+ * hwrm_ver_get *
+ ****************/
+
+
+/* hwrm_ver_get_input (size:192b/24B) */
+struct hwrm_ver_get_input {
+	/* The HWRM command request type.
*/ + uint16_t req_type; /* - * If set to zero when LSO is '1', then the IPID will be treated - * as a 16b number and will be wrapped if it exceeds a value of - * 0xffff. If set to one when LSO is '1', then the IPID will be - * treated as a 15b number and will be wrapped if it exceeds a - * value 0f 0x7fff. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - #define TX_BD_LONG_LFLAGS_IPID_FMT UINT32_C(0x40) + uint16_t cmpl_ring; /* - * If set to zero when LSO is '1', then the IPID of the tunnel - * IP header will not be modified during LSO operations. If set - * to one when LSO is '1', then the IPID of the tunnel IP header - * will be incremented for each subsequent segment of an LSO - * operation. The flag is ignored if the LSO packet is a normal - * (non-tunneled) TCP packet. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - #define TX_BD_LONG_LFLAGS_T_IPID UINT32_C(0x80) + uint16_t seq_id; /* - * If set to '1', then the RoCE ICRC will be appended to the - * packet. Packet must be a valid RoCE format packet. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - #define TX_BD_LONG_LFLAGS_ROCE_CRC UINT32_C(0x100) + uint16_t target_id; /* - * If set to '1', then the FCoE CRC will be appended to the - * packet. Packet must be a valid FCoE format packet. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - #define TX_BD_LONG_LFLAGS_FCOE_CRC UINT32_C(0x200) - uint16_t hdr_size; + uint64_t resp_addr; /* - * When LSO is '1', this field must contain the offset of the - * TCP payload from the beginning of the packet in as 16b words. - * In case of encapsulated/tunneling packet, this field contains - * the offset of the inner TCP payload from beginning of the - * packet as 16-bit words. This value must be valid on the first - * BD of a packet. + * This field represents the major version of HWRM interface + * specification supported by the driver HWRM implementation. + * The interface major version is intended to change only when + * non backward compatible changes are made to the HWRM + * interface specification. */ - #define TX_BD_LONG_HDR_SIZE_MASK UINT32_C(0x1ff) - #define TX_BD_LONG_HDR_SIZE_SFT 0 - uint32_t mss; + uint8_t hwrm_intf_maj; /* - * This is the MSS value that will be used to do the LSO - * processing. The value is the length in bytes of the TCP - * payload for each segment generated by the LSO operation. This - * value must be valid on the first BD of a packet. + * This field represents the minor version of HWRM interface + * specification supported by the driver HWRM implementation. + * A change in interface minor version is used to reflect + * significant backward compatible modification to HWRM + * interface specification. + * This can be due to addition or removal of functionality. + * HWRM interface specifications with the same major version + * but different minor versions are compatible. 
*/ - #define TX_BD_LONG_MSS_MASK UINT32_C(0x7fff) - #define TX_BD_LONG_MSS_SFT 0 - uint16_t unused_2; - uint16_t cfa_action; + uint8_t hwrm_intf_min; /* - * This value selects a CFA action to perform on the packet. Set - * this value to zero if no CFA action is desired. This value - * must be valid on the first BD of a packet. + * This field represents the update version of HWRM interface + * specification supported by the driver HWRM implementation. + * The interface update version is used to reflect minor + * changes or bug fixes to a released HWRM interface + * specification. */ - uint32_t cfa_meta; + uint8_t hwrm_intf_upd; + uint8_t unused_0[5]; +} __attribute__((packed)); + +/* hwrm_ver_get_output (size:1408b/176B) */ +struct hwrm_ver_get_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; /* - * This value is action meta-data that defines CFA edit - * operations that are done in addition to any action editing. + * This field represents the major version of HWRM interface + * specification supported by the HWRM implementation. + * The interface major version is intended to change only when + * non backward compatible changes are made to the HWRM + * interface specification. + * A HWRM implementation that is compliant with this + * specification shall provide value of 1 in this field. */ - /* When key=1, This is the VLAN tag VID value. */ - #define TX_BD_LONG_CFA_META_VLAN_VID_MASK UINT32_C(0xfff) - #define TX_BD_LONG_CFA_META_VLAN_VID_SFT 0 - /* When key=1, This is the VLAN tag DE value. */ - #define TX_BD_LONG_CFA_META_VLAN_DE UINT32_C(0x1000) - /* When key=1, This is the VLAN tag PRI value. */ - #define TX_BD_LONG_CFA_META_VLAN_PRI_MASK UINT32_C(0xe000) - #define TX_BD_LONG_CFA_META_VLAN_PRI_SFT 13 - /* When key=1, This is the VLAN tag TPID select value. */ - #define TX_BD_LONG_CFA_META_VLAN_TPID_MASK UINT32_C(0x70000) - #define TX_BD_LONG_CFA_META_VLAN_TPID_SFT 16 - /* 0x88a8 */ - #define TX_BD_LONG_CFA_META_VLAN_TPID_TPID88A8 (UINT32_C(0x0) << 16) - /* 0x8100 */ - #define TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100 (UINT32_C(0x1) << 16) - /* 0x9100 */ - #define TX_BD_LONG_CFA_META_VLAN_TPID_TPID9100 (UINT32_C(0x2) << 16) - /* 0x9200 */ - #define TX_BD_LONG_CFA_META_VLAN_TPID_TPID9200 (UINT32_C(0x3) << 16) - /* 0x9300 */ - #define TX_BD_LONG_CFA_META_VLAN_TPID_TPID9300 (UINT32_C(0x4) << 16) - /* Value programmed in CFA VLANTPID register. */ - #define TX_BD_LONG_CFA_META_VLAN_TPID_TPIDCFG (UINT32_C(0x5) << 16) - #define TX_BD_LONG_CFA_META_VLAN_TPID_LAST \ - TX_BD_LONG_CFA_META_VLAN_TPID_TPIDCFG - /* When key=1, This is the VLAN tag TPID select value. */ - #define TX_BD_LONG_CFA_META_VLAN_RESERVED_MASK UINT32_C(0xff80000) - #define TX_BD_LONG_CFA_META_VLAN_RESERVED_SFT 19 + uint8_t hwrm_intf_maj_8b; /* - * This field identifies the type of edit to be performed on the - * packet. This value must be valid on the first BD of a packet. + * This field represents the minor version of HWRM interface + * specification supported by the HWRM implementation. + * A change in interface minor version is used to reflect + * significant backward compatible modification to HWRM + * interface specification. + * This can be due to addition or removal of functionality. 
+ * HWRM interface specifications with the same major version + * but different minor versions are compatible. + * A HWRM implementation that is compliant with this + * specification shall provide value of 2 in this field. */ - #define TX_BD_LONG_CFA_META_KEY_MASK UINT32_C(0xf0000000) - #define TX_BD_LONG_CFA_META_KEY_SFT 28 - /* No editing */ - #define TX_BD_LONG_CFA_META_KEY_NONE (UINT32_C(0x0) << 28) + uint8_t hwrm_intf_min_8b; /* - * - meta[17:16] - TPID select value (0 = - * 0x8100). - meta[15:12] - PRI/DE value. - - * meta[11:0] - VID value. + * This field represents the update version of HWRM interface + * specification supported by the HWRM implementation. + * The interface update version is used to reflect minor + * changes or bug fixes to a released HWRM interface + * specification. + * A HWRM implementation that is compliant with this + * specification shall provide value of 2 in this field. */ - #define TX_BD_LONG_CFA_META_KEY_VLAN_TAG (UINT32_C(0x1) << 28) - #define TX_BD_LONG_CFA_META_KEY_LAST \ - TX_BD_LONG_CFA_META_KEY_VLAN_TAG -} __attribute__((packed)); - -/* RX Producer Packet BD (16 bytes) */ -struct rx_prod_pkt_bd { - uint16_t flags_type; - /* This value identifies the type of buffer descriptor. */ - #define RX_PROD_PKT_BD_TYPE_MASK UINT32_C(0x3f) - #define RX_PROD_PKT_BD_TYPE_SFT 0 + uint8_t hwrm_intf_upd_8b; + uint8_t hwrm_intf_rsvd_8b; /* - * Indicates that this BD is 16B long and is an - * RX Producer (ie. empty) buffer descriptor. + * This field represents the major version of HWRM firmware. + * A change in firmware major version represents a major + * firmware release. */ - #define RX_PROD_PKT_BD_TYPE_RX_PROD_PKT UINT32_C(0x4) + uint8_t hwrm_fw_maj_8b; /* - * If set to 1, the packet will be placed at the address plus - * 2B. The 2 Bytes of padding will be written as zero. + * This field represents the minor version of HWRM firmware. + * A change in firmware minor version represents significant + * firmware functionality changes. */ + uint8_t hwrm_fw_min_8b; /* - * This is intended to be used when the host buffer is cache- - * line aligned to produce packets that are easy to parse in - * host memory while still allowing writes to be cache line - * aligned. + * This field represents the build version of HWRM firmware. + * A change in firmware build version represents bug fixes + * to a released firmware. */ - #define RX_PROD_PKT_BD_FLAGS_SOP_PAD UINT32_C(0x40) + uint8_t hwrm_fw_bld_8b; /* - * If set to 1, the packet write will be padded out to the - * nearest cache-line with zero value padding. + * This field is a reserved field. This field can be used to + * represent firmware branches or customer specific releases + * tied to a specific (major,minor,update) version of the + * HWRM firmware. */ + uint8_t hwrm_fw_rsvd_8b; /* - * If receive buffers start/end on cache-line boundaries, this - * feature will ensure that all data writes on the PCI bus - * start/end on cache line boundaries. + * This field represents the major version of mgmt firmware. + * A change in major version represents a major release. */ - #define RX_PROD_PKT_BD_FLAGS_EOP_PAD UINT32_C(0x80) + uint8_t mgmt_fw_maj_8b; /* - * This value is the number of additional buffers in the ring - * that describe the buffer space to be consumed for the this - * packet. If the value is zero, then the packet must fit within - * the space described by this BD. 
If this value is 1 or more, - * it indicates how many additional "buffer" BDs are in the ring - * immediately following this BD to be used for the same network - * packet. Even if the packet to be placed does not need all the - * additional buffers, they will be consumed anyway. + * This field represents the minor version of mgmt firmware. + * A change in minor version represents significant + * functionality changes. */ - #define RX_PROD_PKT_BD_FLAGS_BUFFERS_MASK UINT32_C(0x300) - #define RX_PROD_PKT_BD_FLAGS_BUFFERS_SFT 8 - #define RX_PROD_PKT_BD_FLAGS_MASK UINT32_C(0xffc0) - #define RX_PROD_PKT_BD_FLAGS_SFT 6 - uint16_t len; + uint8_t mgmt_fw_min_8b; /* - * This is the length in Bytes of the host physical buffer where - * data for the packet may be placed in host memory. + * This field represents the build version of mgmt firmware. + * A change in update version represents bug fixes. */ + uint8_t mgmt_fw_bld_8b; /* - * While this is a Byte resolution value, it is often - * advantageous to ensure that the buffers provided end on a - * host cache line. + * This field is a reserved field. This field can be used to + * represent firmware branches or customer specific releases + * tied to a specific (major,minor,update) version */ - uint32_t opaque; + uint8_t mgmt_fw_rsvd_8b; /* - * The opaque data field is pass through to the completion and - * can be used for any data that the driver wants to associate - * with this receive buffer set. + * This field represents the major version of network + * control firmware. + * A change in major version represents a major release. */ - uint64_t addr; + uint8_t netctrl_fw_maj_8b; /* - * This is the host physical address where data for the packet - * may by placed in host memory. + * This field represents the minor version of network + * control firmware. + * A change in minor version represents significant + * functionality changes. */ + uint8_t netctrl_fw_min_8b; /* - * While this is a Byte resolution value, it is often - * advantageous to ensure that the buffers provide start on a - * host cache line. + * This field represents the build version of network + * control firmware. + * A change in update version represents bug fixes. */ -} __attribute__((packed)); - -/* Completion Ring Structures */ -/* Note: This structure is used by the HWRM to communicate HWRM Error. */ -/* Base Completion Record (16 bytes) */ -struct cmpl_base { - uint16_t type; - /* unused is 10 b */ + uint8_t netctrl_fw_bld_8b; /* - * This field indicates the exact type of the completion. By - * convention, the LSB identifies the length of the record in - * 16B units. Even values indicate 16B records. Odd values - * indicate 32B records. + * This field is a reserved field. This field can be used to + * represent firmware branches or customer specific releases + * tied to a specific (major,minor,update) version */ - #define CMPL_BASE_TYPE_MASK UINT32_C(0x3f) - #define CMPL_BASE_TYPE_SFT 0 - /* TX L2 completion: Completion of TX packet. Length = 16B */ - #define CMPL_BASE_TYPE_TX_L2 UINT32_C(0x0) + uint8_t netctrl_fw_rsvd_8b; /* - * RX L2 completion: Completion of and L2 RX - * packet. Length = 32B + * This field is used to indicate device's capabilities and + * configurations. */ - #define CMPL_BASE_TYPE_RX_L2 UINT32_C(0x11) + uint32_t dev_caps_cfg; /* - * RX Aggregation Buffer completion : Completion - * of an L2 aggregation buffer in support of - * TPA, HDS, or Jumbo packet completion. Length - * = 16B + * If set to 1, then secure firmware update behavior + * is supported. 
+ * If set to 0, then secure firmware update behavior is + * not supported. */ - #define CMPL_BASE_TYPE_RX_AGG UINT32_C(0x12) + #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED \ + UINT32_C(0x1) /* - * RX L2 TPA Start Completion: Completion at the - * beginning of a TPA operation. Length = 32B + * If set to 1, then firmware based DCBX agent is supported. + * If set to 0, then firmware based DCBX agent capability + * is not supported on this device. */ - #define CMPL_BASE_TYPE_RX_TPA_START UINT32_C(0x13) + #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED \ + UINT32_C(0x2) /* - * RX L2 TPA End Completion: Completion at the - * end of a TPA operation. Length = 32B + * If set to 1, then HWRM short command format is supported. + * If set to 0, then HWRM short command format is not supported. */ - #define CMPL_BASE_TYPE_RX_TPA_END UINT32_C(0x15) + #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED \ + UINT32_C(0x4) /* - * Statistics Ejection Completion: Completion of - * statistics data ejection buffer. Length = 16B + * If set to 1, then HWRM short command format is required. + * If set to 0, then HWRM short command format is not required. */ - #define CMPL_BASE_TYPE_STAT_EJECT UINT32_C(0x1a) - /* HWRM Command Completion: Completion of an HWRM command. */ - #define CMPL_BASE_TYPE_HWRM_DONE UINT32_C(0x20) - /* Forwarded HWRM Request */ - #define CMPL_BASE_TYPE_HWRM_FWD_REQ UINT32_C(0x22) - /* Forwarded HWRM Response */ - #define CMPL_BASE_TYPE_HWRM_FWD_RESP UINT32_C(0x24) - /* HWRM Asynchronous Event Information */ - #define CMPL_BASE_TYPE_HWRM_ASYNC_EVENT UINT32_C(0x2e) - /* CQ Notification */ - #define CMPL_BASE_TYPE_CQ_NOTIFICATION UINT32_C(0x30) - /* SRQ Threshold Event */ - #define CMPL_BASE_TYPE_SRQ_EVENT UINT32_C(0x32) - /* DBQ Threshold Event */ - #define CMPL_BASE_TYPE_DBQ_EVENT UINT32_C(0x34) - /* QP Async Notification */ - #define CMPL_BASE_TYPE_QP_EVENT UINT32_C(0x38) - /* Function Async Notification */ - #define CMPL_BASE_TYPE_FUNC_EVENT UINT32_C(0x3a) - /* unused is 10 b */ - uint16_t info1; - /* info1 is 16 b */ - uint32_t info2; - /* info2 is 32 b */ - uint32_t info3_v; - /* info3 is 31 b */ - /* - * This value is written by the NIC such that it will be - * different for each pass through the completion queue. The - * even passes will write 1. The odd passes will write 0. - */ - #define CMPL_BASE_V UINT32_C(0x1) - /* info3 is 31 b */ - #define CMPL_BASE_INFO3_MASK UINT32_C(0xfffffffe) - #define CMPL_BASE_INFO3_SFT 1 - uint32_t info4; - /* info4 is 32 b */ -} __attribute__((packed)); - -/* TX Completion Record (16 bytes) */ -struct tx_cmpl { - uint16_t flags_type; + #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED \ + UINT32_C(0x8) /* - * This field indicates the exact type of the completion. By - * convention, the LSB identifies the length of the record in - * 16B units. Even values indicate 16B records. Odd values - * indicate 32B records. + * This field represents the major version of RoCE firmware. + * A change in major version represents a major release. */ - #define TX_CMPL_TYPE_MASK UINT32_C(0x3f) - #define TX_CMPL_TYPE_SFT 0 - /* TX L2 completion: Completion of TX packet. Length = 16B */ - #define TX_CMPL_TYPE_TX_L2 UINT32_C(0x0) + uint8_t roce_fw_maj_8b; /* - * When this bit is '1', it indicates a packet that has an error - * of some type. Type of error is indicated in error_flags. + * This field represents the minor version of RoCE firmware. + * A change in minor version represents significant + * functionality changes. 
*/ - #define TX_CMPL_FLAGS_ERROR UINT32_C(0x40) + uint8_t roce_fw_min_8b; /* - * When this bit is '1', it indicates that the packet completed - * was transmitted using the push acceleration data provided by - * the driver. When this bit is '0', it indicates that the - * packet had not push acceleration data written or was executed - * as a normal packet even though push data was provided. + * This field represents the build version of RoCE firmware. + * A change in update version represents bug fixes. */ - #define TX_CMPL_FLAGS_PUSH UINT32_C(0x80) - #define TX_CMPL_FLAGS_MASK UINT32_C(0xffc0) - #define TX_CMPL_FLAGS_SFT 6 - uint16_t unused_0; - /* unused1 is 16 b */ - uint32_t opaque; + uint8_t roce_fw_bld_8b; /* - * This is a copy of the opaque field from the first TX BD of - * this transmitted packet. + * This field is a reserved field. This field can be used to + * represent firmware branches or customer specific releases + * tied to a specific (major,minor,update) version */ - uint16_t errors_v; + uint8_t roce_fw_rsvd_8b; /* - * This value is written by the NIC such that it will be - * different for each pass through the completion queue. The - * even passes will write 1. The odd passes will write 0. + * This field represents the name of HWRM FW (ASCII chars + * with NULL at the end). */ - #define TX_CMPL_V UINT32_C(0x1) + char hwrm_fw_name[16]; /* - * This error indicates that there was some sort of problem with - * the BDs for the packet. + * This field represents the name of mgmt FW (ASCII chars + * with NULL at the end). */ - #define TX_CMPL_ERRORS_BUFFER_ERROR_MASK UINT32_C(0xe) - #define TX_CMPL_ERRORS_BUFFER_ERROR_SFT 1 - /* No error */ - #define TX_CMPL_ERRORS_BUFFER_ERROR_NO_ERROR (UINT32_C(0x0) << 1) - /* Bad Format: BDs were not formatted correctly. */ - #define TX_CMPL_ERRORS_BUFFER_ERROR_BAD_FMT (UINT32_C(0x2) << 1) - #define TX_CMPL_ERRORS_BUFFER_ERROR_LAST \ - TX_CMPL_ERRORS_BUFFER_ERROR_BAD_FMT + char mgmt_fw_name[16]; /* - * When this bit is '1', it indicates that the length of the - * packet was zero. No packet was transmitted. + * This field represents the name of network control + * firmware (ASCII chars with NULL at the end). */ - #define TX_CMPL_ERRORS_ZERO_LENGTH_PKT UINT32_C(0x10) + char netctrl_fw_name[16]; /* - * When this bit is '1', it indicates that the packet was longer - * than the programmed limit in TDI. No packet was transmitted. + * This field is reserved for future use. + * The responder should set it to 0. + * The requester should ignore this field. */ - #define TX_CMPL_ERRORS_EXCESSIVE_BD_LENGTH UINT32_C(0x20) + uint8_t reserved2[16]; /* - * When this bit is '1', it indicates that one or more of the - * BDs associated with this packet generated a PCI error. This - * probably means the address was not valid. + * This field represents the name of RoCE FW (ASCII chars + * with NULL at the end). */ - #define TX_CMPL_ERRORS_DMA_ERROR UINT32_C(0x40) + char roce_fw_name[16]; + /* This field returns the chip number. */ + uint16_t chip_num; + /* This field returns the revision of chip. */ + uint8_t chip_rev; + /* This field returns the chip metal number. */ + uint8_t chip_metal; + /* This field returns the bond id of the chip. */ + uint8_t chip_bond_id; + /* This value indicates the type of platform used for chip implementation. */ + uint8_t chip_platform_type; + /* ASIC */ + #define HWRM_VER_GET_OUTPUT_CHIP_PLATFORM_TYPE_ASIC UINT32_C(0x0) + /* FPGA platform of the chip. 
+	 */
+	#define HWRM_VER_GET_OUTPUT_CHIP_PLATFORM_TYPE_FPGA UINT32_C(0x1)
+	/* Palladium platform of the chip. */
+	#define HWRM_VER_GET_OUTPUT_CHIP_PLATFORM_TYPE_PALLADIUM UINT32_C(0x2)
+	#define HWRM_VER_GET_OUTPUT_CHIP_PLATFORM_TYPE_LAST \
+		HWRM_VER_GET_OUTPUT_CHIP_PLATFORM_TYPE_PALLADIUM
 	/*
-	 * When this bit is '1', it indicates that the packet was longer
-	 * than indicated by the hint. No packet was transmitted.
+	 * This field returns the maximum value of request window that
+	 * is supported by the HWRM. The request window is mapped
+	 * into device address space using MMIO.
 	 */
-	#define TX_CMPL_ERRORS_HINT_TOO_SHORT UINT32_C(0x80)
+	uint16_t max_req_win_len;
 	/*
-	 * When this bit is '1', it indicates that the packet was
-	 * dropped due to Poison TLP error on one or more of the TLPs in
-	 * the PXP completion.
+	 * This field returns the maximum value of response buffer in
+	 * bytes.
 	 */
-	#define TX_CMPL_ERRORS_POISON_TLP_ERROR UINT32_C(0x100)
-	#define TX_CMPL_ERRORS_MASK UINT32_C(0xfffe)
-	#define TX_CMPL_ERRORS_SFT 1
-	uint16_t unused_1;
-	/* unused2 is 16 b */
-	uint32_t unused_2;
-	/* unused3 is 32 b */
-} __attribute__((packed));
-
-/* RX Packet Completion Record (32 bytes split to 2 16-byte struct) */
-struct rx_pkt_cmpl {
-	uint16_t flags_type;
+	uint16_t max_resp_len;
 	/*
-	 * This field indicates the exact type of the completion. By
-	 * convention, the LSB identifies the length of the record in
-	 * 16B units. Even values indicate 16B records. Odd values
-	 * indicate 32B records.
+	 * This field returns the default request timeout value in
+	 * milliseconds.
 	 */
-	#define RX_PKT_CMPL_TYPE_MASK UINT32_C(0x3f)
-	#define RX_PKT_CMPL_TYPE_SFT 0
+	uint16_t def_req_timeout;
 	/*
-	 * RX L2 completion: Completion of and L2 RX
-	 * packet. Length = 32B
+	 * This field will indicate if any subsystem is not fully
+	 * initialized.
 	 */
-	#define RX_PKT_CMPL_TYPE_RX_L2 UINT32_C(0x11)
+	uint8_t flags;
 	/*
-	 * When this bit is '1', it indicates a packet that has an error
-	 * of some type. Type of error is indicated in error_flags.
+	 * If set to 1, device is not ready.
+	 * If set to 0, device is ready to accept all HWRM commands.
 	 */
-	#define RX_PKT_CMPL_FLAGS_ERROR UINT32_C(0x40)
-	/* This field indicates how the packet was placed in the buffer. */
-	#define RX_PKT_CMPL_FLAGS_PLACEMENT_MASK UINT32_C(0x380)
-	#define RX_PKT_CMPL_FLAGS_PLACEMENT_SFT 7
-	/* Normal: Packet was placed using normal algorithm. */
-	#define RX_PKT_CMPL_FLAGS_PLACEMENT_NORMAL (UINT32_C(0x0) << 7)
-	/* Jumbo: Packet was placed using jumbo algorithm. */
-	#define RX_PKT_CMPL_FLAGS_PLACEMENT_JUMBO (UINT32_C(0x1) << 7)
-	/*
-	 * Header/Data Separation: Packet was placed
-	 * using Header/Data separation algorithm. The
-	 * separation location is indicated by the itype
-	 * field.
+	#define HWRM_VER_GET_OUTPUT_FLAGS_DEV_NOT_RDY UINT32_C(0x1)
+	/*
+	 * If set to 1, external version present.
+	 * If set to 0, external version not present.
 	 */
-	#define RX_PKT_CMPL_FLAGS_PLACEMENT_HDS (UINT32_C(0x2) << 7)
-	#define RX_PKT_CMPL_FLAGS_PLACEMENT_LAST \
-		RX_PKT_CMPL_FLAGS_PLACEMENT_HDS
-	/* This bit is '1' if the RSS field in this completion is valid. */
-	#define RX_PKT_CMPL_FLAGS_RSS_VALID UINT32_C(0x400)
-	/* unused is 1 b */
-	#define RX_PKT_CMPL_FLAGS_UNUSED UINT32_C(0x800)
+	#define HWRM_VER_GET_OUTPUT_FLAGS_EXT_VER_AVAIL UINT32_C(0x2)
+	uint8_t unused_0[2];
 	/*
-	 * This value indicates what the inner packet determined for the
-	 * packet was.
+	 * For backward compatibility this field must be set to 1.
+ * Older drivers might look for this field to be 1 before + * processing the message. */ - #define RX_PKT_CMPL_FLAGS_ITYPE_MASK UINT32_C(0xf000) - #define RX_PKT_CMPL_FLAGS_ITYPE_SFT 12 - /* Not Known: Indicates that the packet type was not known. */ - #define RX_PKT_CMPL_FLAGS_ITYPE_NOT_KNOWN (UINT32_C(0x0) << 12) + uint8_t always_1; /* - * IP Packet: Indicates that the packet was an - * IP packet, but further classification was not - * possible. + * This field represents the major version of HWRM interface + * specification supported by the HWRM implementation. + * The interface major version is intended to change only when + * non backward compatible changes are made to the HWRM + * interface specification. A HWRM implementation that is + * compliant with this specification shall provide value of 1 + * in this field. */ - #define RX_PKT_CMPL_FLAGS_ITYPE_IP (UINT32_C(0x1) << 12) + uint16_t hwrm_intf_major; /* - * TCP Packet: Indicates that the packet was IP - * and TCP. This indicates that the - * payload_offset field is valid. + * This field represents the minor version of HWRM interface + * specification supported by the HWRM implementation. + * A change in interface minor version is used to reflect + * significant backward compatible modification to HWRM + * interface specification. This can be due to addition or + * removal of functionality. HWRM interface specifications + * with the same major version but different minor versions are + * compatible. A HWRM implementation that is compliant with + * this specification shall provide value of 2 in this field. */ - #define RX_PKT_CMPL_FLAGS_ITYPE_TCP (UINT32_C(0x2) << 12) + uint16_t hwrm_intf_minor; /* - * UDP Packet: Indicates that the packet was IP - * and UDP. This indicates that the - * payload_offset field is valid. + * This field represents the update version of HWRM interface + * specification supported by the HWRM implementation. The + * interface update version is used to reflect minor changes or + * bug fixes to a released HWRM interface specification. + * A HWRM implementation that is compliant with this + * specification shall provide value of 2 in this field. */ - #define RX_PKT_CMPL_FLAGS_ITYPE_UDP (UINT32_C(0x3) << 12) + uint16_t hwrm_intf_build; /* - * FCoE Packet: Indicates that the packet was - * recognized as a FCoE. This also indicates - * that the payload_offset field is valid. + * This field represents the patch version of HWRM interface + * specification supported by the HWRM implementation. */ - #define RX_PKT_CMPL_FLAGS_ITYPE_FCOE (UINT32_C(0x4) << 12) + uint16_t hwrm_intf_patch; /* - * RoCE Packet: Indicates that the packet was - * recognized as a RoCE. This also indicates - * that the payload_offset field is valid. + * This field represents the major version of HWRM firmware. + * A change in firmware major version represents a major + * firmware release. */ - #define RX_PKT_CMPL_FLAGS_ITYPE_ROCE (UINT32_C(0x5) << 12) + uint16_t hwrm_fw_major; /* - * ICMP Packet: Indicates that the packet was - * recognized as ICMP. This indicates that the - * payload_offset field is valid. + * This field represents the minor version of HWRM firmware. + * A change in firmware minor version represents significant + * firmware functionality changes. */ - #define RX_PKT_CMPL_FLAGS_ITYPE_ICMP (UINT32_C(0x7) << 12) + uint16_t hwrm_fw_minor; /* - * PtP packet wo/timestamp: Indicates that the - * packet was recognized as a PtP packet. + * This field represents the build version of HWRM firmware. 
+ * A change in firmware build version represents bug fixes to + * a released firmware. */ - #define RX_PKT_CMPL_FLAGS_ITYPE_PTP_WO_TIMESTAMP (UINT32_C(0x8) << 12) + uint16_t hwrm_fw_build; /* - * PtP packet w/timestamp: Indicates that the - * packet was recognized as a PtP packet and - * that a timestamp was taken for the packet. + * This field is a reserved field. + * This field can be used to represent firmware branches or customer + * specific releases tied to a specific (major,minor,update) version + * of the HWRM firmware. */ - #define RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP (UINT32_C(0x9) << 12) - #define RX_PKT_CMPL_FLAGS_ITYPE_LAST \ - RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP - #define RX_PKT_CMPL_FLAGS_MASK UINT32_C(0xffc0) - #define RX_PKT_CMPL_FLAGS_SFT 6 - uint16_t len; + uint16_t hwrm_fw_patch; /* - * This is the length of the data for the packet stored in the - * buffer(s) identified by the opaque value. This includes the - * packet BD and any associated buffer BDs. This does not - * include the length of any data places in aggregation BDs. + * This field represents the major version of mgmt firmware. + * A change in major version represents a major release. */ - uint32_t opaque; + uint16_t mgmt_fw_major; /* - * This is a copy of the opaque field from the RX BD this - * completion corresponds to. + * This field represents the minor version of HWRM firmware. + * A change in firmware minor version represents significant + * firmware functionality changes. */ - uint8_t agg_bufs_v1; - /* unused1 is 2 b */ + uint16_t mgmt_fw_minor; /* - * This value is written by the NIC such that it will be - * different for each pass through the completion queue. The - * even passes will write 1. The odd passes will write 0. + * This field represents the build version of mgmt firmware. + * A change in update version represents bug fixes. */ - #define RX_PKT_CMPL_V1 UINT32_C(0x1) + uint16_t mgmt_fw_build; /* - * This value is the number of aggregation buffers that follow - * this entry in the completion ring that are a part of this - * packet. If the value is zero, then the packet is completely - * contained in the buffer space provided for the packet in the - * RX ring. + * This field is a reserved field. This field can be used to + * represent firmware branches or customer specific releases + * tied to a specific (major,minor,update) version. */ - #define RX_PKT_CMPL_AGG_BUFS_MASK UINT32_C(0x3e) - #define RX_PKT_CMPL_AGG_BUFS_SFT 1 - /* unused1 is 2 b */ - uint8_t rss_hash_type; - /* - * This is the RSS hash type for the packet. The value is packed - * {tuple_extrac_op[1:0],rss_profile_id[4:0],tuple_extrac_op[2]} - * . The value of tuple_extrac_op provides the information about - * what fields the hash was computed on. * 0: The RSS hash was - * computed over source IP address, destination IP address, - * source port, and destination port of inner IP and TCP or UDP - * headers. Note: For non-tunneled packets, the packet headers - * are considered inner packet headers for the RSS hash - * computation purpose. * 1: The RSS hash was computed over - * source IP address and destination IP address of inner IP - * header. Note: For non-tunneled packets, the packet headers - * are considered inner packet headers for the RSS hash - * computation purpose. * 2: The RSS hash was computed over - * source IP address, destination IP address, source port, and - * destination port of IP and TCP or UDP headers of outer tunnel - * headers. Note: For non-tunneled packets, this value is not - * applicable. 
* 3: The RSS hash was computed over source IP - * address and destination IP address of IP header of outer - * tunnel headers. Note: For non-tunneled packets, this value is - * not applicable. Note that 4-tuples values listed above are - * applicable for layer 4 protocols supported and enabled for - * RSS in the hardware, HWRM firmware, and drivers. For example, - * if RSS hash is supported and enabled for TCP traffic only, - * then the values of tuple_extract_op corresponding to 4-tuples - * are only valid for TCP traffic. - */ - uint8_t payload_offset; - /* - * This value indicates the offset in bytes from the beginning - * of the packet where the inner payload starts. This value is - * valid for TCP, UDP, FCoE, and RoCE packets. A value of zero - * indicates that header is 256B into the packet. - */ - uint8_t unused_1; - /* unused2 is 8 b */ - uint32_t rss_hash; + uint16_t mgmt_fw_patch; /* - * This value is the RSS hash value calculated for the packet - * based on the mode bits and key value in the VNIC. + * This field represents the major version of network control + * firmware. A change in major version represents + * a major release. */ -} __attribute__((packed)); - -/* last 16 bytes of RX Packet Completion Record */ -struct rx_pkt_cmpl_hi { - uint32_t flags2; + uint16_t netctrl_fw_major; /* - * This indicates that the ip checksum was calculated for the - * inner packet and that the ip_cs_error field indicates if - * there was an error. + * This field represents the minor version of network control + * firmware. A change in minor version represents significant + * functionality changes. */ - #define RX_PKT_CMPL_FLAGS2_IP_CS_CALC UINT32_C(0x1) + uint16_t netctrl_fw_minor; /* - * This indicates that the TCP, UDP or ICMP checksum was - * calculated for the inner packet and that the l4_cs_error - * field indicates if there was an error. + * This field represents the build version of network control + * firmware. A change in update version represents bug fixes. */ - #define RX_PKT_CMPL_FLAGS2_L4_CS_CALC UINT32_C(0x2) + uint16_t netctrl_fw_build; /* - * This indicates that the ip checksum was calculated for the - * tunnel header and that the t_ip_cs_error field indicates if - * there was an error. + * This field is a reserved field. This field can be used to + * represent firmware branches or customer specific releases + * tied to a specific (major,minor,update) version */ - #define RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC UINT32_C(0x4) + uint16_t netctrl_fw_patch; /* - * This indicates that the UDP checksum was calculated for the - * tunnel packet and that the t_l4_cs_error field indicates if - * there was an error. + * This field represents the major version of RoCE firmware. + * A change in major version represents a major release. */ - #define RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC UINT32_C(0x8) - /* This value indicates what format the metadata field is. */ - #define RX_PKT_CMPL_FLAGS2_META_FORMAT_MASK UINT32_C(0xf0) - #define RX_PKT_CMPL_FLAGS2_META_FORMAT_SFT 4 - /* No metadata informtaion. Value is zero. */ - #define RX_PKT_CMPL_FLAGS2_META_FORMAT_NONE (UINT32_C(0x0) << 4) - /* - * The metadata field contains the VLAN tag and - * TPID value. - metadata[11:0] contains the - * vlan VID value. - metadata[12] contains the - * vlan DE value. - metadata[15:13] contains the - * vlan PRI value. - metadata[31:16] contains - * the vlan TPID value. 
- */ - #define RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN (UINT32_C(0x1) << 4) - #define RX_PKT_CMPL_FLAGS2_META_FORMAT_LAST \ - RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN + uint16_t roce_fw_major; /* - * This field indicates the IP type for the inner-most IP - * header. A value of '0' indicates IPv4. A value of '1' - * indicates IPv6. This value is only valid if itype indicates a - * packet with an IP header. + * This field represents the minor version of RoCE firmware. + * A change in minor version represents significant + * functionality changes. */ - #define RX_PKT_CMPL_FLAGS2_IP_TYPE UINT32_C(0x100) - uint32_t metadata; + uint16_t roce_fw_minor; /* - * This is data from the CFA block as indicated by the - * meta_format field. + * This field represents the build version of RoCE firmware. + * A change in update version represents bug fixes. */ - /* When meta_format=1, this value is the VLAN VID. */ - #define RX_PKT_CMPL_METADATA_VID_MASK UINT32_C(0xfff) - #define RX_PKT_CMPL_METADATA_VID_SFT 0 - /* When meta_format=1, this value is the VLAN DE. */ - #define RX_PKT_CMPL_METADATA_DE UINT32_C(0x1000) - /* When meta_format=1, this value is the VLAN PRI. */ - #define RX_PKT_CMPL_METADATA_PRI_MASK UINT32_C(0xe000) - #define RX_PKT_CMPL_METADATA_PRI_SFT 13 - /* When meta_format=1, this value is the VLAN TPID. */ - #define RX_PKT_CMPL_METADATA_TPID_MASK UINT32_C(0xffff0000) - #define RX_PKT_CMPL_METADATA_TPID_SFT 16 - uint16_t errors_v2; + uint16_t roce_fw_build; /* - * This value is written by the NIC such that it will be - * different for each pass through the completion queue. The - * even passes will write 1. The odd passes will write 0. + * This field is a reserved field. This field can be used to + * represent firmware branches or customer specific releases + * tied to a specific (major,minor,update) version */ - #define RX_PKT_CMPL_V2 UINT32_C(0x1) + uint16_t roce_fw_patch; /* - * This error indicates that there was some sort of problem with - * the BDs for the packet that was found after part of the - * packet was already placed. The packet should be treated as - * invalid. + * This field returns the maximum extended request length acceptable + * by the device which allows requests greater than mailbox size when + * used with the short cmd request format. */ - #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_MASK UINT32_C(0xe) - #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_SFT 1 - /* No buffer error */ - #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_NO_BUFFER (UINT32_C(0x0) << 1) + uint16_t max_ext_req_len; + uint8_t unused_1[5]; /* - * Did Not Fit: Packet did not fit into packet - * buffer provided. For regular placement, this - * means the packet did not fit in the buffer - * provided. For HDS and jumbo placement, this - * means that the packet could not be placed - * into 7 physical buffers or less. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_DID_NOT_FIT \ - (UINT32_C(0x1) << 1) + uint8_t valid; +} __attribute__((packed)); + +/* bd_base (size:64b/8B) */ +struct bd_base { + uint8_t type; + /* This value identifies the type of buffer descriptor. 
+	 */
+	#define BD_BASE_TYPE_MASK UINT32_C(0x3f)
+	#define BD_BASE_TYPE_SFT 0
 	/*
-	 * Not On Chip: All BDs needed for the packet
-	 * were not on-chip when the packet arrived.
+	 * Indicates that this BD is 16B long and is used for
+	 * normal L2 packet transmission.
 	 */
-	#define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_NOT_ON_CHIP \
-		(UINT32_C(0x2) << 1)
-	/* Bad Format: BDs were not formatted correctly. */
-	#define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT \
-		(UINT32_C(0x3) << 1)
-	#define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_LAST \
-		RX_PKT_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT
-	/* This indicates that there was an error in the IP header checksum. */
-	#define RX_PKT_CMPL_ERRORS_IP_CS_ERROR UINT32_C(0x10)
+	#define BD_BASE_TYPE_TX_BD_SHORT UINT32_C(0x0)
 	/*
-	 * This indicates that there was an error in the TCP, UDP or
-	 * ICMP checksum.
+	 * Indicates that this BD is 16B long and is an empty
+	 * TX BD. Not valid for use by the driver.
 	 */
-	#define RX_PKT_CMPL_ERRORS_L4_CS_ERROR UINT32_C(0x20)
+	#define BD_BASE_TYPE_TX_BD_EMPTY UINT32_C(0x1)
 	/*
-	 * This indicates that there was an error in the tunnel IP
-	 * header checksum.
+	 * Indicates that this BD is 16B long and is an RX Producer
+	 * (ie. empty) buffer descriptor.
 	 */
-	#define RX_PKT_CMPL_ERRORS_T_IP_CS_ERROR UINT32_C(0x40)
+	#define BD_BASE_TYPE_RX_PROD_PKT UINT32_C(0x4)
 	/*
-	 * This indicates that there was an error in the tunnel UDP
-	 * checksum.
+	 * Indicates that this BD is 16B long and is an RX
+	 * Producer Buffer BD.
 	 */
-	#define RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR UINT32_C(0x80)
+	#define BD_BASE_TYPE_RX_PROD_BFR UINT32_C(0x5)
 	/*
-	 * This indicates that there was a CRC error on either an FCoE
-	 * or RoCE packet. The itype indicates the packet type.
+	 * Indicates that this BD is 16B long and is an
+	 * RX Producer Assembly Buffer Descriptor.
 	 */
-	#define RX_PKT_CMPL_ERRORS_CRC_ERROR UINT32_C(0x100)
+	#define BD_BASE_TYPE_RX_PROD_AGG UINT32_C(0x6)
 	/*
-	 * This indicates that there was an error in the tunnel portion
-	 * of the packet when this field is non-zero.
+	 * Indicates that this BD is 32B long and is used for
+	 * normal L2 packet transmission.
 	 */
-	#define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_MASK UINT32_C(0xe00)
-	#define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_SFT 9
+	#define BD_BASE_TYPE_TX_BD_LONG UINT32_C(0x10)
+	#define BD_BASE_TYPE_LAST BD_BASE_TYPE_TX_BD_LONG
+	uint8_t unused_1[7];
+} __attribute__((packed));
+
+/* tx_bd_short (size:128b/16B) */
+struct tx_bd_short {
 	/*
-	 * No additional error occurred on the tunnel
-	 * portion of the packet of the packet does not
-	 * have a tunnel.
+	 * All bits in this field must be valid on the first BD of a packet.
+	 * Only the packet_end bit must be valid for the remaining BDs
+	 * of a packet.
 	 */
-	#define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_NO_ERROR (UINT32_C(0x0) << 9)
+	uint16_t flags_type;
+	/* This value identifies the type of buffer descriptor. */
+	#define TX_BD_SHORT_TYPE_MASK UINT32_C(0x3f)
+	#define TX_BD_SHORT_TYPE_SFT 0
 	/*
-	 * Indicates that IP header version does not
-	 * match expectation from L2 Ethertype for IPv4
-	 * and IPv6 in the tunnel header.
+	 * Indicates that this BD is 16B long and is used for
+	 * normal L2 packet transmission.
 	 */
-	#define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_VERSION \
-		(UINT32_C(0x1) << 9)
+	#define TX_BD_SHORT_TYPE_TX_BD_SHORT UINT32_C(0x0)
+	#define TX_BD_SHORT_TYPE_LAST TX_BD_SHORT_TYPE_TX_BD_SHORT
 	/*
-	 * Indicates that header length is out of range
-	 * in the tunnel header. Valid for IPv4.
+	 * All bits in this field must be valid on the first BD of a packet.
+	 * Only the packet_end bit must be valid for the remaining BDs
+	 * of a packet.
 	 */
-	#define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_HDR_LEN \
-		(UINT32_C(0x2) << 9)
+	#define TX_BD_SHORT_FLAGS_MASK UINT32_C(0xffc0)
+	#define TX_BD_SHORT_FLAGS_SFT 6
 	/*
-	 * Indicates that the physical packet is shorter
-	 * than that claimed by the PPPoE header length
-	 * for a tunnel PPPoE packet.
+	 * If set to 1, the packet ends with the data in the buffer
+	 * pointed to by this descriptor. This flag must be
+	 * valid on every BD.
 	 */
-	#define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_TUNNEL_TOTAL_ERROR \
-		(UINT32_C(0x3) << 9)
+	#define TX_BD_SHORT_FLAGS_PACKET_END UINT32_C(0x40)
 	/*
-	 * Indicates that physical packet is shorter
-	 * than that claimed by the tunnel l3 header
-	 * length. Valid for IPv4, or IPv6 tunnel packet
-	 * packets.
+	 * If set to 1, the device will not generate a completion for
+	 * this transmit packet unless there is an error in its
+	 * processing.
+	 * If this bit
+	 * is set to 0, then the packet will be completed normally.
+	 *
+	 * This bit must be valid only on the first BD of a packet.
 	 */
-	#define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_IP_TOTAL_ERROR \
-		(UINT32_C(0x4) << 9)
+	#define TX_BD_SHORT_FLAGS_NO_CMPL UINT32_C(0x80)
+	/*
+	 * This value indicates how many 16B BD locations are consumed
+	 * in the ring by this packet.
+	 * A value of 1 indicates that this BD is the only BD (and that
+	 * it is a short BD). A value
+	 * of 3 indicates either 3 short BDs or 1 long BD and one short
+	 * BD in the packet. A value of 0 indicates
+	 * that there are 32 BD locations in the packet (the maximum).
+	 *
+	 * This field is valid only on the first BD of a packet.
+	 */
+	#define TX_BD_SHORT_FLAGS_BD_CNT_MASK UINT32_C(0x1f00)
+	#define TX_BD_SHORT_FLAGS_BD_CNT_SFT 8
+	/*
+	 * This value is a hint for the length of the entire packet.
+	 * It is used by the chip to optimize internal processing.
+	 *
+	 * The packet will be dropped if the hint is too short.
+	 *
+	 * This field is valid only on the first BD of a packet.
+	 */
+	#define TX_BD_SHORT_FLAGS_LHINT_MASK UINT32_C(0x6000)
+	#define TX_BD_SHORT_FLAGS_LHINT_SFT 13
+	/* indicates packet length < 512B */
+	#define TX_BD_SHORT_FLAGS_LHINT_LT512 (UINT32_C(0x0) << 13)
+	/* indicates 512 <= packet length < 1KB */
+	#define TX_BD_SHORT_FLAGS_LHINT_LT1K (UINT32_C(0x1) << 13)
+	/* indicates 1KB <= packet length < 2KB */
+	#define TX_BD_SHORT_FLAGS_LHINT_LT2K (UINT32_C(0x2) << 13)
+	/* indicates packet length >= 2KB */
+	#define TX_BD_SHORT_FLAGS_LHINT_GTE2K (UINT32_C(0x3) << 13)
+	#define TX_BD_SHORT_FLAGS_LHINT_LAST \
+		TX_BD_SHORT_FLAGS_LHINT_GTE2K
 	/*
-	 * Indicates that the physical packet is shorter
-	 * than that claimed by the tunnel UDP header
-	 * length for a tunnel UDP packet that is not
-	 * fragmented.
+	 * If set to 1, the device immediately updates the Send Consumer
+	 * Index after the buffer associated with this descriptor has
+	 * been transferred via DMA to NIC memory from host memory. An
+	 * interrupt may or may not be generated according to the state
+	 * of the interrupt avoidance mechanisms. If this bit
+	 * is set to 0, then the Consumer Index is only updated as soon
+	 * as one of the host interrupt coalescing conditions has been met.
+	 *
+	 * This bit must be valid on the first BD of a packet.
 	 */
-	#define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_UDP_TOTAL_ERROR \
-		(UINT32_C(0x5) << 9)
+	#define TX_BD_SHORT_FLAGS_COAL_NOW UINT32_C(0x8000)
 	/*
-	 * indicates that the IPv4 TTL or IPv6 hop limit
-	 * check have failed (e.g. TTL = 0) in the
-	 * tunnel header. Valid for IPv4, and IPv6.
+	 * This is the length of the host physical buffer this BD describes
+	 * in bytes.
+	 *
+	 * This field must be valid on all BDs of a packet.
 	 */
-	#define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL \
-		(UINT32_C(0x6) << 9)
-	#define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_LAST \
-		RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL
+	uint16_t len;
 	/*
-	 * This indicates that there was an error in the inner portion
-	 * of the packet when this field is non-zero.
+	 * The opaque data field is pass through to the completion and can be
+	 * used for any data that the driver wants to associate with the
+	 * transmit BD.
+	 *
+	 * This field must be valid on the first BD of a packet.
 	 */
-	#define RX_PKT_CMPL_ERRORS_PKT_ERROR_MASK UINT32_C(0xf000)
-	#define RX_PKT_CMPL_ERRORS_PKT_ERROR_SFT 12
+	uint32_t opaque;
 	/*
-	 * No additional error occurred on the tunnel
-	 * portion of the packet of the packet does not
-	 * have a tunnel.
+	 * This is the host physical address for the portion of the packet
+	 * described by this TX BD.
+	 *
+	 * This value must be valid on all BDs of a packet.
 	 */
-	#define RX_PKT_CMPL_ERRORS_PKT_ERROR_NO_ERROR (UINT32_C(0x0) << 12)
+	uint64_t address;
+} __attribute__((packed));
+
+/* tx_bd_long (size:128b/16B) */
+struct tx_bd_long {
+	/* This value identifies the type of buffer descriptor. */
+	uint16_t flags_type;
 	/*
-	 * Indicates that IP header version does not
-	 * match expectation from L2 Ethertype for IPv4
-	 * and IPv6 or that option other than VFT was
-	 * parsed on FCoE packet.
+	 * This value indicates the type of buffer descriptor.
 	 */
-	#define RX_PKT_CMPL_ERRORS_PKT_ERROR_L3_BAD_VERSION \
-		(UINT32_C(0x1) << 12)
+	#define TX_BD_LONG_TYPE_MASK UINT32_C(0x3f)
+	#define TX_BD_LONG_TYPE_SFT 0
 	/*
-	 * indicates that header length is out of range.
-	 * Valid for IPv4 and RoCE
+	 * Indicates that this BD is 32B long and is used for
+	 * normal L2 packet transmission.
 	 */
-	#define RX_PKT_CMPL_ERRORS_PKT_ERROR_L3_BAD_HDR_LEN \
-		(UINT32_C(0x2) << 12)
+	#define TX_BD_LONG_TYPE_TX_BD_LONG UINT32_C(0x10)
+	#define TX_BD_LONG_TYPE_LAST TX_BD_LONG_TYPE_TX_BD_LONG
 	/*
-	 * indicates that the IPv4 TTL or IPv6 hop limit
-	 * check have failed (e.g. TTL = 0). Valid for
-	 * IPv4, and IPv6
+	 * All bits in this field must be valid on the first BD of a packet.
+	 * Only the packet_end bit must be valid for the remaining BDs
+	 * of a packet.
 	 */
-	#define RX_PKT_CMPL_ERRORS_PKT_ERROR_L3_BAD_TTL (UINT32_C(0x3) << 12)
+	#define TX_BD_LONG_FLAGS_MASK UINT32_C(0xffc0)
+	#define TX_BD_LONG_FLAGS_SFT 6
 	/*
-	 * Indicates that physical packet is shorter
-	 * than that claimed by the l3 header length.
-	 * Valid for IPv4, IPv6 packet or RoCE packets.
+	 * If set to 1, the packet ends with the data in the buffer
+	 * pointed to by this descriptor. This flag must be
+	 * valid on every BD.
 	 */
-	#define RX_PKT_CMPL_ERRORS_PKT_ERROR_IP_TOTAL_ERROR \
-		(UINT32_C(0x4) << 12)
+	#define TX_BD_LONG_FLAGS_PACKET_END UINT32_C(0x40)
 	/*
-	 * Indicates that the physical packet is shorter
-	 * than that claimed by the UDP header length
-	 * for a UDP packet that is not fragmented.
+	 * If set to 1, the device will not generate a completion for
+	 * this transmit packet unless there is an error in its
+	 * processing.
+	 * If this bit
+	 * is set to 0, then the packet will be completed normally.
+	 *
+	 * This bit must be valid only on the first BD of a packet.
 	 */
-	#define RX_PKT_CMPL_ERRORS_PKT_ERROR_UDP_TOTAL_ERROR \
-		(UINT32_C(0x5) << 12)
+	#define TX_BD_LONG_FLAGS_NO_CMPL UINT32_C(0x80)
+	/*
+	 * This value indicates how many 16B BD locations are consumed
+	 * in the ring by this packet.
+	 * A value of 1 indicates that this BD is the only BD (and that
+	 * it is a short BD). A value
+	 * of 3 indicates either 3 short BDs or 1 long BD and one short
+	 * BD in the packet. A value of 0 indicates
+	 * that there are 32 BD locations in the packet (the maximum).
+	 *
+	 * This field is valid only on the first BD of a packet.
+	 */
+	#define TX_BD_LONG_FLAGS_BD_CNT_MASK UINT32_C(0x1f00)
+	#define TX_BD_LONG_FLAGS_BD_CNT_SFT 8
+	/*
+	 * This value is a hint for the length of the entire packet.
+	 * It is used by the chip to optimize internal processing.
+	 *
+	 * The packet will be dropped if the hint is too short.
+	 *
+	 * This field is valid only on the first BD of a packet.
+	 */
+	#define TX_BD_LONG_FLAGS_LHINT_MASK UINT32_C(0x6000)
+	#define TX_BD_LONG_FLAGS_LHINT_SFT 13
+	/* indicates packet length < 512B */
+	#define TX_BD_LONG_FLAGS_LHINT_LT512 (UINT32_C(0x0) << 13)
+	/* indicates 512 <= packet length < 1KB */
+	#define TX_BD_LONG_FLAGS_LHINT_LT1K (UINT32_C(0x1) << 13)
+	/* indicates 1KB <= packet length < 2KB */
+	#define TX_BD_LONG_FLAGS_LHINT_LT2K (UINT32_C(0x2) << 13)
+	/* indicates packet length >= 2KB */
+	#define TX_BD_LONG_FLAGS_LHINT_GTE2K (UINT32_C(0x3) << 13)
+	#define TX_BD_LONG_FLAGS_LHINT_LAST TX_BD_LONG_FLAGS_LHINT_GTE2K
 	/*
-	 * Indicates that TCP header length > IP
-	 * payload. Valid for TCP packets only.
+	 * If set to 1, the device immediately updates the Send Consumer
+	 * Index after the buffer associated with this descriptor has
+	 * been transferred via DMA to NIC memory from host memory. An
+	 * interrupt may or may not be generated according to the state
+	 * of the interrupt avoidance mechanisms. If this bit
+	 * is set to 0, then the Consumer Index is only updated as soon
+	 * as one of the host interrupt coalescing conditions has been met.
+	 *
+	 * This bit must be valid on the first BD of a packet.
 	 */
-	#define RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN \
-		(UINT32_C(0x6) << 12)
-	/* Indicates that TCP header length < 5. Valid for TCP. */
-	#define RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN_TOO_SMALL \
-		(UINT32_C(0x7) << 12)
+	#define TX_BD_LONG_FLAGS_COAL_NOW UINT32_C(0x8000)
 	/*
-	 * Indicates that TCP option headers result in a
-	 * TCP header size that does not match data
-	 * offset in TCP header. Valid for TCP.
+	 * This is the length of the host physical buffer this BD describes
+	 * in bytes.
+	 *
+	 * This field must be valid on all BDs of a packet.
 	 */
-	#define RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN \
-		(UINT32_C(0x8) << 12)
-	#define RX_PKT_CMPL_ERRORS_PKT_ERROR_LAST \
-		RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN
-	#define RX_PKT_CMPL_ERRORS_MASK UINT32_C(0xfffe)
-	#define RX_PKT_CMPL_ERRORS_SFT 1
-	uint16_t cfa_code;
+	uint16_t len;
 	/*
-	 * This field identifies the CFA action rule that was used for
-	 * this packet.
+	 * The opaque data field is pass through to the completion and can be
+	 * used for any data that the driver wants to associate with the
+	 * transmit BD.
+	 *
+	 * This field must be valid on the first BD of a packet.
 	 */
-	uint32_t reorder;
+	uint32_t opaque;
 	/*
-	 * This value holds the reordering sequence number for the
-	 * packet. If the reordering sequence is not valid, then this
-	 * value is zero. The reordering domain for the packet is in the
-	 * bottom 8 to 10b of the rss_hash value. The bottom 20b of this
-	 * value contain the ordering domain value for the packet.
+	 * This is the host physical address for the portion of the packet
+	 * described by this TX BD.
+	 *
+	 * This value must be valid on all BDs of a packet.
 	 */
-	#define RX_PKT_CMPL_REORDER_MASK UINT32_C(0xffffff)
-	#define RX_PKT_CMPL_REORDER_SFT 0
+	uint64_t address;
 } __attribute__((packed));
 
-/* RX L2 TPA Start Completion Record (32 bytes split to 2 16-byte struct) */
-struct rx_tpa_start_cmpl {
-	uint16_t flags_type;
+/* tx_bd_long_hi (size:128b/16B) */
+struct tx_bd_long_hi {
 	/*
-	 * This field indicates the exact type of the completion. By
-	 * convention, the LSB identifies the length of the record in
-	 * 16B units. Even values indicate 16B records. Odd values
-	 * indicate 32B records.
+	 * All bits in this field must be valid on the first BD of a packet.
+	 * Their value on other BDs of the packet will be ignored.
 	 */
-	#define RX_TPA_START_CMPL_TYPE_MASK UINT32_C(0x3f)
-	#define RX_TPA_START_CMPL_TYPE_SFT 0
+	uint16_t lflags;
 	/*
-	 * RX L2 TPA Start Completion: Completion at the
-	 * beginning of a TPA operation. Length = 32B
+	 * If set to 1, the controller replaces the TCP/UDP checksum
+	 * fields of normal TCP/UDP checksum, or the inner TCP/UDP
+	 * checksum field of the encapsulated TCP/UDP packets with the
+	 * hardware calculated TCP/UDP checksum for the packet associated
+	 * with this descriptor. The flag is ignored if the LSO flag is set.
+	 *
+	 * This bit must be valid on the first BD of a packet.
 	 */
-	#define RX_TPA_START_CMPL_TYPE_RX_TPA_START UINT32_C(0x13)
-	/* This bit will always be '0' for TPA start completions. */
-	#define RX_TPA_START_CMPL_FLAGS_ERROR UINT32_C(0x40)
-	/* This field indicates how the packet was placed in the buffer. */
-	#define RX_TPA_START_CMPL_FLAGS_PLACEMENT_MASK UINT32_C(0x380)
-	#define RX_TPA_START_CMPL_FLAGS_PLACEMENT_SFT 7
+	#define TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM UINT32_C(0x1)
 	/*
-	 * Jumbo: TPA Packet was placed using jumbo
-	 * algorithm. This means that the first buffer
-	 * will be filled with data before moving to
-	 * aggregation buffers. Each aggregation buffer
-	 * will be filled before moving to the next
-	 * aggregation buffer.
+	 * If set to 1, the controller replaces the IP checksum of the
+	 * normal packets, or the inner IP checksum of the encapsulated
+	 * packets with the hardware calculated IP checksum for the
+	 * packet associated with this descriptor.
+	 *
+	 * This bit must be valid on the first BD of a packet.
 	 */
-	#define RX_TPA_START_CMPL_FLAGS_PLACEMENT_JUMBO (UINT32_C(0x1) << 7)
+	#define TX_BD_LONG_LFLAGS_IP_CHKSUM UINT32_C(0x2)
 	/*
-	 * Header/Data Separation: Packet was placed
-	 * using Header/Data separation algorithm. The
-	 * separation location is indicated by the itype
-	 * field.
+	 * If set to 1, the controller will not append an Ethernet CRC
+	 * to the end of the frame.
+	 *
+	 * This bit must be valid on the first BD of a packet.
+	 *
+	 * Packet must be 64B or longer when this flag is set. It is not
+	 * useful to use this bit with any form of TX offload such as
+	 * CSO or LSO. The intent is that the packet from the host already
+	 * has a valid Ethernet CRC on the packet.
 	 */
-	#define RX_TPA_START_CMPL_FLAGS_PLACEMENT_HDS (UINT32_C(0x2) << 7)
+	#define TX_BD_LONG_LFLAGS_NOCRC UINT32_C(0x4)
 	/*
-	 * GRO/Jumbo: Packet will be placed using
-	 * GRO/Jumbo where the first packet is filled
-	 * with data. Subsequent packets will be placed
/*
- * GRO/Jumbo: Packet will be placed using
- * GRO/Jumbo where the first packet is filled
- * with data. Subsequent packets will be placed
- * such that any one packet does not span two
- * aggregation buffers unless it starts at the
- * beginning of an aggregation buffer.
+ * If set to 1, the device will record the time at which the packet
+ * was actually transmitted at the TX MAC.
+ *
+ * This bit must be valid on the first BD of a packet.
*/
- #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_GRO_JUMBO \
-  (UINT32_C(0x5) << 7)
+ #define TX_BD_LONG_LFLAGS_STAMP UINT32_C(0x8)
/*
- * GRO/Header-Data Separation: Packet will be
- * placed using GRO/HDS where the header is in
- * the first packet. Payload of each packet will
- * be placed such that any one packet does not
- * span two aggregation buffers unless it starts
- * at the beginning of an aggregation buffer.
+ * If set to 1, the controller replaces the tunnel IP checksum
+ * field with hardware calculated IP checksum for the IP header
+ * of the packet associated with this descriptor.
+ *
+ * For outer UDP checksum, global outer UDP checksum TE_NIC register
+ * needs to be enabled. If the global outer UDP checksum TE_NIC register
+ * bit is set, outer UDP checksum will be calculated for the following
+ * cases:
+ * 1. Packets with tcp_udp_chksum flag set to offload checksum for inner
+ * packet AND the inner packet is TCP/UDP. If the inner packet is ICMP for
+ * example (non-TCP/UDP), even if the tcp_udp_chksum is set, the outer UDP
+ * checksum will not be calculated.
+ * 2. Packets with lso flag set which implies inner TCP checksum calculation
+ * as part of LSO operation.
+ */
+ #define TX_BD_LONG_LFLAGS_T_IP_CHKSUM UINT32_C(0x10)
+ /*
+ * If set to 1, the device will treat this packet with LSO (Large
+ * Send Offload) processing for both normal and encapsulated
+ * packets, which is a form of TCP segmentation. When this bit
+ * is 1, the hdr_size and mss fields must be valid. The driver
+ * doesn't need to set t_ip_chksum, ip_chksum, and tcp_udp_chksum
+ * flags since the controller will replace the appropriate
+ * checksum fields for segmented packets.
*/
- #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_GRO_HDS (UINT32_C(0x6) << 7)
- #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_LAST \
-  RX_TPA_START_CMPL_FLAGS_PLACEMENT_GRO_HDS
- /* This bit is '1' if the RSS field in this completion is valid. */
- #define RX_TPA_START_CMPL_FLAGS_RSS_VALID UINT32_C(0x400)
- /* unused is 1 b */
- #define RX_TPA_START_CMPL_FLAGS_UNUSED UINT32_C(0x800)
+ #define TX_BD_LONG_LFLAGS_LSO UINT32_C(0x20)
/*
- * This value indicates what the inner packet determined for the
- * packet was.
+ * If set to zero when LSO is '1', then the IPID will be treated
+ * as a 16b number and will be wrapped if it exceeds a value of
+ * 0xffff.
+ *
+ * If set to one when LSO is '1', then the IPID will be treated
+ * as a 15b number and will be wrapped if it exceeds a value of
+ * 0x7fff.
*/
- #define RX_TPA_START_CMPL_FLAGS_ITYPE_MASK UINT32_C(0xf000)
- #define RX_TPA_START_CMPL_FLAGS_ITYPE_SFT 12
- /* TCP Packet: Indicates that the packet was IP and TCP. */
- #define RX_TPA_START_CMPL_FLAGS_ITYPE_TCP (UINT32_C(0x2) << 12)
- #define RX_TPA_START_CMPL_FLAGS_ITYPE_LAST \
-  RX_TPA_START_CMPL_FLAGS_ITYPE_TCP
- #define RX_TPA_START_CMPL_FLAGS_MASK UINT32_C(0xffc0)
- #define RX_TPA_START_CMPL_FLAGS_SFT 6
- uint16_t len;
+ #define TX_BD_LONG_LFLAGS_IPID_FMT UINT32_C(0x40)
/*
- * This value indicates the amount of packet data written to the
- * buffer the opaque field in this completion corresponds to.
+ * If set to zero when LSO is '1', then the IPID of the tunnel
+ * IP header will not be modified during LSO operations.
+ *
+ * If set to one when LSO is '1', then the IPID of the tunnel
+ * IP header will be incremented for each subsequent segment of an
+ * LSO operation.
+ *
+ * The flag is ignored if the LSO packet is a normal (non-tunneled)
+ * TCP packet.
*/
- uint32_t opaque;
- /*
- * This is a copy of the opaque field from the RX BD this
- * completion corresponds to.
- */
- uint8_t v1;
- /* unused1 is 7 b */
- /*
- * This value is written by the NIC such that it will be
- * different for each pass through the completion queue. The
- * even passes will write 1. The odd passes will write 0.
- */
- #define RX_TPA_START_CMPL_V1 UINT32_C(0x1)
- /* unused1 is 7 b */
- uint8_t rss_hash_type;
- /*
- * This is the RSS hash type for the packet. The value is packed
- * {tuple_extrac_op[1:0],rss_profile_id[4:0],tuple_extrac_op[2]}
- * . The value of tuple_extrac_op provides the information about
- * what fields the hash was computed on. * 0: The RSS hash was
- * computed over source IP address, destination IP address,
- * source port, and destination port of inner IP and TCP or UDP
- * headers. Note: For non-tunneled packets, the packet headers
- * are considered inner packet headers for the RSS hash
- * computation purpose. * 1: The RSS hash was computed over
- * source IP address and destination IP address of inner IP
- * header. Note: For non-tunneled packets, the packet headers
- * are considered inner packet headers for the RSS hash
- * computation purpose. * 2: The RSS hash was computed over
- * source IP address, destination IP address, source port, and
- * destination port of IP and TCP or UDP headers of outer tunnel
- * headers. Note: For non-tunneled packets, this value is not
- * applicable. * 3: The RSS hash was computed over source IP
- * address and destination IP address of IP header of outer
- * tunnel headers. Note: For non-tunneled packets, this value is
- * not applicable. Note that 4-tuples values listed above are
- * applicable for layer 4 protocols supported and enabled for
- * RSS in the hardware, HWRM firmware, and drivers. For example,
- * if RSS hash is supported and enabled for TCP traffic only,
- * then the values of tuple_extract_op corresponding to 4-tuples
- * are only valid for TCP traffic.
- */
- uint16_t agg_id;
+ #define TX_BD_LONG_LFLAGS_T_IPID UINT32_C(0x80)
/*
- * This is the aggregation ID that the completion is associated
- * with. Use this number to correlate the TPA start completion
- * with the TPA end completion.
+ * If set to '1', then the RoCE ICRC will be appended to the
+ * packet. Packet must be a valid RoCE format packet.
*/
- /* unused2 is 9 b */
+ #define TX_BD_LONG_LFLAGS_ROCE_CRC UINT32_C(0x100)
/*
- * This is the aggregation ID that the completion is associated
- * with. Use this number to correlate the TPA start completion
- * with the TPA end completion.
+ * If set to '1', then the FCoE CRC will be appended to the
+ * packet. Packet must be a valid FCoE format packet.
*/
- #define RX_TPA_START_CMPL_AGG_ID_MASK UINT32_C(0xfe00)
- #define RX_TPA_START_CMPL_AGG_ID_SFT 9
- uint32_t rss_hash;
+ #define TX_BD_LONG_LFLAGS_FCOE_CRC UINT32_C(0x200)
+ uint16_t hdr_size;
/*
- * This value is the RSS hash value calculated for the packet
- * based on the mode bits and key value in the VNIC.
+ * When LSO is '1', this field must contain the offset of the
+ * TCP payload from the beginning of the packet as
+ * 16b words.
+ * In case of encapsulated/tunneling packet, this field
+ * contains the offset of the inner TCP payload from beginning of the
+ * packet as 16-bit words.
+ *
+ * This value must be valid on the first BD of a packet.
+ */
+ #define TX_BD_LONG_HDR_SIZE_MASK UINT32_C(0x1ff)
+ #define TX_BD_LONG_HDR_SIZE_SFT 0
+ uint32_t mss;
+ /*
+ * This is the MSS value that will be used to do the LSO processing.
+ * The value is the length in bytes of the TCP payload for each
+ * segment generated by the LSO operation.
+ *
+ * This value must be valid on the first BD of a packet.
+ */
+ #define TX_BD_LONG_MSS_MASK UINT32_C(0x7fff)
+ #define TX_BD_LONG_MSS_SFT 0
+ uint16_t unused2;
+ /*
+ * This value selects a CFA action to perform on the packet.
+ * Set this value to zero if no CFA action is desired.
+ *
+ * This value must be valid on the first BD of a packet.
+ */
+ uint16_t cfa_action;
+ /*
+ * This value is action meta-data that defines CFA edit operations
+ * that are done in addition to any action editing.
+ */
+ uint32_t cfa_meta;
+ /* When key=1, this is the VLAN tag VID value. */
+ #define TX_BD_LONG_CFA_META_VLAN_VID_MASK UINT32_C(0xfff)
+ #define TX_BD_LONG_CFA_META_VLAN_VID_SFT 0
+ /* When key=1, this is the VLAN tag DE value. */
+ #define TX_BD_LONG_CFA_META_VLAN_DE UINT32_C(0x1000)
+ /* When key=1, this is the VLAN tag PRI value. */
+ #define TX_BD_LONG_CFA_META_VLAN_PRI_MASK UINT32_C(0xe000)
+ #define TX_BD_LONG_CFA_META_VLAN_PRI_SFT 13
+ /* When key=1, this is the VLAN tag TPID select value. */
+ #define TX_BD_LONG_CFA_META_VLAN_TPID_MASK UINT32_C(0x70000)
+ #define TX_BD_LONG_CFA_META_VLAN_TPID_SFT 16
+ /* 0x88a8 */
+ #define TX_BD_LONG_CFA_META_VLAN_TPID_TPID88A8 (UINT32_C(0x0) << 16)
+ /* 0x8100 */
+ #define TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100 (UINT32_C(0x1) << 16)
+ /* 0x9100 */
+ #define TX_BD_LONG_CFA_META_VLAN_TPID_TPID9100 (UINT32_C(0x2) << 16)
+ /* 0x9200 */
+ #define TX_BD_LONG_CFA_META_VLAN_TPID_TPID9200 (UINT32_C(0x3) << 16)
+ /* 0x9300 */
+ #define TX_BD_LONG_CFA_META_VLAN_TPID_TPID9300 (UINT32_C(0x4) << 16)
+ /* Value programmed in CFA VLANTPID register. */
+ #define TX_BD_LONG_CFA_META_VLAN_TPID_TPIDCFG (UINT32_C(0x5) << 16)
+ #define TX_BD_LONG_CFA_META_VLAN_TPID_LAST \
+  TX_BD_LONG_CFA_META_VLAN_TPID_TPIDCFG
+ /* When key=1, these bits are reserved. */
+ #define TX_BD_LONG_CFA_META_VLAN_RESERVED_MASK UINT32_C(0xff80000)
+ #define TX_BD_LONG_CFA_META_VLAN_RESERVED_SFT 19
+ /*
+ * This field identifies the type of edit to be performed
+ * on the packet.
+ *
+ * This value must be valid on the first BD of a packet.
+ */
+ #define TX_BD_LONG_CFA_META_KEY_MASK UINT32_C(0xf0000000)
+ #define TX_BD_LONG_CFA_META_KEY_SFT 28
+ /* No editing */
+ #define TX_BD_LONG_CFA_META_KEY_NONE (UINT32_C(0x0) << 28)
+ /*
+ * - meta[17:16] - TPID select value (0 = 0x8100).
+ * - meta[15:12] - PRI/DE value.
+ * - meta[11:0] - VID value.
+ */
+ #define TX_BD_LONG_CFA_META_KEY_VLAN_TAG (UINT32_C(0x1) << 28)
+ #define TX_BD_LONG_CFA_META_KEY_LAST \
+  TX_BD_LONG_CFA_META_KEY_VLAN_TAG
} __attribute__((packed));
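To make the LSO and CFA-metadata plumbing above concrete, a small hypothetical sketch follows: for an Ethernet/IPv4/TCP packet with 54B of headers, hdr_size is expressed in 16-bit words and mss is the per-segment TCP payload size; the checksum lflags can stay clear because the controller fills the checksum fields during LSO. The numeric values are illustrative only, and byte-order conversion is again omitted.

static void fill_tx_bd_long_hi_lso(struct tx_bd_long_hi *bdh)
{
	bdh->lflags = TX_BD_LONG_LFLAGS_LSO; /* hdr_size and mss must be valid */
	bdh->hdr_size = 54 / 2;   /* Eth(14) + IPv4(20) + TCP(20) = 27 16b words */
	bdh->mss = 1448;          /* TCP payload bytes per generated segment */
	bdh->cfa_action = 0;      /* no CFA action */
	/* Ask the CFA block to insert a VLAN tag, TPID 0x8100, VID 100: */
	bdh->cfa_meta = TX_BD_LONG_CFA_META_KEY_VLAN_TAG |
			TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100 |
			(100 << TX_BD_LONG_CFA_META_VLAN_VID_SFT);
}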
-/* last 16 bytes of RX L2 TPA Start Completion Record */
-struct rx_tpa_start_cmpl_hi {
- uint32_t flags2;
+/* tx_bd_empty (size:128b/16B) */
+struct tx_bd_empty {
+ /* This value identifies the type of buffer descriptor. */
+ uint8_t type;
+ #define TX_BD_EMPTY_TYPE_MASK UINT32_C(0x3f)
+ #define TX_BD_EMPTY_TYPE_SFT 0
+ /*
+ * Indicates that this BD is 16B long and is an empty
+ * TX BD. Not valid for use by the driver.
+ */
+ #define TX_BD_EMPTY_TYPE_TX_BD_EMPTY UINT32_C(0x1)
+ #define TX_BD_EMPTY_TYPE_LAST TX_BD_EMPTY_TYPE_TX_BD_EMPTY
+ uint8_t unused_1[3];
+ uint8_t unused_2;
+ uint8_t unused_3[3];
+ uint8_t unused_4[8];
+} __attribute__((packed));
+
+/* rx_prod_pkt_bd (size:128b/16B) */
+struct rx_prod_pkt_bd {
+ /* This value identifies the type of buffer descriptor. */
+ uint16_t flags_type;
+ /* This value identifies the type of buffer descriptor. */
+ #define RX_PROD_PKT_BD_TYPE_MASK UINT32_C(0x3f)
+ #define RX_PROD_PKT_BD_TYPE_SFT 0
/*
- * This indicates that the ip checksum was calculated for the
- * inner packet and that the sum passed for all segments
- * included in the aggregation.
+ * Indicates that this BD is 16B long and is an RX Producer
+ * (i.e. empty) buffer descriptor.
*/
- #define RX_TPA_START_CMPL_FLAGS2_IP_CS_CALC UINT32_C(0x1)
+ #define RX_PROD_PKT_BD_TYPE_RX_PROD_PKT UINT32_C(0x4)
+ #define RX_PROD_PKT_BD_TYPE_LAST \
+  RX_PROD_PKT_BD_TYPE_RX_PROD_PKT
+ #define RX_PROD_PKT_BD_FLAGS_MASK UINT32_C(0xffc0)
+ #define RX_PROD_PKT_BD_FLAGS_SFT 6
/*
- * This indicates that the TCP, UDP or ICMP checksum was
- * calculated for the inner packet and that the sum passed for
- * all segments included in the aggregation.
+ * If set to 1, the packet will be placed at the address plus
+ * 2B. The 2 bytes of padding will be written as zero.
*/
- #define RX_TPA_START_CMPL_FLAGS2_L4_CS_CALC UINT32_C(0x2)
+ #define RX_PROD_PKT_BD_FLAGS_SOP_PAD UINT32_C(0x40)
/*
- * This indicates that the ip checksum was calculated for the
- * tunnel header and that the sum passed for all segments
- * included in the aggregation.
+ * If set to 1, the packet write will be padded out to the
+ * nearest cache-line with zero value padding.
*/
- #define RX_TPA_START_CMPL_FLAGS2_T_IP_CS_CALC UINT32_C(0x4)
- /*
- * This indicates that the UDP checksum was calculated for the
- * tunnel packet and that the sum passed for all segments
- * included in the aggregation.
+ #define RX_PROD_PKT_BD_FLAGS_EOP_PAD UINT32_C(0x80)
+ /*
+ * This value is the number of additional buffers in the ring that
+ * describe the buffer space to be consumed for this packet.
+ * If the value is zero, then the packet must fit within the
+ * space described by this BD. If this value is 1 or more, it
+ * indicates how many additional "buffer" BDs are in the ring
+ * immediately following this BD to be used for the same
+ * network packet.
+ *
+ * Even if the packet to be placed does not need all the
+ * additional buffers, they will be consumed anyway.
*/
- #define RX_TPA_START_CMPL_FLAGS2_T_L4_CS_CALC UINT32_C(0x8)
- /* This value indicates what format the metadata field is. */
- #define RX_TPA_START_CMPL_FLAGS2_META_FORMAT_MASK UINT32_C(0xf0)
- #define RX_TPA_START_CMPL_FLAGS2_META_FORMAT_SFT 4
- /* No metadata informtaion. Value is zero. */
- #define RX_TPA_START_CMPL_FLAGS2_META_FORMAT_NONE (UINT32_C(0x0) << 4)
- /*
- * The metadata field contains the VLAN tag and
- * TPID value. - metadata[11:0] contains the
- * vlan VID value. - metadata[12] contains the
- * vlan DE value. - metadata[15:13] contains the
- * vlan PRI value. - metadata[31:16] contains
- * the vlan TPID value.
- */
- #define RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN (UINT32_C(0x1) << 4)
- #define RX_TPA_START_CMPL_FLAGS2_META_FORMAT_LAST \
-  RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN
+ #define RX_PROD_PKT_BD_FLAGS_BUFFERS_MASK UINT32_C(0x300)
+ #define RX_PROD_PKT_BD_FLAGS_BUFFERS_SFT 8
/*
- * This field indicates the IP type for the inner-most IP
- * header. A value of '0' indicates IPv4. A value of '1'
- * indicates IPv6.
+ * This is the length in bytes of the host physical buffer where
+ * data for the packet may be placed in host memory.
*/
- #define RX_TPA_START_CMPL_FLAGS2_IP_TYPE UINT32_C(0x100)
- uint32_t metadata;
+ uint16_t len;
/*
- * This is data from the CFA block as indicated by the
- * meta_format field.
+ * The opaque data field is passed through to the completion and can be
+ * used for any data that the driver wants to associate with this
+ * receive buffer set.
*/
- /* When meta_format=1, this value is the VLAN VID. */
- #define RX_TPA_START_CMPL_METADATA_VID_MASK UINT32_C(0xfff)
- #define RX_TPA_START_CMPL_METADATA_VID_SFT 0
- /* When meta_format=1, this value is the VLAN DE. */
- #define RX_TPA_START_CMPL_METADATA_DE UINT32_C(0x1000)
- /* When meta_format=1, this value is the VLAN PRI. */
- #define RX_TPA_START_CMPL_METADATA_PRI_MASK UINT32_C(0xe000)
- #define RX_TPA_START_CMPL_METADATA_PRI_SFT 13
- /* When meta_format=1, this value is the VLAN TPID. */
- #define RX_TPA_START_CMPL_METADATA_TPID_MASK UINT32_C(0xffff0000)
- #define RX_TPA_START_CMPL_METADATA_TPID_SFT 16
- uint16_t v2;
- /* unused4 is 15 b */
+ uint32_t opaque;
/*
- * This value is written by the NIC such that it will be
- * different for each pass through the completion queue. The
- * even passes will write 1. The odd passes will write 0.
+ * This is the host physical address where data for the packet may
+ * be placed in host memory.
*/
- #define RX_TPA_START_CMPL_V2 UINT32_C(0x1)
- /* unused4 is 15 b */
- uint16_t cfa_code;
+ uint64_t address;
+} __attribute__((packed));
+
+/* rx_prod_bfr_bd (size:128b/16B) */
+struct rx_prod_bfr_bd {
+ /* This value identifies the type of buffer descriptor. */
+ uint16_t flags_type;
+ /* This value identifies the type of buffer descriptor. */
+ #define RX_PROD_BFR_BD_TYPE_MASK UINT32_C(0x3f)
+ #define RX_PROD_BFR_BD_TYPE_SFT 0
/*
- * This field identifies the CFA action rule that was used for
- * this packet.
+ * Indicates that this BD is 16B long and is an RX
+ * Producer Buffer BD.
*/
- uint32_t inner_l4_size_inner_l3_offset_inner_l2_offset_outer_l3_offset;
+ #define RX_PROD_BFR_BD_TYPE_RX_PROD_BFR UINT32_C(0x5)
+ #define RX_PROD_BFR_BD_TYPE_LAST RX_PROD_BFR_BD_TYPE_RX_PROD_BFR
+ #define RX_PROD_BFR_BD_FLAGS_MASK UINT32_C(0xffc0)
+ #define RX_PROD_BFR_BD_FLAGS_SFT 6
/*
- * This is the size in bytes of the inner most L4 header. This
- * can be subtracted from the payload_offset to determine the
- * start of the inner most L4 header.
+ * This is the length in bytes of the host physical buffer where
+ * data for the packet may be placed in host memory.
*/
+ uint16_t len;
+ /* This field is not used. */
+ uint32_t opaque;
/*
- * This is the offset from the beginning of the packet in bytes
- * for the outer L3 header. If there is no outer L3 header, then
- * this value is zero.
+ * This is the host physical address where data for the packet may
+ * be placed in host memory.
*/
- #define RX_TPA_START_CMPL_OUTER_L3_OFFSET_MASK UINT32_C(0x1ff)
- #define RX_TPA_START_CMPL_OUTER_L3_OFFSET_SFT 0
+ uint64_t address;
+} __attribute__((packed));
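The producer BDs above are what the driver posts to hand receive buffers to the NIC. A minimal sketch of refilling one RX ring slot with a packet BD; buf_dma and mbuf_idx are assumed driver-side values and the 2048B length is arbitrary:

static void refill_rx_slot(struct rx_prod_pkt_bd *rxbd, uint64_t buf_dma,
			   uint32_t mbuf_idx)
{
	rxbd->flags_type = RX_PROD_PKT_BD_TYPE_RX_PROD_PKT |
			   RX_PROD_PKT_BD_FLAGS_SOP_PAD; /* data lands at address + 2B */
	rxbd->len = 2048;        /* usable length of the buffer in bytes */
	rxbd->opaque = mbuf_idx; /* echoed back in the matching RX completion */
	rxbd->address = buf_dma; /* host physical address of the buffer */
}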
+
+/* rx_prod_agg_bd (size:128b/16B) */
+struct rx_prod_agg_bd {
+ /* This value identifies the type of buffer descriptor. */
+ uint16_t flags_type;
+ /* This value identifies the type of buffer descriptor. */
+ #define RX_PROD_AGG_BD_TYPE_MASK UINT32_C(0x3f)
+ #define RX_PROD_AGG_BD_TYPE_SFT 0
/*
- * This is the offset from the beginning of the packet in bytes
- * for the inner most L2 header.
+ * Indicates that this BD is 16B long and is an
+ * RX Producer Assembly Buffer Descriptor.
*/
- #define RX_TPA_START_CMPL_INNER_L2_OFFSET_MASK UINT32_C(0x3fe00)
- #define RX_TPA_START_CMPL_INNER_L2_OFFSET_SFT 9
+ #define RX_PROD_AGG_BD_TYPE_RX_PROD_AGG UINT32_C(0x6)
+ #define RX_PROD_AGG_BD_TYPE_LAST \
+  RX_PROD_AGG_BD_TYPE_RX_PROD_AGG
+ #define RX_PROD_AGG_BD_FLAGS_MASK UINT32_C(0xffc0)
+ #define RX_PROD_AGG_BD_FLAGS_SFT 6
/*
- * This is the offset from the beginning of the packet in bytes
- * for the inner most L3 header.
+ * If set to 1, the packet write will be padded out to the
+ * nearest cache-line with zero value padding.
*/
- #define RX_TPA_START_CMPL_INNER_L3_OFFSET_MASK UINT32_C(0x7fc0000)
- #define RX_TPA_START_CMPL_INNER_L3_OFFSET_SFT 18
+ #define RX_PROD_AGG_BD_FLAGS_EOP_PAD UINT32_C(0x40)
/*
- * This is the size in bytes of the inner most L4 header. This
- * can be subtracted from the payload_offset to determine the
- * start of the inner most L4 header.
+ * This is the length in bytes of the host physical buffer where
+ * data for the packet may be placed in host memory.
*/
- #define RX_TPA_START_CMPL_INNER_L4_SIZE_MASK UINT32_C(0xf8000000)
- #define RX_TPA_START_CMPL_INNER_L4_SIZE_SFT 27
-} __attribute__((packed));
-
-/* RX TPA End Completion Record (32 bytes split to 2 16-byte struct) */
-struct rx_tpa_end_cmpl {
- uint16_t flags_type;
+ uint16_t len;
/*
- * This field indicates the exact type of the completion. By
- * convention, the LSB identifies the length of the record in
- * 16B units. Even values indicate 16B records. Odd values
- * indicate 32B records.
+ * The opaque data field is passed through to the completion and can be
+ * used for any data that the driver wants to associate with this
+ * receive assembly buffer.
*/
- #define RX_TPA_END_CMPL_TYPE_MASK UINT32_C(0x3f)
- #define RX_TPA_END_CMPL_TYPE_SFT 0
+ uint32_t opaque;
/*
- * RX L2 TPA End Completion: Completion at the
- * end of a TPA operation. Length = 32B
+ * This is the host physical address where data for the packet may
+ * be placed in host memory.
*/
- #define RX_TPA_END_CMPL_TYPE_RX_TPA_END UINT32_C(0x15)
+ uint64_t address;
+} __attribute__((packed));
+
+/* cmpl_base (size:128b/16B) */
+struct cmpl_base {
+ uint16_t type;
/*
- * When this bit is '1', it indicates a packet that has an error
- * of some type. Type of error is indicated in error_flags.
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
*/
- #define RX_TPA_END_CMPL_FLAGS_ERROR UINT32_C(0x40)
- /* This field indicates how the packet was placed in the buffer. */
- #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_MASK UINT32_C(0x380)
- #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_SFT 7
+ #define CMPL_BASE_TYPE_MASK UINT32_C(0x3f)
+ #define CMPL_BASE_TYPE_SFT 0
/*
- * Jumbo: TPA Packet was placed using jumbo
- * algorithm. This means that the first buffer
- * will be filled with data before moving to
- * aggregation buffers. Each aggregation buffer
- * will be filled before moving to the next
- * aggregation buffer.
+ * TX L2 completion:
+ * Completion of TX packet. Length = 16B
*/
- #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_JUMBO (UINT32_C(0x1) << 7)
+ #define CMPL_BASE_TYPE_TX_L2 UINT32_C(0x0)
/*
- * Header/Data Separation: Packet was placed
- * using Header/Data separation algorithm. The
- * separation location is indicated by the itype
- * field.
+ * RX L2 completion:
+ * Completion of an L2 RX packet. Length = 32B
*/
- #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_HDS (UINT32_C(0x2) << 7)
+ #define CMPL_BASE_TYPE_RX_L2 UINT32_C(0x11)
/*
- * GRO/Jumbo: Packet will be placed using
- * GRO/Jumbo where the first packet is filled
- * with data. Subsequent packets will be placed
- * such that any one packet does not span two
- * aggregation buffers unless it starts at the
- * beginning of an aggregation buffer.
+ * RX Aggregation Buffer completion:
+ * Completion of an L2 aggregation buffer in support of
+ * TPA, HDS, or Jumbo packet completion. Length = 16B
*/
- #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_GRO_JUMBO (UINT32_C(0x5) << 7)
+ #define CMPL_BASE_TYPE_RX_AGG UINT32_C(0x12)
/*
- * GRO/Header-Data Separation: Packet will be
- * placed using GRO/HDS where the header is in
- * the first packet. Payload of each packet will
- * be placed such that any one packet does not
- * span two aggregation buffers unless it starts
- * at the beginning of an aggregation buffer.
+ * RX L2 TPA Start Completion:
+ * Completion at the beginning of a TPA operation.
+ * Length = 32B
*/
- #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_GRO_HDS (UINT32_C(0x6) << 7)
- #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_LAST \
-  RX_TPA_END_CMPL_FLAGS_PLACEMENT_GRO_HDS
- /* unused is 2 b */
- #define RX_TPA_END_CMPL_FLAGS_UNUSED_MASK UINT32_C(0xc00)
- #define RX_TPA_END_CMPL_FLAGS_UNUSED_SFT 10
- /*
- * This value indicates what the inner packet determined for the
- * packet was. - 2 TCP Packet Indicates that the packet was IP
- * and TCP. This indicates that the ip_cs field is valid and
- * that the tcp_udp_cs field is valid and contains the TCP
- * checksum. This also indicates that the payload_offset field
- * is valid.
- */
- #define RX_TPA_END_CMPL_FLAGS_ITYPE_MASK UINT32_C(0xf000)
- #define RX_TPA_END_CMPL_FLAGS_ITYPE_SFT 12
- #define RX_TPA_END_CMPL_FLAGS_MASK UINT32_C(0xffc0)
- #define RX_TPA_END_CMPL_FLAGS_SFT 6
- uint16_t len;
- /*
- * This value is zero for TPA End completions. There is no data
- * in the buffer that corresponds to the opaque value in this
- * completion.
- */
- uint32_t opaque;
- /*
- * This is a copy of the opaque field from the RX BD this
- * completion corresponds to.
- */
- uint8_t agg_bufs_v1;
- /* unused1 is 1 b */
- /*
- * This value is written by the NIC such that it will be
- * different for each pass through the completion queue. The
- * even passes will write 1. The odd passes will write 0.
- */
- #define RX_TPA_END_CMPL_V1 UINT32_C(0x1)
- /*
- * This value is the number of aggregation buffers that follow
- * this entry in the completion ring that are a part of this
- * aggregation packet. If the value is zero, then the packet is
- * completely contained in the buffer space provided in the
- * aggregation start completion.
- */
- #define RX_TPA_END_CMPL_AGG_BUFS_MASK UINT32_C(0x7e)
- #define RX_TPA_END_CMPL_AGG_BUFS_SFT 1
- /* unused1 is 1 b */
- uint8_t tpa_segs;
- /* This value is the number of segments in the TPA operation. */
- uint8_t payload_offset;
+ #define CMPL_BASE_TYPE_RX_TPA_START UINT32_C(0x13)
/*
- * This value indicates the offset in bytes from the beginning
- * of the packet where the inner payload starts. This value is
- * valid for TCP, UDP, FCoE, and RoCE packets. A value of zero
- * indicates an offset of 256 bytes.
+ * RX L2 TPA End Completion:
+ * Completion at the end of a TPA operation.
+ * Length = 32B
*/
- uint8_t agg_id;
+ #define CMPL_BASE_TYPE_RX_TPA_END UINT32_C(0x15)
/*
- * This is the aggregation ID that the completion is associated
- * with. Use this number to correlate the TPA start completion
- * with the TPA end completion.
+ * Statistics Ejection Completion:
+ * Completion of statistics data ejection buffer.
+ * Length = 16B
*/
- /* unused2 is 1 b */
+ #define CMPL_BASE_TYPE_STAT_EJECT UINT32_C(0x1a)
/*
- * This is the aggregation ID that the completion is associated
- * with. Use this number to correlate the TPA start completion
- * with the TPA end completion.
+ * HWRM Command Completion:
+ * Completion of an HWRM command.
*/
- #define RX_TPA_END_CMPL_AGG_ID_MASK UINT32_C(0xfe)
- #define RX_TPA_END_CMPL_AGG_ID_SFT 1
- uint32_t tsdelta;
+ #define CMPL_BASE_TYPE_HWRM_DONE UINT32_C(0x20)
+ /* Forwarded HWRM Request */
+ #define CMPL_BASE_TYPE_HWRM_FWD_REQ UINT32_C(0x22)
+ /* Forwarded HWRM Response */
+ #define CMPL_BASE_TYPE_HWRM_FWD_RESP UINT32_C(0x24)
+ /* HWRM Asynchronous Event Information */
+ #define CMPL_BASE_TYPE_HWRM_ASYNC_EVENT UINT32_C(0x2e)
+ /* CQ Notification */
+ #define CMPL_BASE_TYPE_CQ_NOTIFICATION UINT32_C(0x30)
+ /* SRQ Threshold Event */
+ #define CMPL_BASE_TYPE_SRQ_EVENT UINT32_C(0x32)
+ /* DBQ Threshold Event */
+ #define CMPL_BASE_TYPE_DBQ_EVENT UINT32_C(0x34)
+ /* QP Async Notification */
+ #define CMPL_BASE_TYPE_QP_EVENT UINT32_C(0x38)
+ /* Function Async Notification */
+ #define CMPL_BASE_TYPE_FUNC_EVENT UINT32_C(0x3a)
+ #define CMPL_BASE_TYPE_LAST CMPL_BASE_TYPE_FUNC_EVENT
+ /* info1 is 16 b */
+ uint16_t info1;
+ /* info2 is 32 b */
+ uint32_t info2;
/*
- * For non-GRO packets, this value is the timestamp delta
- * between earliest and latest timestamp values for TPA packet.
- * If packets were not time stamped, then delta will be zero.
- * For GRO packets, this field is zero except for the following
- * sub-fields. - tsdelta[31] Timestamp present indication. When
- * '0', no Timestamp option is in the packet. When '1', then a
- * Timestamp option is present in the packet.
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
*/
+ uint32_t info3_v;
+ #define CMPL_BASE_V UINT32_C(0x1)
+ #define CMPL_BASE_INFO3_MASK UINT32_C(0xfffffffe)
+ #define CMPL_BASE_INFO3_SFT 1
+ /* info4 is 32 b */
+ uint32_t info4;
} __attribute__((packed));
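The info3_v bit is how software detects that the NIC has produced a new ring entry: on even passes through the completion ring the NIC writes the V bit as 1, on odd passes as 0. A sketch of the resulting validity test, assuming (as in the bnxt PMD) a power-of-two ring size and a free-running consumer index; rte_le_to_cpu_32() is DPDK's little-endian load helper from <rte_byteorder.h>:

#include <rte_byteorder.h>

static inline int cmpl_base_valid(const struct cmpl_base *cmp,
				  uint32_t raw_cons, uint32_t ring_size)
{
	/* Expected phase flips each time raw_cons wraps around the ring. */
	int expect = !(raw_cons & ring_size);

	return !!(rte_le_to_cpu_32(cmp->info3_v) & CMPL_BASE_V) == expect;
}

Because the device writes the V bit last, a consumer should also issue a read barrier after this check before reading the rest of the entry.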
-/* last 16 bytes of RX TPA End Completion Record */
-struct rx_tpa_end_cmpl_hi {
- uint32_t tpa_dup_acks;
- /* unused3 is 28 b */
+/* tx_cmpl (size:128b/16B) */
+struct tx_cmpl {
+ uint16_t flags_type;
/*
- * This value is the number of duplicate ACKs that have been
- * received as part of the TPA operation.
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
*/
- #define RX_TPA_END_CMPL_TPA_DUP_ACKS_MASK UINT32_C(0xf)
- #define RX_TPA_END_CMPL_TPA_DUP_ACKS_SFT 0
- /* unused3 is 28 b */
- uint16_t tpa_seg_len;
- /*
- * This value is the valid when TPA completion is active. It
- * indicates the length of the longest segment of the TPA
- * operation for LRO mode and the length of the first segment in
- * GRO mode. This value may be used by GRO software to re-
- * construct the original packet stream from the TPA packet.
- * This is the length of all but the last segment for GRO. In
- * LRO mode this value may be used to indicate MSS size to the
- * stack.
- */
- uint16_t unused_3;
- /* unused4 is 16 b */
- uint16_t errors_v2;
+ #define TX_CMPL_TYPE_MASK UINT32_C(0x3f)
+ #define TX_CMPL_TYPE_SFT 0
/*
- * This value is written by the NIC such that it will be
- * different for each pass through the completion queue. The
- * even passes will write 1. The odd passes will write 0.
+ * TX L2 completion:
+ * Completion of TX packet. Length = 16B
*/
- #define RX_TPA_END_CMPL_V2 UINT32_C(0x1)
+ #define TX_CMPL_TYPE_TX_L2 UINT32_C(0x0)
+ #define TX_CMPL_TYPE_LAST TX_CMPL_TYPE_TX_L2
+ #define TX_CMPL_FLAGS_MASK UINT32_C(0xffc0)
+ #define TX_CMPL_FLAGS_SFT 6
/*
- * This error indicates that there was some sort of problem with
- * the BDs for the packet that was found after part of the
- * packet was already placed. The packet should be treated as
- * invalid.
+ * When this bit is '1', it indicates a packet that has an
+ * error of some type. Type of error is indicated in
+ * error_flags.
*/
- #define RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_MASK UINT32_C(0xe)
- #define RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_SFT 1
+ #define TX_CMPL_FLAGS_ERROR UINT32_C(0x40)
/*
- * This error occurs when there is a fatal HW
- * problem in the chip only. It indicates that
- * there were not BDs on chip but that there was
- * adequate reservation. provided by the TPA
- * block.
+ * When this bit is '1', it indicates that the packet completed
+ * was transmitted using the push acceleration data provided
+ * by the driver. When this bit is '0', it indicates that the
+ * packet had no push acceleration data written or was executed
+ * as a normal packet even though push data was provided.
*/
- #define RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_NOT_ON_CHIP \
-  (UINT32_C(0x2) << 1)
+ #define TX_CMPL_FLAGS_PUSH UINT32_C(0x80)
+ /* unused1 is 16 b */
+ uint16_t unused_0;
/*
- * This error occurs when TPA block was not
- * configured to reserve adequate BDs for TPA
- * operations on this RX ring. All data for the
- * TPA operation was not placed. This error can
- * also be generated when the number of segments
- * is not programmed correctly in TPA and the 33
- * total aggregation buffers allowed for the TPA
- * operation has been exceeded.
+ * This is a copy of the opaque field from the first TX BD of this
+ * transmitted packet.
*/
- #define RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_RSV_ERROR \
-  (UINT32_C(0x4) << 1)
- #define RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_LAST \
-  RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_RSV_ERROR
- #define RX_TPA_END_CMPL_ERRORS_MASK UINT32_C(0xfffe)
- #define RX_TPA_END_CMPL_ERRORS_SFT 1
- uint16_t unused_4;
- /* unused5 is 16 b */
- uint32_t start_opaque;
+ uint32_t opaque;
+ uint16_t errors_v;
/*
- * This is the opaque value that was completed for the TPA start
- * completion that corresponds to this TPA end completion.
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
*/
-} __attribute__((packed));
-
-/* HWRM Forwarded Request (16 bytes) */
-struct hwrm_fwd_req_cmpl {
- uint16_t req_len_type;
- /* Length of forwarded request in bytes. */
+ #define TX_CMPL_V UINT32_C(0x1)
+ #define TX_CMPL_ERRORS_MASK UINT32_C(0xfffe)
+ #define TX_CMPL_ERRORS_SFT 1
/*
- * This field indicates the exact type of the completion. By
- * convention, the LSB identifies the length of the record in
- * 16B units. Even values indicate 16B records. Odd values
- * indicate 32B records.
+ * This error indicates that there was some sort of problem + * with the BDs for the packet. */ - #define HWRM_FWD_INPUT_CMPL_TYPE_MASK UINT32_C(0x3f) - #define HWRM_FWD_INPUT_CMPL_TYPE_SFT 0 - /* Forwarded HWRM Request */ - #define HWRM_FWD_INPUT_CMPL_TYPE_HWRM_FWD_INPUT UINT32_C(0x22) - /* Length of forwarded request in bytes. */ - #define HWRM_FWD_REQ_CMPL_REQ_LEN_MASK UINT32_C(0xffc0) - #define HWRM_FWD_REQ_CMPL_REQ_LEN_SFT 6 - uint16_t source_id; + #define TX_CMPL_ERRORS_BUFFER_ERROR_MASK UINT32_C(0xe) + #define TX_CMPL_ERRORS_BUFFER_ERROR_SFT 1 + /* No error */ + #define TX_CMPL_ERRORS_BUFFER_ERROR_NO_ERROR (UINT32_C(0x0) << 1) /* - * Source ID of this request. Typically used in forwarding - * requests and responses. 0x0 - 0xFFF8 - Used for function ids - * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - - * HWRM + * Bad Format: + * BDs were not formatted correctly. */ - uint32_t unused_0; - /* unused1 is 32 b */ - uint32_t req_buf_addr_v[2]; - /* Address of forwarded request. */ + #define TX_CMPL_ERRORS_BUFFER_ERROR_BAD_FMT (UINT32_C(0x2) << 1) + #define TX_CMPL_ERRORS_BUFFER_ERROR_LAST \ + TX_CMPL_ERRORS_BUFFER_ERROR_BAD_FMT /* - * This value is written by the NIC such that it will be - * different for each pass through the completion queue. The - * even passes will write 1. The odd passes will write 0. + * When this bit is '1', it indicates that the length of + * the packet was zero. No packet was transmitted. */ - #define HWRM_FWD_INPUT_CMPL_V UINT32_C(0x1) - /* Address of forwarded request. */ - #define HWRM_FWD_REQ_CMPL_REQ_BUF_ADDR_MASK UINT32_C(0xfffffffe) - #define HWRM_FWD_REQ_CMPL_REQ_BUF_ADDR_SFT 1 -} __attribute__((packed)); - -/* HWRM Asynchronous Event Completion Record (16 bytes) */ -struct hwrm_async_event_cmpl { - uint16_t type; - /* unused1 is 10 b */ + #define TX_CMPL_ERRORS_ZERO_LENGTH_PKT UINT32_C(0x10) /* - * This field indicates the exact type of the completion. By - * convention, the LSB identifies the length of the record in - * 16B units. Even values indicate 16B records. Odd values - * indicate 32B records. + * When this bit is '1', it indicates that the packet + * was longer than the programmed limit in TDI. No + * packet was transmitted. */ - #define HWRM_ASYNC_EVENT_CMPL_TYPE_MASK UINT32_C(0x3f) - #define HWRM_ASYNC_EVENT_CMPL_TYPE_SFT 0 - /* HWRM Asynchronous Event Information */ - #define HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT UINT32_C(0x2e) - /* unused1 is 10 b */ - uint16_t event_id; - /* Identifiers of events. 
*/ - /* Link status changed */ - #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE UINT32_C(0x0) - /* Link MTU changed */ - #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE UINT32_C(0x1) - /* Link speed changed */ - #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE UINT32_C(0x2) - /* DCB Configuration changed */ - #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE UINT32_C(0x3) - /* Port connection not allowed */ - #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED UINT32_C(0x4) - /* Link speed configuration was not allowed */ - #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED \ - UINT32_C(0x5) - /* Link speed configuration change */ - #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE UINT32_C(0x6) - /* Port PHY configuration change */ - #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE UINT32_C(0x7) - /* Function driver unloaded */ - #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD UINT32_C(0x10) - /* Function driver loaded */ - #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD UINT32_C(0x11) - /* Function FLR related processing has completed */ - #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT UINT32_C(0x12) - /* PF driver unloaded */ - #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD UINT32_C(0x20) - /* PF driver loaded */ - #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD UINT32_C(0x21) - /* VF Function Level Reset (FLR) */ - #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR UINT32_C(0x30) - /* VF MAC Address Change */ - #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE UINT32_C(0x31) - /* PF-VF communication channel status change. */ - #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE \ - UINT32_C(0x32) - /* VF Configuration Change */ - #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE UINT32_C(0x33) - /* LLFC/PFC Configuration Change */ - #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE UINT32_C(0x34) - /* HWRM Error */ - #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR UINT32_C(0xff) - uint32_t event_data2; - /* Event specific data */ - uint8_t opaque_v; - /* opaque is 7 b */ + #define TX_CMPL_ERRORS_EXCESSIVE_BD_LENGTH UINT32_C(0x20) /* - * This value is written by the NIC such that it will be - * different for each pass through the completion queue. The - * even passes will write 1. The odd passes will write 0. + * When this bit is '1', it indicates that one or more of the + * BDs associated with this packet generated a PCI error. + * This probably means the address was not valid. */ - #define HWRM_ASYNC_EVENT_CMPL_V UINT32_C(0x1) - /* opaque is 7 b */ - #define HWRM_ASYNC_EVENT_CMPL_OPAQUE_MASK UINT32_C(0xfe) - #define HWRM_ASYNC_EVENT_CMPL_OPAQUE_SFT 1 - uint8_t timestamp_lo; - /* 8-lsb timestamp from POR (100-msec resolution) */ - uint16_t timestamp_hi; - /* 16-lsb timestamp from POR (100-msec resolution) */ - uint32_t event_data1; - /* Event specific data */ -} __attribute__((packed)); - -/* hwrm_ver_get */ -/* - * Description: This function is called by a driver to determine the HWRM - * interface version supported by the HWRM firmware, the version of HWRM - * firmware implementation, the name of HWRM firmware, the versions of other - * embedded firmwares, and the names of other embedded firmwares, etc. Any - * interface or firmware version with major = 0, minor = 0, and update = 0 shall - * be considered an invalid version. 
- */
-/* Input (24 bytes) */
-struct hwrm_ver_get_input {
- uint16_t req_type;
+ #define TX_CMPL_ERRORS_DMA_ERROR UINT32_C(0x40)
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * When this bit is '1', it indicates that the packet was longer
+ * than indicated by the hint. No packet was transmitted.
*/
- uint16_t cmpl_ring;
+ #define TX_CMPL_ERRORS_HINT_TOO_SHORT UINT32_C(0x80)
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * When this bit is '1', it indicates that the packet was
+ * dropped due to Poison TLP error on one or more of the
+ * TLPs in the PXP completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ #define TX_CMPL_ERRORS_POISON_TLP_ERROR UINT32_C(0x100)
+ /* unused2 is 16 b */
+ uint16_t unused_1;
+ /* unused3 is 32 b */
+ uint32_t unused_2;
+} __attribute__((packed));
+
+/* rx_pkt_cmpl (size:128b/16B) */
+struct rx_pkt_cmpl {
+ uint16_t flags_type;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
*/
- uint64_t resp_addr;
+ #define RX_PKT_CMPL_TYPE_MASK UINT32_C(0x3f)
+ #define RX_PKT_CMPL_TYPE_SFT 0
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * RX L2 completion:
+ * Completion of an L2 RX packet. Length = 32B
*/
- uint8_t hwrm_intf_maj;
+ #define RX_PKT_CMPL_TYPE_RX_L2 UINT32_C(0x11)
+ #define RX_PKT_CMPL_TYPE_LAST RX_PKT_CMPL_TYPE_RX_L2
+ #define RX_PKT_CMPL_FLAGS_MASK UINT32_C(0xffc0)
+ #define RX_PKT_CMPL_FLAGS_SFT 6
/*
- * This field represents the major version of HWRM interface
- * specification supported by the driver HWRM implementation.
- * The interface major version is intended to change only when
- * non backward compatible changes are made to the HWRM
- * interface specification.
+ * When this bit is '1', it indicates a packet that has an
+ * error of some type. Type of error is indicated in
+ * error_flags.
*/
- uint8_t hwrm_intf_min;
+ #define RX_PKT_CMPL_FLAGS_ERROR UINT32_C(0x40)
+ /* This field indicates how the packet was placed in the buffer. */
+ #define RX_PKT_CMPL_FLAGS_PLACEMENT_MASK UINT32_C(0x380)
+ #define RX_PKT_CMPL_FLAGS_PLACEMENT_SFT 7
/*
- * This field represents the minor version of HWRM interface
- * specification supported by the driver HWRM implementation. A
- * change in interface minor version is used to reflect
- * significant backward compatible modification to HWRM
- * interface specification. This can be due to addition or
- * removal of functionality. HWRM interface specifications with
- * the same major version but different minor versions are
- * compatible.
+ * Normal:
+ * Packet was placed using normal algorithm.
*/
- uint8_t hwrm_intf_upd;
+ #define RX_PKT_CMPL_FLAGS_PLACEMENT_NORMAL (UINT32_C(0x0) << 7)
/*
- * This field represents the update version of HWRM interface
- * specification supported by the driver HWRM implementation.
- * The interface update version is used to reflect minor changes - * or bug fixes to a released HWRM interface specification. + * Jumbo: + * Packet was placed using jumbo algorithm. */ - uint8_t unused_0[5]; -} __attribute__((packed)); - -/* Output (128 bytes) */ -struct hwrm_ver_get_output { - uint16_t error_code; + #define RX_PKT_CMPL_FLAGS_PLACEMENT_JUMBO (UINT32_C(0x1) << 7) /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * Header/Data Separation: + * Packet was placed using Header/Data separation algorithm. + * The separation location is indicated by the itype field. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + #define RX_PKT_CMPL_FLAGS_PLACEMENT_HDS (UINT32_C(0x2) << 7) + #define RX_PKT_CMPL_FLAGS_PLACEMENT_LAST \ + RX_PKT_CMPL_FLAGS_PLACEMENT_HDS + /* This bit is '1' if the RSS field in this completion is valid. */ + #define RX_PKT_CMPL_FLAGS_RSS_VALID UINT32_C(0x400) + /* unused is 1 b */ + #define RX_PKT_CMPL_FLAGS_UNUSED UINT32_C(0x800) /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * This value indicates what the inner packet determined for the + * packet was. */ - uint8_t hwrm_intf_maj; + #define RX_PKT_CMPL_FLAGS_ITYPE_MASK UINT32_C(0xf000) + #define RX_PKT_CMPL_FLAGS_ITYPE_SFT 12 /* - * This field represents the major version of HWRM interface - * specification supported by the HWRM implementation. The - * interface major version is intended to change only when non - * backward compatible changes are made to the HWRM interface - * specification. A HWRM implementation that is compliant with - * this specification shall provide value of 1 in this field. + * Not Known: + * Indicates that the packet type was not known. */ - uint8_t hwrm_intf_min; + #define RX_PKT_CMPL_FLAGS_ITYPE_NOT_KNOWN \ + (UINT32_C(0x0) << 12) /* - * This field represents the minor version of HWRM interface - * specification supported by the HWRM implementation. A change - * in interface minor version is used to reflect significant - * backward compatible modification to HWRM interface - * specification. This can be due to addition or removal of - * functionality. HWRM interface specifications with the same - * major version but different minor versions are compatible. A - * HWRM implementation that is compliant with this specification - * shall provide value of 2 in this field. + * IP Packet: + * Indicates that the packet was an IP packet, but further + * classification was not possible. */ - uint8_t hwrm_intf_upd; + #define RX_PKT_CMPL_FLAGS_ITYPE_IP \ + (UINT32_C(0x1) << 12) /* - * This field represents the update version of HWRM interface - * specification supported by the HWRM implementation. The - * interface update version is used to reflect minor changes or - * bug fixes to a released HWRM interface specification. A HWRM - * implementation that is compliant with this specification - * shall provide value of 2 in this field. + * TCP Packet: + * Indicates that the packet was IP and TCP. + * This indicates that the payload_offset field is valid. */ - uint8_t hwrm_intf_rsvd; - uint8_t hwrm_fw_maj; + #define RX_PKT_CMPL_FLAGS_ITYPE_TCP \ + (UINT32_C(0x2) << 12) /* - * This field represents the major version of HWRM firmware. 
A - * change in firmware major version represents a major firmware - * release.
+ * UDP Packet:
+ * Indicates that the packet was IP and UDP.
+ * This indicates that the payload_offset field is valid.
*/
- uint8_t hwrm_fw_min;
+ #define RX_PKT_CMPL_FLAGS_ITYPE_UDP \
+  (UINT32_C(0x3) << 12)
/*
- * This field represents the minor version of HWRM firmware. A
- * change in firmware minor version represents significant
- * firmware functionality changes.
+ * FCoE Packet:
+ * Indicates that the packet was recognized as FCoE.
+ * This also indicates that the payload_offset field is valid.
*/
- uint8_t hwrm_fw_bld;
+ #define RX_PKT_CMPL_FLAGS_ITYPE_FCOE \
+  (UINT32_C(0x4) << 12)
/*
- * This field represents the build version of HWRM firmware. A
- * change in firmware build version represents bug fixes to a
- * released firmware.
+ * RoCE Packet:
+ * Indicates that the packet was recognized as RoCE.
+ * This also indicates that the payload_offset field is valid.
*/
- uint8_t hwrm_fw_rsvd;
+ #define RX_PKT_CMPL_FLAGS_ITYPE_ROCE \
+  (UINT32_C(0x5) << 12)
/*
- * This field is a reserved field. This field can be used to
- * represent firmware branches or customer specific releases
- * tied to a specific (major,minor,update) version of the HWRM
- * firmware.
+ * ICMP Packet:
+ * Indicates that the packet was recognized as ICMP.
+ * This indicates that the payload_offset field is valid.
*/
- uint8_t mgmt_fw_maj;
+ #define RX_PKT_CMPL_FLAGS_ITYPE_ICMP \
+  (UINT32_C(0x7) << 12)
/*
- * This field represents the major version of mgmt firmware. A
- * change in major version represents a major release.
+ * PtP packet wo/timestamp:
+ * Indicates that the packet was recognized as a PtP
+ * packet.
*/
- uint8_t mgmt_fw_min;
+ #define RX_PKT_CMPL_FLAGS_ITYPE_PTP_WO_TIMESTAMP \
+  (UINT32_C(0x8) << 12)
/*
- * This field represents the minor version of mgmt firmware. A
- * change in minor version represents significant functionality
- * changes.
+ * PtP packet w/timestamp:
+ * Indicates that the packet was recognized as a PtP
+ * packet and that a timestamp was taken for the packet.
*/
- uint8_t mgmt_fw_bld;
+ #define RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP \
+  (UINT32_C(0x9) << 12)
+ #define RX_PKT_CMPL_FLAGS_ITYPE_LAST \
+  RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP
/*
- * This field represents the build version of mgmt firmware. A
- * change in update version represents bug fixes.
+ * This is the length of the data for the packet stored in the
+ * buffer(s) identified by the opaque value. This includes
+ * the packet BD and any associated buffer BDs. This does not include
+ * the length of any data placed in aggregation BDs.
*/
- uint8_t mgmt_fw_rsvd;
+ uint16_t len;
/*
- * This field is a reserved field. This field can be used to
- * represent firmware branches or customer specific releases
- * tied to a specific (major,minor,update) version
+ * This is a copy of the opaque field from the RX BD this completion
+ * corresponds to.
*/
- uint8_t netctrl_fw_maj;
+ uint32_t opaque;
+ uint8_t agg_bufs_v1;
/*
- * This field represents the major version of network control
- * firmware. A change in major version represents a major
- * release.
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
*/
- uint8_t netctrl_fw_min;
+ #define RX_PKT_CMPL_V1 UINT32_C(0x1)
/*
- * This field represents the minor version of network control
- * firmware. A change in minor version represents significant
- * functionality changes.
+ * This value is the number of aggregation buffers that follow this
+ * entry in the completion ring that are a part of this packet.
+ * If the value is zero, then the packet is completely contained
+ * in the buffer space provided for the packet in the RX ring.
*/
- uint8_t netctrl_fw_bld;
+ #define RX_PKT_CMPL_AGG_BUFS_MASK UINT32_C(0x3e)
+ #define RX_PKT_CMPL_AGG_BUFS_SFT 1
+ /* unused1 is 2 b */
+ #define RX_PKT_CMPL_UNUSED1_MASK UINT32_C(0xc0)
+ #define RX_PKT_CMPL_UNUSED1_SFT 6
+ /*
+ * This is the RSS hash type for the packet. The value is packed
+ * {tuple_extract_op[1:0],rss_profile_id[4:0],tuple_extract_op[2]}.
+ *
+ * The value of tuple_extract_op provides the information about
+ * what fields the hash was computed on.
+ * * 0: The RSS hash was computed over source IP address,
+ * destination IP address, source port, and destination port of inner
+ * IP and TCP or UDP headers. Note: For non-tunneled packets,
+ * the packet headers are considered inner packet headers for the RSS
+ * hash computation purpose.
+ * * 1: The RSS hash was computed over source IP address and destination
+ * IP address of inner IP header. Note: For non-tunneled packets,
+ * the packet headers are considered inner packet headers for the RSS
+ * hash computation purpose.
+ * * 2: The RSS hash was computed over source IP address,
+ * destination IP address, source port, and destination port of
+ * IP and TCP or UDP headers of outer tunnel headers.
+ * Note: For non-tunneled packets, this value is not applicable.
+ * * 3: The RSS hash was computed over source IP address and
+ * destination IP address of IP header of outer tunnel headers.
+ * Note: For non-tunneled packets, this value is not applicable.
+ *
+ * Note that the 4-tuple values listed above are applicable
+ * for layer 4 protocols supported and enabled for RSS in the hardware,
+ * HWRM firmware, and drivers. For example, if RSS hash is supported and
+ * enabled for TCP traffic only, then the values of tuple_extract_op
+ * corresponding to 4-tuples are only valid for TCP traffic.
+ */
+ uint8_t rss_hash_type;
+ /*
+ * This value indicates the offset in bytes from the beginning of the packet
+ * where the inner payload starts. This value is valid for TCP, UDP,
+ * FCoE, and RoCE packets.
+ *
+ * A value of zero indicates that the header is 256B into the packet.
+ */
+ uint8_t payload_offset;
+ /* unused2 is 8 b */
+ uint8_t unused1;
/*
- * This field represents the build version of network control
- * firmware. A change in update version represents bug fixes.
+ * This value is the RSS hash value calculated for the packet
+ * based on the mode bits and key value in the VNIC.
*/
- uint8_t netctrl_fw_rsvd;
+ uint32_t rss_hash;
+} __attribute__((packed));
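Pulling the pieces together, a hypothetical sketch of decoding the low 16B of an RX completion once it has been validated; note the payload_offset convention above, where zero encodes 256B. rte_le_to_cpu_16/32 are DPDK byte-order helpers, and the function names are invented for illustration:

#include <rte_byteorder.h>

static void parse_rx_pkt_cmpl(const struct rx_pkt_cmpl *rxcmp)
{
	uint16_t flags = rte_le_to_cpu_16(rxcmp->flags_type);
	/* Number of aggregation-buffer entries that follow this one. */
	unsigned int agg_bufs = (rxcmp->agg_bufs_v1 & RX_PKT_CMPL_AGG_BUFS_MASK)
				>> RX_PKT_CMPL_AGG_BUFS_SFT;
	/* A payload_offset of zero means the payload starts 256B in. */
	unsigned int payload_off = rxcmp->payload_offset ?
				   rxcmp->payload_offset : 256;

	if (flags & RX_PKT_CMPL_FLAGS_RSS_VALID) {
		uint32_t hash = rte_le_to_cpu_32(rxcmp->rss_hash);
		/* ... e.g. store hash in mbuf->hash.rss ... */
		(void)hash;
	}
	(void)agg_bufs;
	(void)payload_off;
}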
+
+/* rx_pkt_cmpl_hi (size:128b/16B) */
+struct rx_pkt_cmpl_hi {
+ uint32_t flags2;
/*
- * This field is a reserved field. This field can be used to
- * represent firmware branches or customer specific releases
- * tied to a specific (major,minor,update) version
+ * This indicates that the ip checksum was calculated for the
+ * inner packet and that the ip_cs_error field indicates if there
+ * was an error.
*/
- uint32_t dev_caps_cfg;
+ #define RX_PKT_CMPL_FLAGS2_IP_CS_CALC UINT32_C(0x1)
/*
- * This field is used to indicate device's capabilities and
- * configurations.
+ * This indicates that the TCP, UDP or ICMP checksum was
+ * calculated for the inner packet and that the l4_cs_error field
+ * indicates if there was an error.
*/
+ #define RX_PKT_CMPL_FLAGS2_L4_CS_CALC UINT32_C(0x2)
/*
- * If set to 1, then secure firmware update behavior is
- * supported. If set to 0, then secure firmware update behavior
- * is not supported.
+ * This indicates that the ip checksum was calculated for the
+ * tunnel header and that the t_ip_cs_error field indicates if there
+ * was an error.
*/
- #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED \
-  UINT32_C(0x1)
+ #define RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC UINT32_C(0x4)
/*
- * If set to 1, then firmware based DCBX agent is supported. If
- * set to 0, then firmware based DCBX agent capability is not
- * supported on this device.
+ * This indicates that the UDP checksum was
+ * calculated for the tunnel packet and that the t_l4_cs_error field
+ * indicates if there was an error.
*/
- #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED \
-  UINT32_C(0x2)
+ #define RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC UINT32_C(0x8)
+ /* This value indicates what format the metadata field is. */
+ #define RX_PKT_CMPL_FLAGS2_META_FORMAT_MASK UINT32_C(0xf0)
+ #define RX_PKT_CMPL_FLAGS2_META_FORMAT_SFT 4
+ /* No metadata information. Value is zero. */
+ #define RX_PKT_CMPL_FLAGS2_META_FORMAT_NONE (UINT32_C(0x0) << 4)
+ /*
+ * The metadata field contains the VLAN tag and TPID value.
+ * - metadata[11:0] contains the vlan VID value.
+ * - metadata[12] contains the vlan DE value.
+ * - metadata[15:13] contains the vlan PRI value.
+ * - metadata[31:16] contains the vlan TPID value.
+ */
+ #define RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN (UINT32_C(0x1) << 4)
+ #define RX_PKT_CMPL_FLAGS2_META_FORMAT_LAST \
+  RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN
/*
- * If set to 1, then HWRM short command format is supported. If
- * set to 0, then HWRM short command format is not supported.
+ * This field indicates the IP type for the inner-most IP header.
+ * A value of '0' indicates IPv4. A value of '1' indicates IPv6.
+ * This value is only valid if itype indicates a packet
+ * with an IP header.
*/
- #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED \
-  UINT32_C(0x4)
+ #define RX_PKT_CMPL_FLAGS2_IP_TYPE UINT32_C(0x100)
/*
- * If set to 1, then HWRM short command format is required. If
- * set to 0, then HWRM short command format is not required.
+ * This is data from the CFA block as indicated by the meta_format
+ * field.
*/
- #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_INPUTUIRED \
-  UINT32_C(0x8)
- uint8_t roce_fw_maj;
+ uint32_t metadata;
+ /* When meta_format=1, this value is the VLAN VID. */
+ #define RX_PKT_CMPL_METADATA_VID_MASK UINT32_C(0xfff)
+ #define RX_PKT_CMPL_METADATA_VID_SFT 0
+ /* When meta_format=1, this value is the VLAN DE. */
+ #define RX_PKT_CMPL_METADATA_DE UINT32_C(0x1000)
+ /* When meta_format=1, this value is the VLAN PRI. */
+ #define RX_PKT_CMPL_METADATA_PRI_MASK UINT32_C(0xe000)
+ #define RX_PKT_CMPL_METADATA_PRI_SFT 13
+ /* When meta_format=1, this value is the VLAN TPID. */
+ #define RX_PKT_CMPL_METADATA_TPID_MASK UINT32_C(0xffff0000)
+ #define RX_PKT_CMPL_METADATA_TPID_SFT 16
+ uint16_t errors_v2;
/*
- * This field represents the major version of RoCE firmware. A
- * change in major version represents a major release.
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
*/ - uint8_t roce_fw_min; + #define RX_PKT_CMPL_V2 \ + UINT32_C(0x1) + #define RX_PKT_CMPL_ERRORS_MASK \ + UINT32_C(0xfffe) + #define RX_PKT_CMPL_ERRORS_SFT 1 /* - * This field represents the minor version of RoCE firmware. A - * change in minor version represents significant functionality - * changes. + * This error indicates that there was some sort of problem with + * the BDs for the packet that was found after part of the + * packet was already placed. The packet should be treated as + * invalid. */ - uint8_t roce_fw_bld; + #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_MASK \ + UINT32_C(0xe) + #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_SFT 1 + /* No buffer error */ + #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_NO_BUFFER \ + (UINT32_C(0x0) << 1) /* - * This field represents the build version of RoCE firmware. A - * change in update version represents bug fixes. + * Did Not Fit: + * Packet did not fit into packet buffer provided. + * For regular placement, this means the packet did not fit + * in the buffer provided. For HDS and jumbo placement, this + * means that the packet could not be placed into 7 physical + * buffers or less. */ - uint8_t roce_fw_rsvd; + #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_DID_NOT_FIT \ + (UINT32_C(0x1) << 1) /* - * This field is a reserved field. This field can be used to - * represent firmware branches or customer specific releases - * tied to a specific (major,minor,update) version + * Not On Chip: + * All BDs needed for the packet were not on-chip when + * the packet arrived. */ - char hwrm_fw_name[16]; + #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_NOT_ON_CHIP \ + (UINT32_C(0x2) << 1) /* - * This field represents the name of HWRM FW (ASCII chars with - * NULL at the end). + * Bad Format: + * BDs were not formatted correctly. */ - char mgmt_fw_name[16]; + #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT \ + (UINT32_C(0x3) << 1) + #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_LAST \ + RX_PKT_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT /* - * This field represents the name of mgmt FW (ASCII chars with - * NULL at the end). + * This indicates that there was an error in the IP header + * checksum. */ - char netctrl_fw_name[16]; + #define RX_PKT_CMPL_ERRORS_IP_CS_ERROR \ + UINT32_C(0x10) /* - * This field represents the name of network control firmware - * (ASCII chars with NULL at the end). + * This indicates that there was an error in the TCP, UDP + * or ICMP checksum. */ - uint32_t reserved2[4]; + #define RX_PKT_CMPL_ERRORS_L4_CS_ERROR \ + UINT32_C(0x20) /* - * This field is reserved for future use. The responder should - * set it to 0. The requester should ignore this field. + * This indicates that there was an error in the tunnel + * IP header checksum. */ - char roce_fw_name[16]; + #define RX_PKT_CMPL_ERRORS_T_IP_CS_ERROR \ + UINT32_C(0x40) /* - * This field represents the name of RoCE FW (ASCII chars with - * NULL at the end). + * This indicates that there was an error in the tunnel + * UDP checksum. */ - uint16_t chip_num; - /* This field returns the chip number. */ - uint8_t chip_rev; - /* This field returns the revision of chip. */ - uint8_t chip_metal; - /* This field returns the chip metal number. */ - uint8_t chip_bond_id; - /* This field returns the bond id of the chip. */ - uint8_t chip_platform_type; + #define RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR \ + UINT32_C(0x80) /* - * This value indicates the type of platform used for chip - * implementation. + * This indicates that there was a CRC error on either an FCoE + * or RoCE packet. The itype indicates the packet type. 
*/ - /* ASIC */ - #define HWRM_VER_GET_OUTPUT_CHIP_PLATFORM_TYPE_ASIC UINT32_C(0x0) - /* FPGA platform of the chip. */ - #define HWRM_VER_GET_OUTPUT_CHIP_PLATFORM_TYPE_FPGA UINT32_C(0x1) - /* Palladium platform of the chip. */ - #define HWRM_VER_GET_OUTPUT_CHIP_PLATFORM_TYPE_PALLADIUM UINT32_C(0x2) - uint16_t max_req_win_len; + #define RX_PKT_CMPL_ERRORS_CRC_ERROR \ + UINT32_C(0x100) /* - * This field returns the maximum value of request window that - * is supported by the HWRM. The request window is mapped into - * device address space using MMIO. + * This indicates that there was an error in the tunnel + * portion of the packet when this + * field is non-zero. */ - uint16_t max_resp_len; - /* This field returns the maximum value of response buffer in bytes. */ - uint16_t def_req_timeout; + #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_MASK \ + UINT32_C(0xe00) + #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_SFT 9 /* - * This field returns the default request timeout value in - * milliseconds. + * No additional error occurred on the tunnel portion + * of the packet or the packet does not have a tunnel. */ - uint8_t init_pending; + #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_NO_ERROR \ + (UINT32_C(0x0) << 9) /* - * This field will indicate if any subsystems is not fully - * initialized. + * Indicates that IP header version does not match + * expectation from L2 Ethertype for IPv4 and IPv6 + * in the tunnel header. */ + #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_VERSION \ + (UINT32_C(0x1) << 9) /* - * If set to 1, device is not ready. If set to 0, device is - * ready to accept all HWRM commands. + * Indicates that header length is out of range in the + * tunnel header. Valid for + * IPv4. */ - #define HWRM_VER_GET_OUTPUT_INIT_PENDING_DEV_NOT_RDY UINT32_C(0x1) - uint8_t unused_0; - uint8_t unused_1; - uint8_t valid; + #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_HDR_LEN \ + (UINT32_C(0x2) << 9) /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * Indicates that the physical packet is shorter than that + * claimed by the PPPoE header length for a tunnel PPPoE + * packet. */ -} __attribute__((packed)); - -/* hwrm_func_reset */ -/* - * Description: This command resets a hardware function (PCIe function) and - * frees any resources used by the function. This command shall be initiated by - * the driver after an FLR has occurred to prepare the function for re-use. This - * command may also be initiated by a driver prior to doing it's own - * configuration. This command puts the function into the reset state. In the - * reset state, global and port related features of the chip are not available. - */ -/* - * Note: This command will reset a function that has already been disabled or - * idled. The command returns all the resources owned by the function so a new - * driver may allocate and configure resources normally. - */ -/* Input (24 bytes) */ -struct hwrm_func_reset_input { - uint16_t req_type; + #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_TUNNEL_TOTAL_ERROR \ + (UINT32_C(0x3) << 9) /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * Indicates that the physical packet is shorter than that claimed + * by the tunnel l3 header length.
Valid for IPv4 or IPv6 + * tunnel packets. */ - uint16_t cmpl_ring; + #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_IP_TOTAL_ERROR \ + (UINT32_C(0x4) << 9) /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * Indicates that the physical packet is shorter than that + * claimed by the tunnel UDP header length for a tunnel + * UDP packet that is not fragmented. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; - /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM - */ - uint64_t resp_addr; + #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_UDP_TOTAL_ERROR \ + (UINT32_C(0x5) << 9) /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * Indicates that the IPv4 TTL or IPv6 hop limit check + * has failed (e.g. TTL = 0) in the tunnel header. Valid + * for IPv4 and IPv6. */ - uint32_t enables; - /* This bit must be '1' for the vf_id_valid field to be configured. */ - #define HWRM_FUNC_RESET_INPUT_ENABLES_VF_ID_VALID UINT32_C(0x1) - uint16_t vf_id; + #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL \ + (UINT32_C(0x6) << 9) + #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_LAST \ + RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL /* - * The ID of the VF that this PF is trying to reset. Only the - * parent PF shall be allowed to reset a child VF. A parent PF - * driver shall use this field only when a specific child VF is - * requested to be reset. + * This indicates that there was an error in the inner + * portion of the packet when this + * field is non-zero. */ - uint8_t func_reset_level; - /* This value indicates the level of a function reset. */ + #define RX_PKT_CMPL_ERRORS_PKT_ERROR_MASK \ + UINT32_C(0xf000) + #define RX_PKT_CMPL_ERRORS_PKT_ERROR_SFT 12 /* - * Reset the caller function and its children - * VFs (if any). If no children functions exist, - * then reset the caller function only. + * No additional error occurred on the tunnel portion + * of the packet or the packet does not have a tunnel. */ - #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETALL UINT32_C(0x0) - /* Reset the caller function only */ - #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETME UINT32_C(0x1) + #define RX_PKT_CMPL_ERRORS_PKT_ERROR_NO_ERROR \ + (UINT32_C(0x0) << 12) /* - * Reset all children VFs of the caller function - * driver if the caller is a PF driver. It is an - * error to specify this level by a VF driver. - * It is an error to specify this level by a PF - * driver with no children VFs. + * Indicates that IP header version does not match + * expectation from L2 Ethertype for IPv4 and IPv6 or that + * an option other than VFT was parsed on + * an FCoE packet. */ - #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETCHILDREN \ - UINT32_C(0x2) + #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L3_BAD_VERSION \ + (UINT32_C(0x1) << 12) /* - * Reset a specific VF of the caller function - * driver if the caller is the parent PF driver. - * It is an error to specify this level by a VF - * driver. It is an error to specify this level - * by a PF driver that is not the parent of the - * VF that is being requested to reset. + * Indicates that header length is out of range.
Valid for + * IPv4 and RoCE */ - #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETVF UINT32_C(0x3) - uint8_t unused_0; -} __attribute__((packed)); - -/* Output (16 bytes) */ -struct hwrm_func_reset_output { - uint16_t error_code; + #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L3_BAD_HDR_LEN \ + (UINT32_C(0x2) << 12) /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * Indicates that the IPv4 TTL or IPv6 hop limit check + * has failed (e.g. TTL = 0). Valid for IPv4 and IPv6 */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L3_BAD_TTL \ + (UINT32_C(0x3) << 12) /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * Indicates that the physical packet is shorter than that + * claimed by the l3 header length. Valid for IPv4, + * IPv6, or RoCE packets. */ - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; + #define RX_PKT_CMPL_ERRORS_PKT_ERROR_IP_TOTAL_ERROR \ + (UINT32_C(0x4) << 12) /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * Indicates that the physical packet is shorter than that + * claimed by the UDP header length for a UDP packet that is + * not fragmented. */ -} __attribute__((packed)); - -/* hwrm_func_vf_cfg */ -/* - * Description: This command allows configuration of a VF by its driver. If this - * function is called by a PF driver, then the HWRM shall fail this command. If - * guest VLAN and/or MAC address are provided in this command, then the HWRM - * shall set up appropriate MAC/VLAN filters for the VF that is being - * configured. A VF driver should set VF MTU/MRU using this command prior to - * allocating RX VNICs or TX rings for the corresponding VF. - */ -/* Input (32 bytes) */ -struct hwrm_func_vf_cfg_input { - uint16_t req_type; + #define RX_PKT_CMPL_ERRORS_PKT_ERROR_UDP_TOTAL_ERROR \ + (UINT32_C(0x5) << 12) /* - * This value indicates what type of request this is. The format for the - * rest of the command is determined by this field. + * Indicates that TCP header length > IP payload. Valid for + * TCP packets only. */ - uint16_t cmpl_ring; + #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN \ + (UINT32_C(0x6) << 12) + /* Indicates that TCP header length < 5. Valid for TCP. */ + #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN_TOO_SMALL \ + (UINT32_C(0x7) << 12) /* - * This value indicates the what completion ring the request will be - * optionally completed on. If the value is -1, then no CR completion - * will be generated. Any other value must be a valid CR ring_id value - * for this function. + * Indicates that TCP option headers result in a TCP header + * size that does not match data offset in TCP header. Valid + * for TCP. */ - uint16_t seq_id; - /* This value indicates the command sequence number.
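For reference, a hedged sketch (editorial, assuming this header is included) of classifying the inner-packet error code above. The *_PKT_ERROR_* values are defined pre-shifted to bits [15:12], so the masked field compares directly against them; the L4_BAD_OPT_LEN code (0x8) is folded into the default arm here.

/* Hypothetical helper: human-readable name for the inner-packet error. */
static inline const char *
example_rx_pkt_error_str(uint16_t errors_v2)
{
	switch (errors_v2 & RX_PKT_CMPL_ERRORS_PKT_ERROR_MASK) {
	case RX_PKT_CMPL_ERRORS_PKT_ERROR_NO_ERROR:
		return "no error";
	case RX_PKT_CMPL_ERRORS_PKT_ERROR_L3_BAD_VERSION:
		return "bad L3 version";
	case RX_PKT_CMPL_ERRORS_PKT_ERROR_L3_BAD_HDR_LEN:
		return "bad L3 header length";
	case RX_PKT_CMPL_ERRORS_PKT_ERROR_L3_BAD_TTL:
		return "bad TTL/hop limit";
	case RX_PKT_CMPL_ERRORS_PKT_ERROR_IP_TOTAL_ERROR:
		return "IP total length error";
	case RX_PKT_CMPL_ERRORS_PKT_ERROR_UDP_TOTAL_ERROR:
		return "UDP total length error";
	case RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN:
		return "bad L4 header length";
	case RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN_TOO_SMALL:
		return "L4 header too small";
	default:
		return "bad TCP option length or unknown";
	}
}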
*/ - uint16_t target_id; + #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN \ + (UINT32_C(0x8) << 12) + #define RX_PKT_CMPL_ERRORS_PKT_ERROR_LAST \ + RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids - * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM + * This field identifies the CFA action rule that was used for this + * packet. */ - uint64_t resp_addr; + uint16_t cfa_code; + uint32_t reorder; /* - * This is the host address where the response will be written when the - * request is complete. This area must be 16B aligned and must be - * cleared to zero before the request is made. + * This value holds the reordering sequence number for the packet. + * If the reordering sequence is not valid, then this value is zero. + * The reordering domain for the packet is in the bottom 8 to 10b of + * the rss_hash value. The bottom 20b of this value contain the + * ordering domain value for the packet. */ - uint32_t enables; - /* This bit must be '1' for the mtu field to be configured. */ - #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_MTU UINT32_C(0x1) - /* This bit must be '1' for the guest_vlan field to be configured. */ - #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_GUEST_VLAN UINT32_C(0x2) + #define RX_PKT_CMPL_REORDER_MASK UINT32_C(0xffffff) + #define RX_PKT_CMPL_REORDER_SFT 0 +} __attribute__((packed)); + +/* rx_tpa_start_cmpl (size:128b/16B) */ +struct rx_tpa_start_cmpl { + uint16_t flags_type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define RX_TPA_START_CMPL_TYPE_MASK UINT32_C(0x3f) + #define RX_TPA_START_CMPL_TYPE_SFT 0 + /* + * RX L2 TPA Start Completion: + * Completion at the beginning of a TPA operation. + * Length = 32B + */ + #define RX_TPA_START_CMPL_TYPE_RX_TPA_START UINT32_C(0x13) + #define RX_TPA_START_CMPL_TYPE_LAST \ + RX_TPA_START_CMPL_TYPE_RX_TPA_START + #define RX_TPA_START_CMPL_FLAGS_MASK UINT32_C(0xffc0) + #define RX_TPA_START_CMPL_FLAGS_SFT 6 + /* This bit will always be '0' for TPA start completions. */ + #define RX_TPA_START_CMPL_FLAGS_ERROR UINT32_C(0x40) + /* This field indicates how the packet was placed in the buffer. */ + #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_MASK UINT32_C(0x380) + #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_SFT 7 /* - * This bit must be '1' for the async_event_cr field to be configured. + * Jumbo: + * TPA Packet was placed using jumbo algorithm. This means + * that the first buffer will be filled with data before + * moving to aggregation buffers. Each aggregation buffer + * will be filled before moving to the next aggregation + * buffer. */ - #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR UINT32_C(0x4) - /* This bit must be '1' for the dflt_mac_addr field to be configured. */ - #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_DFLT_MAC_ADDR UINT32_C(0x8) - uint16_t mtu; + #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_JUMBO \ + (UINT32_C(0x1) << 7) /* - * The maximum transmission unit requested on the function. The HWRM - * should make sure that the mtu of the function does not exceed the mtu - * of the physical port that this function is associated with. In - * addition to requesting mtu per function, it is possible to configure - * mtu per transmit ring. By default, the mtu of each transmit ring - * associated with a function is equal to the mtu of the function. 
The - * HWRM should make sure that the mtu of each transmit ring that is - * assigned to a function has a valid mtu. + * Header/Data Separation: + * Packet was placed using Header/Data separation algorithm. + * The separation location is indicated by the itype field. */ - uint16_t guest_vlan; + #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_HDS \ + (UINT32_C(0x2) << 7) /* - * The guest VLAN for the function being configured. This field's format - * is same as 802.1Q Tag's Tag Control Information (TCI) format that - * includes both Priority Code Point (PCP) and VLAN Identifier (VID). + * GRO/Jumbo: + * Packet will be placed using GRO/Jumbo where the first + * packet is filled with data. Subsequent packets will be + * placed such that any one packet does not span two + * aggregation buffers unless it starts at the beginning of + * an aggregation buffer. */ - uint16_t async_event_cr; + #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_GRO_JUMBO \ + (UINT32_C(0x5) << 7) /* - * ID of the target completion ring for receiving asynchronous event - * completions. If this field is not valid, then the HWRM shall use the - * default completion ring of the function that is being configured as - * the target completion ring for providing any asynchronous event - * completions for that function. If this field is valid, then the HWRM - * shall use the completion ring identified by this ID as the target - * completion ring for providing any asynchronous event completions for - * the function that is being configured. + * GRO/Header-Data Separation: + * Packet will be placed using GRO/HDS where the header + * is in the first packet. + * Payload of each packet will be + * placed such that any one packet does not span two + * aggregation buffers unless it starts at the beginning of + * an aggregation buffer. */ - uint8_t dflt_mac_addr[6]; + #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_GRO_HDS \ + (UINT32_C(0x6) << 7) + #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_LAST \ + RX_TPA_START_CMPL_FLAGS_PLACEMENT_GRO_HDS + /* This bit is '1' if the RSS field in this completion is valid. */ + #define RX_TPA_START_CMPL_FLAGS_RSS_VALID UINT32_C(0x400) + /* unused is 1 b */ + #define RX_TPA_START_CMPL_FLAGS_UNUSED UINT32_C(0x800) /* - * This value is the current MAC address requested by the VF driver to - * be configured on this VF. A value of 00-00-00-00-00-00 indicates no - * MAC address configuration is requested by the VF driver. The parent - * PF driver may reject or overwrite this MAC address. + * This value indicates what the inner packet was determined + * to be. */ -} __attribute__((packed)); - -/* Output (16 bytes) */ - -struct hwrm_func_vf_cfg_output { - uint16_t error_code; + #define RX_TPA_START_CMPL_FLAGS_ITYPE_MASK UINT32_C(0xf000) + #define RX_TPA_START_CMPL_FLAGS_ITYPE_SFT 12 /* - * Pass/Fail or error type Note: receiver to verify the in parameters, - * and fail the call with an error when appropriate + * TCP Packet: + * Indicates that the packet was IP and TCP. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + #define RX_TPA_START_CMPL_FLAGS_ITYPE_TCP \ + (UINT32_C(0x2) << 12) + #define RX_TPA_START_CMPL_FLAGS_ITYPE_LAST \ + RX_TPA_START_CMPL_FLAGS_ITYPE_TCP /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory.
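A minimal sketch (editorial, assuming this header is included) of recognizing a TPA start completion from flags_type. Per the convention documented above, the type value 0x13 is odd, so the record is 32B, i.e. this low/high struct pair.

/* Hypothetical helper: true if flags_type carries a TPA start type. */
static inline int
example_is_tpa_start(uint16_t flags_type)
{
	return (flags_type & RX_TPA_START_CMPL_TYPE_MASK) ==
	       RX_TPA_START_CMPL_TYPE_RX_TPA_START;
}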
+ * This value indicates the amount of packet data written to the + * buffer the opaque field in this completion corresponds to. */ - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; + uint16_t len; + /* + * This is a copy of the opaque field from the RX BD this completion + * corresponds to. + */ + uint32_t opaque; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + uint8_t v1; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define RX_TPA_START_CMPL_V1 UINT32_C(0x1) + #define RX_TPA_START_CMPL_LAST RX_TPA_START_CMPL_V1 + /* + * This is the RSS hash type for the packet. The value is packed + * {tuple_extract_op[1:0],rss_profile_id[4:0],tuple_extract_op[2]}. + * + * The value of tuple_extract_op provides the information about + * what fields the hash was computed on. + * * 0: The RSS hash was computed over source IP address, + * destination IP address, source port, and destination port of inner + * IP and TCP or UDP headers. Note: For non-tunneled packets, + * the packet headers are considered inner packet headers for the RSS + * hash computation purpose. + * * 1: The RSS hash was computed over source IP address and destination + * IP address of inner IP header. Note: For non-tunneled packets, + * the packet headers are considered inner packet headers for the RSS + * hash computation purpose. + * * 2: The RSS hash was computed over source IP address, + * destination IP address, source port, and destination port of + * IP and TCP or UDP headers of outer tunnel headers. + * Note: For non-tunneled packets, this value is not applicable. + * * 3: The RSS hash was computed over source IP address and + * destination IP address of IP header of outer tunnel headers. + * Note: For non-tunneled packets, this value is not applicable. + * + * Note that the 4-tuple values listed above are applicable + * for layer 4 protocols supported and enabled for RSS in the hardware, + * HWRM firmware, and drivers. For example, if RSS hash is supported and + * enabled for TCP traffic only, then the values of tuple_extract_op + * corresponding to 4-tuples are only valid for TCP traffic. + */ + uint8_t rss_hash_type; /* - * This field is used in Output records to indicate that the output is - * completely written to RAM. This field should be read as '1' to - * indicate that the output has been completely written. When writing a - * command completion or response to an internal processor, the order of - * writes has to be such that this field is written last. + * This is the aggregation ID that the completion is associated + * with. Use this number to correlate the TPA start completion + * with the TPA end completion. */ -} __attribute__((packed)); - -/* hwrm_func_qcaps */ -/* - * Description: This command returns capabilities of a function. The input FID - * value is used to indicate what function is being queried. This allows a - * physical function driver to query virtual functions that are children of the - * physical function. The output FID value is needed to configure Rings and - * MSI-X vectors so their DMA operations appear correctly on the PCI bus.
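A hedged sketch of unpacking rss_hash_type according to the packed layout documented above, {tuple_extract_op[1:0], rss_profile_id[4:0], tuple_extract_op[2]}. The exact bit positions below are inferred from that MSB-first ordering and are an assumption of this sketch, not a confirmed hardware layout.

/* Hypothetical helper: recover tuple_extract_op and rss_profile_id. */
static inline void
example_unpack_rss_hash_type(uint8_t rss_hash_type,
			     uint8_t *tuple_extract_op,
			     uint8_t *rss_profile_id)
{
	uint8_t op_1_0 = (rss_hash_type >> 6) & 0x3; /* tuple_extract_op[1:0] */
	uint8_t op_2 = rss_hash_type & 0x1;          /* tuple_extract_op[2] */

	*rss_profile_id = (rss_hash_type >> 1) & 0x1f;
	*tuple_extract_op = (uint8_t)((op_2 << 2) | op_1_0);
}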
- */ -/* Input (24 bytes) */ -struct hwrm_func_qcaps_input { - uint16_t req_type; + uint16_t agg_id; + /* unused2 is 9 b */ + #define RX_TPA_START_CMPL_UNUSED2_MASK UINT32_C(0x1ff) + #define RX_TPA_START_CMPL_UNUSED2_SFT 0 /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * This is the aggregation ID that the completion is associated + * with. Use this number to correlate the TPA start completion + * with the TPA end completion. */ - uint16_t cmpl_ring; + #define RX_TPA_START_CMPL_AGG_ID_MASK UINT32_C(0xfe00) + #define RX_TPA_START_CMPL_AGG_ID_SFT 9 /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * This value is the RSS hash value calculated for the packet + * based on the mode bits and key value in the VNIC. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + uint32_t rss_hash; +} __attribute__((packed)); + +/* rx_tpa_start_cmpl_hi (size:128b/16B) */ +struct rx_tpa_start_cmpl_hi { + uint32_t flags2; /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * This indicates that the ip checksum was calculated for the + * inner packet and that the sum passed for all segments + * included in the aggregation. */ - uint64_t resp_addr; + #define RX_TPA_START_CMPL_FLAGS2_IP_CS_CALC UINT32_C(0x1) /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * This indicates that the TCP, UDP or ICMP checksum was + * calculated for the inner packet and that the sum passed + * for all segments included in the aggregation. */ - uint16_t fid; + #define RX_TPA_START_CMPL_FLAGS2_L4_CS_CALC UINT32_C(0x2) /* - * Function ID of the function that is being queried. 0xFF... - * (All Fs) if the query is for the requesting function. + * This indicates that the ip checksum was calculated for the + * tunnel header and that the sum passed for all segments + * included in the aggregation. */ - uint16_t unused_0[3]; -} __attribute__((packed)); - -/* Output (80 bytes) */ -struct hwrm_func_qcaps_output { - uint16_t error_code; + #define RX_TPA_START_CMPL_FLAGS2_T_IP_CS_CALC UINT32_C(0x4) /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * This indicates that the UDP checksum was + * calculated for the tunnel packet and that the sum passed for + * all segments included in the aggregation. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + #define RX_TPA_START_CMPL_FLAGS2_T_L4_CS_CALC UINT32_C(0x8) + /* This value indicates the format of the metadata field. */ + #define RX_TPA_START_CMPL_FLAGS2_META_FORMAT_MASK UINT32_C(0xf0) + #define RX_TPA_START_CMPL_FLAGS2_META_FORMAT_SFT 4 + /* No metadata information. Value is zero. */ + #define RX_TPA_START_CMPL_FLAGS2_META_FORMAT_NONE \ + (UINT32_C(0x0) << 4) + /* + * The metadata field contains the VLAN tag and TPID value. + * - metadata[11:0] contains the vlan VID value. + * - metadata[12] contains the vlan DE value.
+ * - metadata[15:13] contains the vlan PRI value. + * - metadata[31:16] contains the vlan TPID value. + */ + #define RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN \ + (UINT32_C(0x1) << 4) + #define RX_TPA_START_CMPL_FLAGS2_META_FORMAT_LAST \ + RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * This field indicates the IP type for the inner-most IP header. + * A value of '0' indicates IPv4. A value of '1' indicates IPv6. */ - uint16_t fid; + #define RX_TPA_START_CMPL_FLAGS2_IP_TYPE UINT32_C(0x100) /* - * FID value. This value is used to identify operations on the - * PCI bus as belonging to a particular PCI function. + * This is data from the CFA block as indicated by the meta_format + * field. */ - uint16_t port_id; + uint32_t metadata; + /* When meta_format=1, this value is the VLAN VID. */ + #define RX_TPA_START_CMPL_METADATA_VID_MASK UINT32_C(0xfff) + #define RX_TPA_START_CMPL_METADATA_VID_SFT 0 + /* When meta_format=1, this value is the VLAN DE. */ + #define RX_TPA_START_CMPL_METADATA_DE UINT32_C(0x1000) + /* When meta_format=1, this value is the VLAN PRI. */ + #define RX_TPA_START_CMPL_METADATA_PRI_MASK UINT32_C(0xe000) + #define RX_TPA_START_CMPL_METADATA_PRI_SFT 13 + /* When meta_format=1, this value is the VLAN TPID. */ + #define RX_TPA_START_CMPL_METADATA_TPID_MASK UINT32_C(0xffff0000) + #define RX_TPA_START_CMPL_METADATA_TPID_SFT 16 + uint16_t v2; /* - * Port ID of port that this function is associated with. Valid - * only for the PF. 0xFF... (All Fs) if this function is not - * associated with any port. 0xFF... (All Fs) if this function - * is called from a VF. + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. */ - uint32_t flags; - /* If 1, then Push mode is supported on this function. */ - #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PUSH_MODE_SUPPORTED UINT32_C(0x1) + #define RX_TPA_START_CMPL_V2 UINT32_C(0x1) /* - * If 1, then the global MSI-X auto-masking is enabled for the - * device. + * This field identifies the CFA action rule that was used for this + * packet. */ - #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_GLOBAL_MSIX_AUTOMASKING \ - UINT32_C(0x2) + uint16_t cfa_code; /* - * If 1, then the Precision Time Protocol (PTP) processing is - * supported on this function. The HWRM should enable PTP on - * only a single Physical Function (PF) per port. + * This is the size in bytes of the inner most L4 header. + * This can be subtracted from the payload_offset to determine + * the start of the inner most L4 header. */ - #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED UINT32_C(0x4) + uint32_t inner_l4_size_inner_l3_offset_inner_l2_offset_outer_l3_offset; /* - * If 1, then RDMA over Converged Ethernet (RoCE) v1 is - * supported on this function. + * This is the offset from the beginning of the packet in bytes for + * the outer L3 header. If there is no outer L3 header, then this + * value is zero. */ - #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ROCE_V1_SUPPORTED UINT32_C(0x8) + #define RX_TPA_START_CMPL_OUTER_L3_OFFSET_MASK UINT32_C(0x1ff) + #define RX_TPA_START_CMPL_OUTER_L3_OFFSET_SFT 0 /* - * If 1, then RDMA over Converged Ethernet (RoCE) v2 is - * supported on this function. + * This is the offset from the beginning of the packet in bytes for + * the inner most L2 header. 
*/ - #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ROCE_V2_SUPPORTED UINT32_C(0x10) + #define RX_TPA_START_CMPL_INNER_L2_OFFSET_MASK UINT32_C(0x3fe00) + #define RX_TPA_START_CMPL_INNER_L2_OFFSET_SFT 9 /* - * If 1, then control and configuration of WoL magic packet are - * supported on this function. + * This is the offset from the beginning of the packet in bytes for + * the inner most L3 header. */ - #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_WOL_MAGICPKT_SUPPORTED \ - UINT32_C(0x20) + #define RX_TPA_START_CMPL_INNER_L3_OFFSET_MASK UINT32_C(0x7fc0000) + #define RX_TPA_START_CMPL_INNER_L3_OFFSET_SFT 18 /* - * If 1, then control and configuration of bitmap pattern packet - * are supported on this function. + * This is the size in bytes of the inner most L4 header. + * This can be subtracted from the payload_offset to determine + * the start of the inner most L4 header. */ - #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_WOL_BMP_SUPPORTED UINT32_C(0x40) + #define RX_TPA_START_CMPL_INNER_L4_SIZE_MASK UINT32_C(0xf8000000) + #define RX_TPA_START_CMPL_INNER_L4_SIZE_SFT 27 +} __attribute__((packed)); + +/* rx_tpa_end_cmpl (size:128b/16B) */ +struct rx_tpa_end_cmpl { + uint16_t flags_type; /* - * If set to 1, then the control and configuration of rate limit - * of an allocated TX ring on the queried function is supported. + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. */ - #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_TX_RING_RL_SUPPORTED UINT32_C(0x80) + #define RX_TPA_END_CMPL_TYPE_MASK UINT32_C(0x3f) + #define RX_TPA_END_CMPL_TYPE_SFT 0 /* - * If 1, then control and configuration of minimum and maximum - * bandwidths are supported on the queried function. + * RX L2 TPA End Completion: + * Completion at the end of a TPA operation. + * Length = 32B */ - #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_TX_BW_CFG_SUPPORTED UINT32_C(0x100) + #define RX_TPA_END_CMPL_TYPE_RX_TPA_END UINT32_C(0x15) + #define RX_TPA_END_CMPL_TYPE_LAST \ + RX_TPA_END_CMPL_TYPE_RX_TPA_END + #define RX_TPA_END_CMPL_FLAGS_MASK UINT32_C(0xffc0) + #define RX_TPA_END_CMPL_FLAGS_SFT 6 /* - * If the query is for a VF, then this flag shall be ignored. If - * this query is for a PF and this flag is set to 1, then the PF - * has the capability to set the rate limits on the TX rings of - * its children VFs. If this query is for a PF and this flag is - * set to 0, then the PF does not have the capability to set the - * rate limits on the TX rings of its children VFs. + * When this bit is '1', it indicates a packet that has an + * error of some type. Type of error is indicated in + * error_flags. */ - #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_VF_TX_RING_RL_SUPPORTED \ - UINT32_C(0x200) + #define RX_TPA_END_CMPL_FLAGS_ERROR UINT32_C(0x40) + /* This field indicates how the packet was placed in the buffer. */ + #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_MASK UINT32_C(0x380) + #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_SFT 7 + /* + * Jumbo: + * TPA Packet was placed using jumbo algorithm. This means + * that the first buffer will be filled with data before + * moving to aggregation buffers. Each aggregation buffer + * will be filled before moving to the next aggregation + * buffer. + */ + #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_JUMBO \ + (UINT32_C(0x1) << 7) + /* + * Header/Data Separation: + * Packet was placed using Header/Data separation algorithm. + * The separation location is indicated by the itype field. 
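A usage sketch (editorial, assuming this header is included) for the packed offsets word of the TPA start completion: the four sub-fields above occupy bits [8:0], [17:9], [26:18], and [31:27], and unpack with the corresponding mask/shift pairs.

/* Hypothetical helper: split the combined offsets/size word. */
static inline void
example_decode_tpa_offsets(uint32_t w, uint16_t *outer_l3_off,
			   uint16_t *inner_l2_off, uint16_t *inner_l3_off,
			   uint8_t *inner_l4_size)
{
	*outer_l3_off = (w & RX_TPA_START_CMPL_OUTER_L3_OFFSET_MASK) >>
			RX_TPA_START_CMPL_OUTER_L3_OFFSET_SFT;
	*inner_l2_off = (w & RX_TPA_START_CMPL_INNER_L2_OFFSET_MASK) >>
			RX_TPA_START_CMPL_INNER_L2_OFFSET_SFT;
	*inner_l3_off = (w & RX_TPA_START_CMPL_INNER_L3_OFFSET_MASK) >>
			RX_TPA_START_CMPL_INNER_L3_OFFSET_SFT;
	*inner_l4_size = (w & RX_TPA_START_CMPL_INNER_L4_SIZE_MASK) >>
			 RX_TPA_START_CMPL_INNER_L4_SIZE_SFT;
}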
*/ + #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_HDS \ + (UINT32_C(0x2) << 7) + /* + * GRO/Jumbo: + * Packet will be placed using GRO/Jumbo where the first + * packet is filled with data. Subsequent packets will be + * placed such that any one packet does not span two + * aggregation buffers unless it starts at the beginning of + * an aggregation buffer. + */ + #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_GRO_JUMBO \ + (UINT32_C(0x5) << 7) /* - * If the query is for a VF, then this flag shall be ignored. If - * this query is for a PF and this flag is set to 1, then the PF - * has the capability to set the minimum and/or maximum - * bandwidths for its children VFs. If this query is for a PF - * and this flag is set to 0, then the PF does not have the - * capability to set the minimum or maximum bandwidths for its - * children VFs. + * GRO/Header-Data Separation: + * Packet will be placed using GRO/HDS where the header + * is in the first packet. + * Payload of each packet will be + * placed such that any one packet does not span two + * aggregation buffers unless it starts at the beginning of + * an aggregation buffer. */ - #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_VF_BW_CFG_SUPPORTED UINT32_C(0x400) + #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_GRO_HDS \ + (UINT32_C(0x6) << 7) + #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_LAST \ + RX_TPA_END_CMPL_FLAGS_PLACEMENT_GRO_HDS + /* unused is 2 b */ + #define RX_TPA_END_CMPL_FLAGS_UNUSED_MASK UINT32_C(0xc00) + #define RX_TPA_END_CMPL_FLAGS_UNUSED_SFT 10 /* - * Standard TX Ring mode is used for the allocation of TX ring - * and underlying scheduling resources that allow bandwidth - * reservation and limit settings on the queried function. If - * set to 1, then standard TX ring mode is supported on the - * queried function. If set to 0, then standard TX ring mode is - * not available on the queried function. + * This value indicates what the inner packet was determined + * to be. + * - 2 TCP Packet + * Indicates that the packet was IP and TCP. This indicates + * that the ip_cs field is valid and that the tcp_udp_cs + * field is valid and contains the TCP checksum. + * This also indicates that the payload_offset field is valid. */ - #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_STD_TX_RING_MODE_SUPPORTED \ - UINT32_C(0x800) - uint8_t mac_address[6]; + #define RX_TPA_END_CMPL_FLAGS_ITYPE_MASK UINT32_C(0xf000) + #define RX_TPA_END_CMPL_FLAGS_ITYPE_SFT 12 /* - * This value is current MAC address configured for this - * function. A value of 00-00-00-00-00-00 indicates no MAC - * address is currently configured. + * This value is zero for TPA End completions. + * There is no data in the buffer that corresponds to the opaque + * value in this completion. */ - uint16_t max_rsscos_ctx; + uint16_t len; /* - * The maximum number of RSS/COS contexts that can be allocated - * to the function. + * This is a copy of the opaque field from the RX BD this completion + * corresponds to. */ - uint16_t max_cmpl_rings; + uint32_t opaque; /* - * The maximum number of completion rings that can be allocated - * to the function. + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. */ - uint16_t max_tx_rings; + uint8_t agg_bufs_v1; /* - * The maximum number of transmit rings that can be allocated to - * the function. + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1.
The odd passes will write 0. */ - uint16_t max_rx_rings; + #define RX_TPA_END_CMPL_V1 UINT32_C(0x1) /* - * The maximum number of receive rings that can be allocated to - * the function. + * This value is the number of aggregation buffers that follow this + * entry in the completion ring that are a part of this aggregation + * packet. + * If the value is zero, then the packet is completely contained + * in the buffer space provided in the aggregation start completion. */ - uint16_t max_l2_ctxs; + #define RX_TPA_END_CMPL_AGG_BUFS_MASK UINT32_C(0x7e) + #define RX_TPA_END_CMPL_AGG_BUFS_SFT 1 + /* This value is the number of segments in the TPA operation. */ + uint8_t tpa_segs; /* - * The maximum number of L2 contexts that can be allocated to - * the function. + * This value indicates the offset in bytes from the beginning of the packet + * where the inner payload starts. This value is valid for TCP, UDP, + * FCoE, and RoCE packets. + * + * A value of zero indicates an offset of 256 bytes. */ - uint16_t max_vnics; + uint8_t payload_offset; + uint8_t agg_id; + /* unused2 is 1 b */ + #define RX_TPA_END_CMPL_UNUSED2 UINT32_C(0x1) /* - * The maximum number of VNICs that can be allocated to the - * function. + * This is the aggregation ID that the completion is associated + * with. Use this number to correlate the TPA start completion + * with the TPA end completion. */ - uint16_t first_vf_id; + #define RX_TPA_END_CMPL_AGG_ID_MASK UINT32_C(0xfe) + #define RX_TPA_END_CMPL_AGG_ID_SFT 1 /* - * The identifier for the first VF enabled on a PF. This is - * valid only on the PF with SR-IOV enabled. 0xFF... (All Fs) if - * this command is called on a PF with SR-IOV disabled or on a - * VF. + * For non-GRO packets, this value is the + * timestamp delta between earliest and latest timestamp values for + * TPA packet. If packets were not time stamped, then delta will be + * zero. + * + * For GRO packets, this field is zero except for the following + * sub-fields. + * - tsdelta[31] + * Timestamp present indication. When '0', no Timestamp + * option is in the packet. When '1', then a Timestamp + * option is present in the packet. */ - uint16_t max_vfs; + uint32_t tsdelta; +} __attribute__((packed)); + +/* rx_tpa_end_cmpl_hi (size:128b/16B) */ +struct rx_tpa_end_cmpl_hi { /* - * The maximum number of VFs that can be allocated to the - * function. This is valid only on the PF with SR-IOV enabled. - * 0xFF... (All Fs) if this command is called on a PF with SR- - * IOV disabled or on a VF. + * This value is the number of duplicate ACKs that have been + * received as part of the TPA operation. */ - uint16_t max_stat_ctx; + uint32_t tpa_dup_acks; /* - * The maximum number of statistic contexts that can be - * allocated to the function. + * This value is the number of duplicate ACKs that have been + * received as part of the TPA operation. */ - uint32_t max_encap_records; + #define RX_TPA_END_CMPL_TPA_DUP_ACKS_MASK UINT32_C(0xf) + #define RX_TPA_END_CMPL_TPA_DUP_ACKS_SFT 0 /* - * The maximum number of Encapsulation records that can be - * offloaded by this function. + * This value is valid when the TPA completion is active. It + * indicates the length of the longest segment of the TPA operation + * for LRO mode and the length of the first segment in GRO mode. + * + * This value may be used by GRO software to re-construct the original + * packet stream from the TPA packet. This is the length of all + * but the last segment for GRO. In LRO mode this value may be used + * to indicate MSS size to the stack.
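A hedged sketch (editorial, assuming this header is included) of pulling the packed fields out of a TPA end completion. The agg_bufs count shares a byte with the V1 bit, the aggregation ID shares a byte with an unused bit, and, per the comment above, a payload_offset of zero encodes 256 bytes.

/* Hypothetical helper: decode the byte-packed TPA end fields. */
static inline void
example_decode_tpa_end(uint8_t agg_bufs_v1, uint8_t agg_id_byte,
		       uint8_t payload_offset, unsigned int *agg_bufs,
		       unsigned int *agg_id, unsigned int *payload_off)
{
	*agg_bufs = (agg_bufs_v1 & RX_TPA_END_CMPL_AGG_BUFS_MASK) >>
		    RX_TPA_END_CMPL_AGG_BUFS_SFT;
	*agg_id = (agg_id_byte & RX_TPA_END_CMPL_AGG_ID_MASK) >>
		  RX_TPA_END_CMPL_AGG_ID_SFT;
	/* Zero encodes an offset of 256 bytes per the field description. */
	*payload_off = payload_offset ? payload_offset : 256;
}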
*/ - uint32_t max_decap_records; + uint16_t tpa_seg_len; + /* unused4 is 16 b */ + uint16_t unused3; + uint16_t errors_v2; /* - * The maximum number of decapsulation records that can be - * offloaded by this function. + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. */ - uint32_t max_tx_em_flows; + #define RX_TPA_END_CMPL_V2 UINT32_C(0x1) + #define RX_TPA_END_CMPL_ERRORS_MASK UINT32_C(0xfffe) + #define RX_TPA_END_CMPL_ERRORS_SFT 1 /* - * The maximum number of Exact Match (EM) flows that can be - * offloaded by this function on the TX side. + * This error indicates that there was some sort of problem with + * the BDs for the packet that was found after part of the + * packet was already placed. The packet should be treated as + * invalid. */ - uint32_t max_tx_wm_flows; + #define RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_MASK UINT32_C(0xe) + #define RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_SFT 1 /* - * The maximum number of Wildcard Match (WM) flows that can be - * offloaded by this function on the TX side. + * This error occurs when there is a fatal HW problem in + * the chip only. It indicates that there were no + * BDs on chip but that there was adequate reservation + * provided by the TPA block. */ - uint32_t max_rx_em_flows; + #define RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_NOT_ON_CHIP \ + (UINT32_C(0x2) << 1) /* - * The maximum number of Exact Match (EM) flows that can be - * offloaded by this function on the RX side. + * This error occurs when the TPA block was not configured to + * reserve adequate BDs for TPA operations on this RX + * ring. All data for the TPA operation was not placed. + * + * This error can also be generated when the number of + * segments is not programmed correctly in TPA and the + * 33 total aggregation buffers allowed for the TPA + * operation has been exceeded. */ - uint32_t max_rx_wm_flows; + #define RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_RSV_ERROR \ + (UINT32_C(0x4) << 1) + #define RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_LAST \ + RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_RSV_ERROR + /* unused5 is 16 b */ + uint16_t unused_4; /* - * The maximum number of Wildcard Match (WM) flows that can be - * offloaded by this function on the RX side. + * This is the opaque value that was completed for the TPA start + * completion that corresponds to this TPA end completion. */ - uint32_t max_mcast_filters; + uint32_t start_opaque; +} __attribute__((packed)); + +/* rx_abuf_cmpl (size:128b/16B) */ +struct rx_abuf_cmpl { + uint16_t type; /* - * The maximum number of multicast filters that can be supported - * by this function on the RX side. + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. */ - uint32_t max_flow_id; + #define RX_ABUF_CMPL_TYPE_MASK UINT32_C(0x3f) + #define RX_ABUF_CMPL_TYPE_SFT 0 /* - * The maximum value of flow_id that can be supported in - * completion records. + * RX Aggregation Buffer completion: + * Completion of an L2 aggregation buffer in support of + * TPA, HDS, or Jumbo packet completion. Length = 16B */ - uint32_t max_hw_ring_grps; + #define RX_ABUF_CMPL_TYPE_RX_AGG UINT32_C(0x12) + #define RX_ABUF_CMPL_TYPE_LAST RX_ABUF_CMPL_TYPE_RX_AGG /* - * The maximum number of HW ring groups that can be supported on - * this function.
+ * This is the length of the data for the packet stored in this + * aggregation buffer identified by the opaque value. This does not + * include the length of any + * data placed in other aggregation BDs or in the packet or buffer + * BDs. This length does not include any space added due to + * hdr_offset register during HDS placement mode. */ - uint16_t max_sp_tx_rings; + uint16_t len; /* - * The maximum number of strict priority transmit rings that can - * be allocated to the function. This number indicates the - * maximum number of TX rings that can be assigned strict - * priorities out of the maximum number of TX rings that can be - * allocated (max_tx_rings) to the function. + * This is a copy of the opaque field from the RX BD this aggregation + * buffer corresponds to. */ - uint8_t unused_0; - uint8_t valid; + uint32_t opaque; + uint32_t v; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. */ + #define RX_ABUF_CMPL_V UINT32_C(0x1) + /* unused3 is 32 b */ + uint32_t unused_2; } __attribute__((packed)); -/* hwrm_func_qcfg */ -/* - * Description: This command returns the current configuration of a function. - * The input FID value is used to indicate what function is being queried. This - * allows a physical function driver to query virtual functions that are - * children of the physical function. The output FID value is needed to - * configure Rings and MSI-X vectors so their DMA operations appear correctly on - * the PCI bus. This command should be called by every driver after - * 'hwrm_func_cfg' to get the actual number of resources allocated by the HWRM. - * The values returned by hwrm_func_qcfg are the values the driver shall use. - * These values may be different than what was originally requested in the - * 'hwrm_func_cfg' command. - */ -/* Input (24 bytes) */ -struct hwrm_func_qcfg_input { - uint16_t req_type; +/* eject_cmpl (size:128b/16B) */ +struct eject_cmpl { + uint16_t type; /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. */ - uint16_t cmpl_ring; + #define EJECT_CMPL_TYPE_MASK UINT32_C(0x3f) + #define EJECT_CMPL_TYPE_SFT 0 /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * Statistics Ejection Completion: + * Completion of statistics data ejection buffer. + * Length = 16B */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + #define EJECT_CMPL_TYPE_STAT_EJECT UINT32_C(0x1a) + #define EJECT_CMPL_TYPE_LAST EJECT_CMPL_TYPE_STAT_EJECT /* - * Target ID of this command. 
0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * This is the length of the statistics data stored in this + * buffer. */ - uint64_t resp_addr; + uint16_t len; /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * This is a copy of the opaque field from the RX BD this ejection + * buffer corresponds to. */ - uint16_t fid; + uint32_t opaque; + uint32_t v; /* - * Function ID of the function that is being queried. 0xFF... - * (All Fs) if the query is for the requesting function. + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. */ - uint16_t unused_0[3]; + #define EJECT_CMPL_V UINT32_C(0x1) + /* unused3 is 32 b */ + uint32_t unused_2; } __attribute__((packed)); -/* Output (72 bytes) */ -struct hwrm_func_qcfg_output { - uint16_t error_code; +/* hwrm_cmpl (size:128b/16B) */ +struct hwrm_cmpl { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define HWRM_CMPL_TYPE_MASK UINT32_C(0x3f) + #define HWRM_CMPL_TYPE_SFT 0 + /* + * HWRM Command Completion: + * Completion of an HWRM command. + */ + #define HWRM_CMPL_TYPE_HWRM_DONE UINT32_C(0x20) + #define HWRM_CMPL_TYPE_LAST HWRM_CMPL_TYPE_HWRM_DONE + /* This is the sequence_id of the HWRM command that has completed. */ + uint16_t sequence_id; + /* unused2 is 32 b */ + uint32_t unused_1; + uint32_t v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define HWRM_CMPL_V UINT32_C(0x1) + /* unused4 is 32 b */ + uint32_t unused_3; +} __attribute__((packed)); + +/* hwrm_fwd_req_cmpl (size:128b/16B) */ +struct hwrm_fwd_req_cmpl { /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + uint16_t req_len_type; /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. */ - uint16_t fid; + #define HWRM_FWD_REQ_CMPL_TYPE_MASK UINT32_C(0x3f) + #define HWRM_FWD_REQ_CMPL_TYPE_SFT 0 + /* Forwarded HWRM Request */ + #define HWRM_FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ UINT32_C(0x22) + #define HWRM_FWD_REQ_CMPL_TYPE_LAST \ + HWRM_FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ + /* Length of forwarded request in bytes. 
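A minimal sketch (editorial, assuming this header is included) of matching an HWRM command-done completion and reading back the sequence id it acknowledges. A real consumer would also check the V bit parity against the current ring pass and convert the little-endian wire fields (e.g. with rte_le_to_cpu_16()); both are omitted here for brevity.

/* Hypothetical helper: true if this is a HWRM_DONE completion. */
static inline int
example_hwrm_done(const struct hwrm_cmpl *cmpl, uint16_t *seq_id)
{
	if ((cmpl->type & HWRM_CMPL_TYPE_MASK) != HWRM_CMPL_TYPE_HWRM_DONE)
		return 0;
	*seq_id = cmpl->sequence_id;
	return 1;
}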
*/ + #define HWRM_FWD_REQ_CMPL_REQ_LEN_MASK UINT32_C(0xffc0) + #define HWRM_FWD_REQ_CMPL_REQ_LEN_SFT 6 /* - * FID value. This value is used to identify operations on the - * PCI bus as belonging to a particular PCI function. + * Source ID of this request. + * Typically used in forwarding requests and responses. + * 0x0 - 0xFFF8 - Used for function ids + * 0xFFF8 - 0xFFFE - Reserved for internal processors + * 0xFFFF - HWRM */ - uint16_t port_id; + uint16_t source_id; + /* unused1 is 32 b */ + uint32_t unused0; + /* Address of forwarded request. */ + uint32_t req_buf_addr_v[2]; /* - * Port ID of port that this function is associated with. - * 0xFF... (All Fs) if this function is not associated with any - * port. + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. */ - uint16_t vlan; + #define HWRM_FWD_REQ_CMPL_V UINT32_C(0x1) + /* Address of forwarded request. */ + #define HWRM_FWD_REQ_CMPL_REQ_BUF_ADDR_MASK UINT32_C(0xfffffffe) + #define HWRM_FWD_REQ_CMPL_REQ_BUF_ADDR_SFT 1 +} __attribute__((packed)); + +/* hwrm_fwd_resp_cmpl (size:128b/16B) */ +struct hwrm_fwd_resp_cmpl { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define HWRM_FWD_RESP_CMPL_TYPE_MASK UINT32_C(0x3f) + #define HWRM_FWD_RESP_CMPL_TYPE_SFT 0 + /* Forwarded HWRM Response */ + #define HWRM_FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP UINT32_C(0x24) + #define HWRM_FWD_RESP_CMPL_TYPE_LAST \ + HWRM_FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP + /* + * Source ID of this response. + * Typically used in forwarding requests and responses. + * 0x0 - 0xFFF8 - Used for function ids + * 0xFFF8 - 0xFFFE - Reserved for internal processors + * 0xFFFF - HWRM + */ + uint16_t source_id; + /* Length of forwarded response in bytes. */ + uint16_t resp_len; + /* unused2 is 16 b */ + uint16_t unused_1; + /* Address of forwarded response. */ + uint32_t resp_buf_addr_v[2]; /* - * This value is the current VLAN setting for this function. The - * value of 0 for this field indicates no priority tagging or - * VLAN is used. This field's format is same as 802.1Q Tag's Tag - * Control Information (TCI) format that includes both Priority - * Code Point (PCP) and VLAN Identifier (VID). + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. */ - uint16_t flags; + #define HWRM_FWD_RESP_CMPL_V UINT32_C(0x1) + /* Address of forwarded response. */ + #define HWRM_FWD_RESP_CMPL_RESP_BUF_ADDR_MASK UINT32_C(0xfffffffe) + #define HWRM_FWD_RESP_CMPL_RESP_BUF_ADDR_SFT 1 +} __attribute__((packed)); + +/* hwrm_async_event_cmpl (size:128b/16B) */ +struct hwrm_async_event_cmpl { + uint16_t type; /* - * If 1, then magic packet based Out-Of-Box WoL is enabled on - * the port associated with this function. + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records.
*/ - #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_OOB_WOL_MAGICPKT_ENABLED \ + #define HWRM_ASYNC_EVENT_CMPL_TYPE_MASK UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_TYPE_SFT 0 + /* HWRM Asynchronous Event Information */ + #define HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT UINT32_C(0x2e) + #define HWRM_ASYNC_EVENT_CMPL_TYPE_LAST \ + HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT + /* Identifiers of events. */ + uint16_t event_id; + /* Link status changed */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE \ + UINT32_C(0x0) + /* Link MTU changed */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE \ UINT32_C(0x1) - /* - * If 1, then bitmap pattern based Out-Of-Box WoL packet is - * enabled on the port associated with this function. - */ - #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_OOB_WOL_BMP_ENABLED UINT32_C(0x2) - /* - * If set to 1, then FW based DCBX agent is enabled and running - * on the port associated with this function. If set to 0, then - * DCBX agent is not running in the firmware. - */ - #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_FW_DCBX_AGENT_ENABLED \ + /* Link speed changed */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE \ + UINT32_C(0x2) + /* DCB Configuration changed */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE \ + UINT32_C(0x3) + /* Port connection not allowed */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED \ UINT32_C(0x4) + /* Link speed configuration was not allowed */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED \ + UINT32_C(0x5) + /* Link speed configuration change */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE \ + UINT32_C(0x6) + /* Port PHY configuration change */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE \ + UINT32_C(0x7) + /* Function driver unloaded */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD \ + UINT32_C(0x10) + /* Function driver loaded */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD \ + UINT32_C(0x11) + /* Function FLR related processing has completed */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT \ + UINT32_C(0x12) + /* PF driver unloaded */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD \ + UINT32_C(0x20) + /* PF driver loaded */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD \ + UINT32_C(0x21) + /* VF Function Level Reset (FLR) */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR \ + UINT32_C(0x30) + /* VF MAC Address Change */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE \ + UINT32_C(0x31) + /* PF-VF communication channel status change. */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE \ + UINT32_C(0x32) + /* VF Configuration Change */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE \ + UINT32_C(0x33) + /* LLFC/PFC Configuration Change */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE \ + UINT32_C(0x34) + /* HWRM Error */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR \ + UINT32_C(0xff) + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LAST \ + HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR + /* Event specific data */ + uint32_t event_data2; + uint8_t opaque_v; /* - * Standard TX Ring mode is used for the allocation of TX ring - * and underlying scheduling resources that allow bandwidth - * reservation and limit settings on the queried function. If - * set to 1, then standard TX ring mode is enabled on the - * queried function. If set to 0, then the standard TX ring mode - * is disabled on the queried function. 
In this extended TX ring - * resource mode, the minimum and maximum bandwidth settings are - * not supported to allow the allocation of TX rings to span - * multiple scheduler nodes. - */ - #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_STD_TX_RING_MODE_ENABLED \ - UINT32_C(0x8) + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define HWRM_ASYNC_EVENT_CMPL_V UINT32_C(0x1) + /* opaque is 7 b */ + #define HWRM_ASYNC_EVENT_CMPL_OPAQUE_MASK UINT32_C(0xfe) + #define HWRM_ASYNC_EVENT_CMPL_OPAQUE_SFT 1 + /* 8-lsb timestamp from POR (100-msec resolution) */ + uint8_t timestamp_lo; + /* 16-lsb timestamp from POR (100-msec resolution) */ + uint16_t timestamp_hi; + /* Event specific data */ + uint32_t event_data1; +} __attribute__((packed)); + +/* hwrm_async_event_cmpl_link_status_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_link_status_change { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_MASK \ + UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_SFT 0 + /* HWRM Asynchronous Event Information */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT \ + UINT32_C(0x2e) + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_LAST \ + HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT + /* Identifiers of events. */ + uint16_t event_id; + /* Link status changed */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE \ + UINT32_C(0x0) + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LAST \ + HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE + /* Event specific data */ + uint32_t event_data2; + uint8_t opaque_v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_V \ + UINT32_C(0x1) + /* opaque is 7 b */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_MASK \ + UINT32_C(0xfe) + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_SFT 1 + /* 8-lsb timestamp from POR (100-msec resolution) */ + uint8_t timestamp_lo; + /* 16-lsb timestamp from POR (100-msec resolution) */ + uint16_t timestamp_hi; + /* Event specific data */ + uint32_t event_data1; + /* Indicates link status change */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE \ + UINT32_C(0x1) + /* + * If this bit is set to 0, then it indicates that the link + * was up and it went down. + */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_DOWN \ + UINT32_C(0x0) + /* + * If this bit is set to 1, then it indicates that the link + * was down and it went up.
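A hedged sketch (editorial, assuming this header is included) of reading the link direction out of event_data1 for this event: per the bit description above, bit 0 set means the link came up, clear means it went down.

/* Hypothetical helper: nonzero if the link transitioned to up. */
static inline int
example_link_went_up(uint32_t event_data1)
{
	return !!(event_data1 &
		HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE);
}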
*/ + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP \ + UINT32_C(0x1) + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_LAST \ + HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP + /* Indicates the physical port on which this link status change occurred */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_MASK \ + UINT32_C(0xe) + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_SFT \ + 1 + /* PORT ID */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_MASK \ + UINT32_C(0xffff0) + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_SFT \ + 4 +} __attribute__((packed)); + +/* hwrm_async_event_cmpl_link_mtu_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_link_mtu_change { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_MASK \ + UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_SFT 0 + /* HWRM Asynchronous Event Information */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_HWRM_ASYNC_EVENT \ + UINT32_C(0x2e) + #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_LAST \ + HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_HWRM_ASYNC_EVENT + /* Identifiers of events. */ + uint16_t event_id; + /* Link MTU changed */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_ID_LINK_MTU_CHANGE \ + UINT32_C(0x1) + #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_ID_LAST \ + HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_ID_LINK_MTU_CHANGE + /* Event specific data */ + uint32_t event_data2; + uint8_t opaque_v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_V UINT32_C(0x1) + /* opaque is 7 b */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_MASK \ + UINT32_C(0xfe) + #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_SFT 1 + /* 8-lsb timestamp from POR (100-msec resolution) */ + uint8_t timestamp_lo; + /* 16-lsb timestamp from POR (100-msec resolution) */ + uint16_t timestamp_hi; + /* Event specific data */ + uint32_t event_data1; + /* The new MTU of the link in bytes. */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_MASK \ + UINT32_C(0xffff) + #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_SFT 0 +} __attribute__((packed)); + +/* hwrm_async_event_cmpl_link_speed_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_link_speed_change { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_MASK \ + UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_SFT 0 + /* HWRM Asynchronous Event Information */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_HWRM_ASYNC_EVENT \ + UINT32_C(0x2e) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_LAST \ + HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_HWRM_ASYNC_EVENT + /* Identifiers of events. 
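The LINK_CHANGE bit and the PORT_ID field of event_data1 defined above unpack with plain mask/shift arithmetic. A sketch under stated assumptions (the handler name is illustrative, and fields are treated as host-endian; a real driver would convert with rte_le_to_cpu_32()):

#include <stdint.h>
#include <stdio.h>
#include "hsi_struct_def_dpdk.h"

/* Decode event_data1 of a link status change completion: bit 0 is the
 * new link state (1 = came up, 0 = went down), bits 19:4 are the port ID. */
static void handle_link_status_change(
	const struct hwrm_async_event_cmpl_link_status_change *c)
{
	uint32_t data1 = c->event_data1;
	int link_up = !!(data1 &
	    HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE);
	uint16_t port_id = (data1 &
	    HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_MASK) >>
	    HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_SFT;

	printf("port %u link %s\n", port_id, link_up ? "up" : "down");
}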
*/ + uint16_t event_id; + /* Link speed changed */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_ID_LINK_SPEED_CHANGE \ + UINT32_C(0x2) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_ID_LAST \ + HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_ID_LINK_SPEED_CHANGE + /* Event specific data */ + uint32_t event_data2; + uint8_t opaque_v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_V \ + UINT32_C(0x1) + /* opaque is 7 b */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_MASK \ + UINT32_C(0xfe) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_SFT 1 + /* 8-lsb timestamp from POR (100-msec resolution) */ + uint8_t timestamp_lo; + /* 16-lsb timestamp from POR (100-msec resolution) */ + uint16_t timestamp_hi; + /* Event specific data */ + uint32_t event_data1; + /* + * When this bit is '1', the link was forced to the + * force_link_speed value. + */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_FORCE \ + UINT32_C(0x1) + /* The new link speed in 100 Mbps units. */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_MASK \ + UINT32_C(0xfffe) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_SFT \ + 1 + /* 100Mb link speed */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100MB \ + (UINT32_C(0x1) << 1) + /* 1Gb link speed */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_1GB \ + (UINT32_C(0xa) << 1) + /* 2Gb link speed */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_2GB \ + (UINT32_C(0x14) << 1) + /* 2.5Gb link speed */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_2_5GB \ + (UINT32_C(0x19) << 1) + /* 10Gb link speed */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_10GB \ + (UINT32_C(0x64) << 1) + /* 20Gb link speed */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_20GB \ + (UINT32_C(0xc8) << 1) + /* 25Gb link speed */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_25GB \ + (UINT32_C(0xfa) << 1) + /* 40Gb link speed */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_40GB \ + (UINT32_C(0x190) << 1) + /* 50Gb link speed */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_50GB \ + (UINT32_C(0x1f4) << 1) + /* 100Gb link speed */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100GB \ + (UINT32_C(0x3e8) << 1) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_LAST \ + HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100GB + /* PORT ID */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_MASK \ + UINT32_C(0xffff0000) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_SFT \ + 16 +} __attribute__((packed)); + +/* hwrm_async_event_cmpl_dcb_config_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_dcb_config_change { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. 
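Because the new speed above is encoded in 100 Mbps units (0xa is 1 Gb, 0x3e8 is 100 Gb), converting to Mbps is a single multiply. A sketch along the same lines as the previous one; the handler name is again illustrative and endian conversion is elided:

#include <stdint.h>
#include <stdio.h>
#include "hsi_struct_def_dpdk.h"

/* Decode event_data1 of a link speed change completion: bit 0 says
 * whether the speed was forced, bits 15:1 carry the new speed in
 * 100 Mbps units, and bits 31:16 carry the port ID. */
static void handle_link_speed_change(
	const struct hwrm_async_event_cmpl_link_speed_change *c)
{
	uint32_t data1 = c->event_data1;
	uint32_t speed_100mbps = (data1 &
	    HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_MASK) >>
	    HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_SFT;
	uint16_t port_id = (data1 &
	    HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_MASK) >>
	    HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_SFT;

	printf("port %u: new speed %u Mbps%s\n", port_id, speed_100mbps * 100,
	       (data1 & HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_FORCE) ?
	       " (forced)" : "");
}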
Odd values indicate 32B + * records. + */ + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_MASK \ + UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_SFT 0 + /* HWRM Asynchronous Event Information */ + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_HWRM_ASYNC_EVENT \ + UINT32_C(0x2e) + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_LAST \ + HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_HWRM_ASYNC_EVENT + /* Identifiers of events. */ + uint16_t event_id; + /* DCB Configuration changed */ + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_ID_DCB_CONFIG_CHANGE \ + UINT32_C(0x3) + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_ID_LAST \ + HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_ID_DCB_CONFIG_CHANGE + /* Event specific data */ + uint32_t event_data2; + /* ETS configuration change */ + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA2_ETS \ + UINT32_C(0x1) + /* PFC configuration change */ + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA2_PFC \ + UINT32_C(0x2) + /* APP configuration change */ + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA2_APP \ + UINT32_C(0x4) + uint8_t opaque_v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_V \ + UINT32_C(0x1) + /* opaque is 7 b */ + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_MASK \ + UINT32_C(0xfe) + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_SFT 1 + /* 8-lsb timestamp from POR (100-msec resolution) */ + uint8_t timestamp_lo; + /* 16-lsb timestamp from POR (100-msec resolution) */ + uint16_t timestamp_hi; + /* Event specific data */ + uint32_t event_data1; + /* PORT ID */ + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_MASK \ + UINT32_C(0xffff) + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_SFT \ + 0 + /* Priority recommended for RoCE traffic */ + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_MASK \ + UINT32_C(0xff0000) + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_SFT \ + 16 + /* none is 255 */ + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_NONE \ + (UINT32_C(0xff) << 16) + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_LAST \ + HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_NONE + /* Priority recommended for L2 traffic */ + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_MASK \ + UINT32_C(0xff000000) + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_SFT \ + 24 + /* none is 255 */ + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_NONE \ + (UINT32_C(0xff) << 24) + #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_LAST \ + HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_NONE +} __attribute__((packed)); + +/* hwrm_async_event_cmpl_port_conn_not_allowed (size:128b/16B) */ +struct hwrm_async_event_cmpl_port_conn_not_allowed { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. 
+ */ + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_MASK \ + UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_SFT \ + 0 + /* HWRM Asynchronous Event Information */ + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT \ + UINT32_C(0x2e) + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_LAST \ + HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT + /* Identifiers of events. */ + uint16_t event_id; + /* Port connection not allowed */ + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED \ + UINT32_C(0x4) + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_LAST \ + HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED + /* Event specific data */ + uint32_t event_data2; + uint8_t opaque_v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_V \ + UINT32_C(0x1) + /* opaque is 7 b */ + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_MASK \ + UINT32_C(0xfe) + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_SFT 1 + /* 8-lsb timestamp from POR (100-msec resolution) */ + uint8_t timestamp_lo; + /* 16-lsb timestamp from POR (100-msec resolution) */ + uint16_t timestamp_hi; + /* Event specific data */ + uint32_t event_data1; + /* PORT ID */ + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK \ + UINT32_C(0xffff) + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT \ + 0 + /* + * This value indicates the current port level enforcement policy + * for the optics module when there is an optical module mismatch + * and port is not connected. + */ + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_MASK \ + UINT32_C(0xff0000) + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_SFT \ + 16 + /* No enforcement */ + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_NONE \ + (UINT32_C(0x0) << 16) + /* Disable Transmit side Laser. */ + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_DISABLETX \ + (UINT32_C(0x1) << 16) + /* Raise a warning message. */ + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_WARNINGMSG \ + (UINT32_C(0x2) << 16) + /* Power down the module. */ + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN \ + (UINT32_C(0x3) << 16) + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_LAST \ + HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN +} __attribute__((packed)); + +/* hwrm_async_event_cmpl_link_speed_cfg_not_allowed (size:128b/16B) */ +struct hwrm_async_event_cmpl_link_speed_cfg_not_allowed { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. 
+ */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_MASK \ + UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_SFT \ + 0 + /* HWRM Asynchronous Event Information */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT \ + UINT32_C(0x2e) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_LAST \ + HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT + /* Identifiers of events. */ + uint16_t event_id; + /* Link speed configuration was not allowed */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED \ + UINT32_C(0x5) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_ID_LAST \ + HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED + /* Event specific data */ + uint32_t event_data2; + uint8_t opaque_v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_V \ + UINT32_C(0x1) + /* opaque is 7 b */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_OPAQUE_MASK \ + UINT32_C(0xfe) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_OPAQUE_SFT 1 + /* 8-lsb timestamp from POR (100-msec resolution) */ + uint8_t timestamp_lo; + /* 16-lsb timestamp from POR (100-msec resolution) */ + uint16_t timestamp_hi; + /* Event specific data */ + uint32_t event_data1; + /* PORT ID */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK \ + UINT32_C(0xffff) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT \ + 0 +} __attribute__((packed)); + +/* hwrm_async_event_cmpl_link_speed_cfg_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_link_speed_cfg_change { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_MASK \ + UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_SFT \ + 0 + /* HWRM Asynchronous Event Information */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT \ + UINT32_C(0x2e) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_LAST \ + HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT + /* Identifiers of events. */ + uint16_t event_id; + /* Link speed configuration change */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE \ + UINT32_C(0x6) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LAST \ + HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE + /* Event specific data */ + uint32_t event_data2; + uint8_t opaque_v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. 
+ */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_V \ + UINT32_C(0x1) + /* opaque is 7 b */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_MASK \ + UINT32_C(0xfe) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_SFT 1 + /* 8-lsb timestamp from POR (100-msec resolution) */ + uint8_t timestamp_lo; + /* 16-lsb timestamp from POR (100-msec resolution) */ + uint16_t timestamp_hi; + /* Event specific data */ + uint32_t event_data1; + /* PORT ID */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_MASK \ + UINT32_C(0xffff) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_SFT \ + 0 + /* + * If set to 1, it indicates that the supported link speeds + * configuration on the port has changed. + * If set to 0, then there is no change in supported link speeds + * configuration. + */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_SUPPORTED_LINK_SPEEDS_CHANGE \ + UINT32_C(0x10000) + /* + * If set to 1, it indicates that the link speed configuration + * on the port has become illegal or invalid. + * If set to 0, then the link speed configuration on the port is + * legal or valid. + */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_ILLEGAL_LINK_SPEED_CFG \ + UINT32_C(0x20000) +} __attribute__((packed)); + +/* hwrm_async_event_cmpl_port_phy_cfg_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_port_phy_cfg_change { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_TYPE_MASK \ + UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_TYPE_SFT \ + 0 + /* HWRM Asynchronous Event Information */ + #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT \ + UINT32_C(0x2e) + #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_TYPE_LAST \ + HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT + /* Identifiers of events. */ + uint16_t event_id; + /* Port PHY configuration change */ + #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_ID_PORT_PHY_CFG_CHANGE \ + UINT32_C(0x7) + #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_ID_LAST \ + HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_ID_PORT_PHY_CFG_CHANGE + /* Event specific data */ + uint32_t event_data2; + uint8_t opaque_v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_V \ + UINT32_C(0x1) + /* opaque is 7 b */ + #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_OPAQUE_MASK \ + UINT32_C(0xfe) + #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_OPAQUE_SFT 1 + /* 8-lsb timestamp from POR (100-msec resolution) */ + uint8_t timestamp_lo; + /* 16-lsb timestamp from POR (100-msec resolution) */ + uint16_t timestamp_hi; + /* Event specific data */ + uint32_t event_data1; + /* PORT ID */ + #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA1_PORT_ID_MASK \ + UINT32_C(0xffff) + #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA1_PORT_ID_SFT \ + 0 + /* + * If set to 1, it indicates that the FEC + * configuration on the port has changed. + * If set to 0, then there is no change in FEC configuration. 
+ */ + #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA1_FEC_CFG_CHANGE \ + UINT32_C(0x10000) + /* + * If set to 1, it indicates that the EEE configuration + * on the port has changed. + * If set to 0, then there is no change in EEE configuration + * on the port. + */ + #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA1_EEE_CFG_CHANGE \ + UINT32_C(0x20000) + /* + * If set to 1, it indicates that the pause configuration + * on the PHY has changed. + * If set to 0, then there is no change in the pause + * configuration on the PHY. + */ + #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA1_PAUSE_CFG_CHANGE \ + UINT32_C(0x40000) +} __attribute__((packed)); + +/* hwrm_async_event_cmpl_func_drvr_unload (size:128b/16B) */ +struct hwrm_async_event_cmpl_func_drvr_unload { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_MASK \ + UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_SFT 0 + /* HWRM Asynchronous Event Information */ + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT \ + UINT32_C(0x2e) + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_LAST \ + HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT + /* Identifiers of events. */ + uint16_t event_id; + /* Function driver unloaded */ + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_ID_FUNC_DRVR_UNLOAD \ + UINT32_C(0x10) + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_ID_LAST \ + HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_ID_FUNC_DRVR_UNLOAD + /* Event specific data */ + uint32_t event_data2; + uint8_t opaque_v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_V UINT32_C(0x1) + /* opaque is 7 b */ + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_MASK \ + UINT32_C(0xfe) + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_SFT 1 + /* 8-lsb timestamp from POR (100-msec resolution) */ + uint8_t timestamp_lo; + /* 16-lsb timestamp from POR (100-msec resolution) */ + uint16_t timestamp_hi; + /* Event specific data */ + uint32_t event_data1; + /* Function ID */ + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK \ + UINT32_C(0xffff) + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT \ + 0 +} __attribute__((packed)); + +/* hwrm_async_event_cmpl_func_drvr_load (size:128b/16B) */ +struct hwrm_async_event_cmpl_func_drvr_load { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_MASK \ + UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_SFT 0 + /* HWRM Asynchronous Event Information */ + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT \ + UINT32_C(0x2e) + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_LAST \ + HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT + /* Identifiers of events. 
*/ + uint16_t event_id; + /* Function driver loaded */ + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_ID_FUNC_DRVR_LOAD \ + UINT32_C(0x11) + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_ID_LAST \ + HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_ID_FUNC_DRVR_LOAD + /* Event specific data */ + uint32_t event_data2; + uint8_t opaque_v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_V UINT32_C(0x1) + /* opaque is 7 b */ + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_MASK UINT32_C(0xfe) + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_SFT 1 + /* 8-lsb timestamp from POR (100-msec resolution) */ + uint8_t timestamp_lo; + /* 16-lsb timestamp from POR (100-msec resolution) */ + uint16_t timestamp_hi; + /* Event specific data */ + uint32_t event_data1; + /* Function ID */ + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK \ + UINT32_C(0xffff) + #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0 +} __attribute__((packed)); + +/* hwrm_async_event_cmpl_func_flr_proc_cmplt (size:128b/16B) */ +struct hwrm_async_event_cmpl_func_flr_proc_cmplt { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_MASK \ + UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_SFT \ + 0 + /* HWRM Asynchronous Event Information */ + #define HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_HWRM_ASYNC_EVENT \ + UINT32_C(0x2e) + #define HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_LAST \ + HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_HWRM_ASYNC_EVENT + /* Identifiers of events. */ + uint16_t event_id; + /* Function FLR related processing has completed */ + #define HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_ID_FUNC_FLR_PROC_CMPLT \ + UINT32_C(0x12) + #define HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_ID_LAST \ + HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_ID_FUNC_FLR_PROC_CMPLT + /* Event specific data */ + uint32_t event_data2; + uint8_t opaque_v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_V \ + UINT32_C(0x1) + /* opaque is 7 b */ + #define HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_OPAQUE_MASK \ + UINT32_C(0xfe) + #define HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_OPAQUE_SFT 1 + /* 8-lsb timestamp from POR (100-msec resolution) */ + uint8_t timestamp_lo; + /* 16-lsb timestamp from POR (100-msec resolution) */ + uint16_t timestamp_hi; + /* Event specific data */ + uint32_t event_data1; + /* Function ID */ + #define HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_DATA1_FUNC_ID_MASK \ + UINT32_C(0xffff) + #define HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_DATA1_FUNC_ID_SFT \ + 0 +} __attribute__((packed)); + +/* hwrm_async_event_cmpl_pf_drvr_unload (size:128b/16B) */ +struct hwrm_async_event_cmpl_pf_drvr_unload { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. 
Odd values indicate 32B + * records. + */ + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_MASK \ + UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_SFT 0 + /* HWRM Asynchronous Event Information */ + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT \ + UINT32_C(0x2e) + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_LAST \ + HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT + /* Identifiers of events. */ + uint16_t event_id; + /* PF driver unloaded */ + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD \ + UINT32_C(0x20) + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_LAST \ + HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD + /* Event specific data */ + uint32_t event_data2; + uint8_t opaque_v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_V UINT32_C(0x1) + /* opaque is 7 b */ + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_MASK UINT32_C(0xfe) + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_SFT 1 + /* 8-lsb timestamp from POR (100-msec resolution) */ + uint8_t timestamp_lo; + /* 16-lsb timestamp from POR (100-msec resolution) */ + uint16_t timestamp_hi; + /* Event specific data */ + uint32_t event_data1; + /* PF ID */ + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK \ + UINT32_C(0xffff) + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT 0 + /* Indicates the physical port this pf belongs to */ + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_MASK \ + UINT32_C(0x70000) + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_SFT 16 +} __attribute__((packed)); + +/* hwrm_async_event_cmpl_pf_drvr_load (size:128b/16B) */ +struct hwrm_async_event_cmpl_pf_drvr_load { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_MASK \ + UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_SFT 0 + /* HWRM Asynchronous Event Information */ + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT \ + UINT32_C(0x2e) + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_LAST \ + HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT + /* Identifiers of events. */ + uint16_t event_id; + /* PF driver loaded */ + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_PF_DRVR_LOAD \ + UINT32_C(0x21) + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_LAST \ + HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_PF_DRVR_LOAD + /* Event specific data */ + uint32_t event_data2; + uint8_t opaque_v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. 
+ */ + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_V UINT32_C(0x1) + /* opaque is 7 b */ + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_MASK UINT32_C(0xfe) + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_SFT 1 + /* 8-lsb timestamp from POR (100-msec resolution) */ + uint8_t timestamp_lo; + /* 16-lsb timestamp from POR (100-msec resolution) */ + uint16_t timestamp_hi; + /* Event specific data */ + uint32_t event_data1; + /* PF ID */ + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK \ + UINT32_C(0xffff) + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0 + /* Indicates the physical port this pf belongs to */ + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_PORT_MASK \ + UINT32_C(0x70000) + #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_PORT_SFT 16 +} __attribute__((packed)); + +/* hwrm_async_event_cmpl_vf_flr (size:128b/16B) */ +struct hwrm_async_event_cmpl_vf_flr { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_MASK \ + UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_SFT 0 + /* HWRM Asynchronous Event Information */ + #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_HWRM_ASYNC_EVENT \ + UINT32_C(0x2e) + #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_LAST \ + HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_HWRM_ASYNC_EVENT + /* Identifiers of events. */ + uint16_t event_id; + /* VF Function Level Reset (FLR) */ + #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_ID_VF_FLR UINT32_C(0x30) + #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_ID_LAST \ + HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_ID_VF_FLR + /* Event specific data */ + uint32_t event_data2; + uint8_t opaque_v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_V UINT32_C(0x1) + /* opaque is 7 b */ + #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_MASK UINT32_C(0xfe) + #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_SFT 1 + /* 8-lsb timestamp from POR (100-msec resolution) */ + uint8_t timestamp_lo; + /* 16-lsb timestamp from POR (100-msec resolution) */ + uint16_t timestamp_hi; + /* Event specific data */ + uint32_t event_data1; + /* VF ID */ + #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_MASK \ + UINT32_C(0xffff) + #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_SFT 0 +} __attribute__((packed)); + +/* hwrm_async_event_cmpl_vf_mac_addr_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_vf_mac_addr_change { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_MASK \ + UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_SFT 0 + /* HWRM Asynchronous Event Information */ + #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_HWRM_ASYNC_EVENT \ + UINT32_C(0x2e) + #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_LAST \ + HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_HWRM_ASYNC_EVENT + /* Identifiers of events. 
*/ + uint16_t event_id; + /* VF MAC Address Change */ + #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_ID_VF_MAC_ADDR_CHANGE \ + UINT32_C(0x31) + #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_ID_LAST \ + HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_ID_VF_MAC_ADDR_CHANGE + /* Event specific data */ + uint32_t event_data2; + uint8_t opaque_v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_V \ + UINT32_C(0x1) + /* opaque is 7 b */ + #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_MASK \ + UINT32_C(0xfe) + #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_SFT 1 + /* 8-lsb timestamp from POR (100-msec resolution) */ + uint8_t timestamp_lo; + /* 16-lsb timestamp from POR (100-msec resolution) */ + uint16_t timestamp_hi; + /* Event specific data */ + uint32_t event_data1; + /* VF ID */ + #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_MASK \ + UINT32_C(0xffff) + #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_SFT \ + 0 +} __attribute__((packed)); + +/* hwrm_async_event_cmpl_pf_vf_comm_status_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_pf_vf_comm_status_change { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_MASK \ + UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_SFT \ + 0 + /* HWRM Asynchronous Event Information */ + #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT \ + UINT32_C(0x2e) + #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_LAST \ + HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT + /* Identifiers of events. */ + uint16_t event_id; + /* PF-VF communication channel status change. */ + #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_ID_PF_VF_COMM_STATUS_CHANGE \ + UINT32_C(0x32) + #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_ID_LAST \ + HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_ID_PF_VF_COMM_STATUS_CHANGE + /* Event specific data */ + uint32_t event_data2; + uint8_t opaque_v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_V \ + UINT32_C(0x1) + /* opaque is 7 b */ + #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_OPAQUE_MASK \ + UINT32_C(0xfe) + #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_OPAQUE_SFT 1 + /* 8-lsb timestamp from POR (100-msec resolution) */ + uint8_t timestamp_lo; + /* 16-lsb timestamp from POR (100-msec resolution) */ + uint16_t timestamp_hi; + /* Event specific data */ + uint32_t event_data1; + /* + * If this bit is set to 1, then it indicates that the PF-VF + * communication was lost and is now established. + * If this bit is set to 0, then it indicates that the PF-VF + * communication was established and is now lost. 
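A driver typically demultiplexes these completions on event_id before casting to the per-event layout (each is the same 16-byte record). A sketch reusing the link-status handler shown earlier; the dispatch function itself is illustrative, and endian conversion (rte_le_to_cpu_16() in a real driver) is elided:

#include "hsi_struct_def_dpdk.h"

/* Route a generic async event completion to a per-event handler using
 * the event identifiers defined earlier in this header. */
static void dispatch_async_event(const struct hwrm_async_event_cmpl *c)
{
	switch (c->event_id) {
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
		handle_link_status_change(
		    (const struct hwrm_async_event_cmpl_link_status_change *)c);
		break;
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE:
		/* event_data1 bit 0: 1 = channel now established, 0 = lost */
		break;
	default:
		break;
	}
}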
+ */ + #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_DATA1_COMM_ESTABLISHED \ + UINT32_C(0x1) +} __attribute__((packed)); + +/* hwrm_async_event_cmpl_vf_cfg_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_vf_cfg_change { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_MASK \ + UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_SFT 0 + /* HWRM Asynchronous Event Information */ + #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT \ + UINT32_C(0x2e) + #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_LAST \ + HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT + /* Identifiers of events. */ + uint16_t event_id; + /* VF Configuration Change */ + #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE \ + UINT32_C(0x33) + #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_LAST \ + HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE + /* Event specific data */ + uint32_t event_data2; + uint8_t opaque_v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_V UINT32_C(0x1) + /* opaque is 7 b */ + #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_MASK UINT32_C(0xfe) + #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_SFT 1 + /* 8-lsb timestamp from POR (100-msec resolution) */ + uint8_t timestamp_lo; + /* 16-lsb timestamp from POR (100-msec resolution) */ + uint16_t timestamp_hi; + /* + * Each flag provided in this field indicates a specific VF + * configuration change. At least one of these flags shall be set to 1 + * when an asynchronous event completion of this type is provided + * by the HWRM. + */ + uint32_t event_data1; + /* + * If this bit is set to 1, then the value of MTU + * was changed on this VF. + * If set to 0, then this bit should be ignored. + */ + #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MTU_CHANGE \ + UINT32_C(0x1) + /* + * If this bit is set to 1, then the value of MRU + * was changed on this VF. + * If set to 0, then this bit should be ignored. + */ + #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MRU_CHANGE \ + UINT32_C(0x2) + /* + * If this bit is set to 1, then the value of default MAC + * address was changed on this VF. + * If set to 0, then this bit should be ignored. + */ + #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_MAC_ADDR_CHANGE \ + UINT32_C(0x4) + /* + * If this bit is set to 1, then the value of default VLAN + * was changed on this VF. + * If set to 0, then this bit should be ignored. + */ + #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_VLAN_CHANGE \ + UINT32_C(0x8) +} __attribute__((packed)); + +/* hwrm_async_event_cmpl_llfc_pfc_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_llfc_pfc_change { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. 
*/ + #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_TYPE_MASK \ + UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_TYPE_SFT 0 + /* HWRM Asynchronous Event Information */ + #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_TYPE_HWRM_ASYNC_EVENT \ + UINT32_C(0x2e) + #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_TYPE_LAST \ + HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_TYPE_HWRM_ASYNC_EVENT + /* unused1 is 10 b */ + #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_UNUSED1_MASK \ + UINT32_C(0xffc0) + #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_UNUSED1_SFT 6 + /* Identifiers of events. */ + uint16_t event_id; + /* LLFC/PFC Configuration Change */ + #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_ID_LLFC_PFC_CHANGE \ + UINT32_C(0x34) + #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_ID_LAST \ + HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_ID_LLFC_PFC_CHANGE + /* Event specific data */ + uint32_t event_data2; + uint8_t opaque_v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_V UINT32_C(0x1) + /* opaque is 7 b */ + #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_OPAQUE_MASK \ + UINT32_C(0xfe) + #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_OPAQUE_SFT 1 + /* 8-lsb timestamp from POR (100-msec resolution) */ + uint8_t timestamp_lo; + /* 16-lsb timestamp from POR (100-msec resolution) */ + uint16_t timestamp_hi; + /* Event specific data */ + uint32_t event_data1; + /* Indicates llfc pfc status change */ + #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_LLFC_PFC_MASK \ + UINT32_C(0x3) + #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_LLFC_PFC_SFT \ + 0 + /* + * If this field is set to 1, then it indicates that llfc is + * enabled. + */ + #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_LLFC_PFC_LLFC \ + UINT32_C(0x1) + /* + * If this field is set to 2, then it indicates that pfc + * is enabled. + */ + #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_LLFC_PFC_PFC \ + UINT32_C(0x2) + #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_LLFC_PFC_LAST \ + HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_LLFC_PFC_PFC + /* Indicates the physical port on which this llfc pfc change occurred */ + #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_PORT_MASK \ + UINT32_C(0x1c) + #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_PORT_SFT \ + 2 + /* PORT ID */ + #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_PORT_ID_MASK \ + UINT32_C(0x1fffe0) + #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_PORT_ID_SFT \ + 5 +} __attribute__((packed)); + +/* hwrm_async_event_cmpl_hwrm_error (size:128b/16B) */ +struct hwrm_async_event_cmpl_hwrm_error { + uint16_t type; + /* + * This field indicates the exact type of the completion. + * By convention, the LSB identifies the length of the + * record in 16B units. Even values indicate 16B + * records. Odd values indicate 32B + * records. + */ + #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_MASK \ + UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_SFT 0 + /* HWRM Asynchronous Event Information */ + #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT \ + UINT32_C(0x2e) + #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_LAST \ + HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT + /* Identifiers of events. 
*/ + uint16_t event_id; + /* HWRM Error */ + #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR \ + UINT32_C(0xff) + #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_LAST \ + HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR + /* Event specific data */ + uint32_t event_data2; + /* Severity of HWRM Error */ + #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_MASK \ + UINT32_C(0xff) + #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_SFT 0 + /* Warning */ + #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_WARNING \ + UINT32_C(0x0) + /* Non-fatal Error */ + #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_NONFATAL \ + UINT32_C(0x1) + /* Fatal Error */ + #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL \ + UINT32_C(0x2) + #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_LAST \ + HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL + uint8_t opaque_v; + /* + * This value is written by the NIC such that it will be different + * for each pass through the completion queue. The even passes + * will write 1. The odd passes will write 0. + */ + #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_V UINT32_C(0x1) + /* opaque is 7 b */ + #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_MASK UINT32_C(0xfe) + #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_SFT 1 + /* 8-lsb timestamp from POR (100-msec resolution) */ + uint8_t timestamp_lo; + /* 16-lsb timestamp from POR (100-msec resolution) */ + uint16_t timestamp_hi; + /* Event specific data */ + uint32_t event_data1; + /* Time stamp for error event */ + #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP \ + UINT32_C(0x1) +} __attribute__((packed)); + +/******************* + * hwrm_func_reset * + *******************/ + + +/* hwrm_func_reset_input (size:192b/24B) */ +struct hwrm_func_reset_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t enables; + /* + * This bit must be '1' for the vf_id_valid field to be + * configured. + */ + #define HWRM_FUNC_RESET_INPUT_ENABLES_VF_ID_VALID UINT32_C(0x1) + /* + * The ID of the VF that this PF is trying to reset. + * Only the parent PF shall be allowed to reset a child VF. + * + * A parent PF driver shall use this field only when a specific child VF + * is requested to be reset. + */ + uint16_t vf_id; + /* This value indicates the level of a function reset. */ + uint8_t func_reset_level; + /* + * Reset the caller function and its children VFs (if any). If no + * children functions exist, then reset the caller function only. 
+ */ + #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETALL \ + UINT32_C(0x0) + /* Reset the caller function only */ + #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETME \ + UINT32_C(0x1) + /* + * Reset all children VFs of the caller function driver if the + * caller is a PF driver. + * It is an error to specify this level by a VF driver. + * It is an error to specify this level by a PF driver with + * no children VFs. + */ + #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETCHILDREN \ + UINT32_C(0x2) + /* + * Reset a specific VF of the caller function driver if the caller + * is the parent PF driver. + * It is an error to specify this level by a VF driver. + * It is an error to specify this level by a PF driver that is not + * the parent of the VF that is being requested to reset. + */ + #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETVF \ + UINT32_C(0x3) + #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_LAST \ + HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETVF + uint8_t unused_0; +} __attribute__((packed)); + +/* hwrm_func_reset_output (size:128b/16B) */ +struct hwrm_func_reset_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/******************** + * hwrm_func_getfid * + ********************/ + + +/* hwrm_func_getfid_input (size:192b/24B) */ +struct hwrm_func_getfid_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t enables; + /* + * This bit must be '1' for the pci_id field to be + * configured. + */ + #define HWRM_FUNC_GETFID_INPUT_ENABLES_PCI_ID UINT32_C(0x1) + /* + * This value is the PCI ID of the queried function. + * If ARI is enabled, then it is + * Bus Number (8b):Function Number(8b). Otherwise, it is + * Bus Number (8b):Device Number (5b):Function Number(3b). + */ + uint16_t pci_id; + uint8_t unused_0[2]; +} __attribute__((packed)); + +/* hwrm_func_getfid_output (size:128b/16B) */ +struct hwrm_func_getfid_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. 
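The request framing introduced with hwrm_func_reset_input (req_type, seq_id, target_id, resp_addr) can be exercised as follows. A minimal sketch, assuming the HWRM_FUNC_RESET req_type value defined in an earlier part of this header; hwrm_send_message() is a hypothetical stand-in for the driver's real transport (bnxt_hwrm.c provides the equivalent), and endian conversion is elided:

#include <stdint.h>
#include <string.h>
#include "hsi_struct_def_dpdk.h"

int hwrm_send_message(void *req, size_t len); /* hypothetical transport */

/* Ask the firmware to reset only the calling function (RESETME level). */
static int func_reset_self(uint16_t seq_id, uint64_t resp_dma_addr)
{
	struct hwrm_func_reset_input req;

	memset(&req, 0, sizeof(req));
	req.req_type = HWRM_FUNC_RESET; /* defined earlier in this header */
	req.seq_id = seq_id;            /* opaque to firmware, echoed back */
	req.target_id = 0xffff;         /* 0xFFFF targets the HWRM itself */
	req.resp_addr = resp_dma_addr;  /* physically contiguous response buffer */
	req.enables = 0;                /* vf_id is only needed for RESETVF */
	req.func_reset_level = HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETME;

	return hwrm_send_message(&req, sizeof(req));
}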
*/ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* + * FID value. This value is used to identify operations on the PCI + * bus as belonging to a particular PCI function. + */ + uint16_t fid; + uint8_t unused_0[5]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/********************** + * hwrm_func_vf_alloc * + **********************/ + + +/* hwrm_func_vf_alloc_input (size:192b/24B) */ +struct hwrm_func_vf_alloc_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t enables; + /* + * This bit must be '1' for the first_vf_id field to be + * configured. + */ + #define HWRM_FUNC_VF_ALLOC_INPUT_ENABLES_FIRST_VF_ID UINT32_C(0x1) + /* + * This value is used to identify a Virtual Function (VF). + * The scope of VF ID is local within a PF. + */ + uint16_t first_vf_id; + /* The number of virtual functions requested. */ + uint16_t num_vfs; +} __attribute__((packed)); + +/* hwrm_func_vf_alloc_output (size:128b/16B) */ +struct hwrm_func_vf_alloc_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* The ID of the first VF allocated. */ + uint16_t first_vf_id; + uint8_t unused_0[5]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/********************* + * hwrm_func_vf_free * + *********************/ + + +/* hwrm_func_vf_free_input (size:192b/24B) */ +struct hwrm_func_vf_free_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. 
+ */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t enables; + /* + * This bit must be '1' for the first_vf_id field to be + * configured. + */ + #define HWRM_FUNC_VF_FREE_INPUT_ENABLES_FIRST_VF_ID UINT32_C(0x1) + /* + * This value is used to identify a Virtual Function (VF). + * The scope of VF ID is local within a PF. + */ + uint16_t first_vf_id; + /* + * The number of virtual functions requested. + * 0xFFFF - Cleanup all children of this PF. + */ + uint16_t num_vfs; +} __attribute__((packed)); + +/* hwrm_func_vf_free_output (size:128b/16B) */ +struct hwrm_func_vf_free_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/******************** + * hwrm_func_vf_cfg * + ********************/ + + +/* hwrm_func_vf_cfg_input (size:448b/56B) */ +struct hwrm_func_vf_cfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t enables; + /* + * This bit must be '1' for the mtu field to be + * configured. + */ + #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_MTU \ + UINT32_C(0x1) + /* + * This bit must be '1' for the guest_vlan field to be + * configured. + */ + #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_GUEST_VLAN \ + UINT32_C(0x2) + /* + * This bit must be '1' for the async_event_cr field to be + * configured. 
+ */ + #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR \ + UINT32_C(0x4) + /* + * This bit must be '1' for the dflt_mac_addr field to be + * configured. + */ + #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_DFLT_MAC_ADDR \ + UINT32_C(0x8) + /* + * This bit must be '1' for the num_rsscos_ctxs field to be + * configured. + */ + #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS \ + UINT32_C(0x10) + /* + * This bit must be '1' for the num_cmpl_rings field to be + * configured. + */ + #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_CMPL_RINGS \ + UINT32_C(0x20) + /* + * This bit must be '1' for the num_tx_rings field to be + * configured. + */ + #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS \ + UINT32_C(0x40) + /* + * This bit must be '1' for the num_rx_rings field to be + * configured. + */ + #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS \ + UINT32_C(0x80) + /* + * This bit must be '1' for the num_l2_ctxs field to be + * configured. + */ + #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_L2_CTXS \ + UINT32_C(0x100) + /* + * This bit must be '1' for the num_vnics field to be + * configured. + */ + #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS \ + UINT32_C(0x200) + /* + * This bit must be '1' for the num_stat_ctxs field to be + * configured. + */ + #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_STAT_CTXS \ + UINT32_C(0x400) + /* + * This bit must be '1' for the num_hw_ring_grps field to be + * configured. + */ + #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS \ + UINT32_C(0x800) + /* + * The maximum transmission unit requested on the function. + * The HWRM should make sure that the mtu of + * the function does not exceed the mtu of the physical + * port that this function is associated with. + * + * In addition to requesting mtu per function, it is + * possible to configure mtu per transmit ring. + * By default, the mtu of each transmit ring associated + * with a function is equal to the mtu of the function. + * The HWRM should make sure that the mtu of each transmit + * ring that is assigned to a function has a valid mtu. + */ + uint16_t mtu; + /* + * The guest VLAN for the function being configured. + * This field's format is same as 802.1Q Tag's + * Tag Control Information (TCI) format that includes both + * Priority Code Point (PCP) and VLAN Identifier (VID). + */ + uint16_t guest_vlan; + /* + * ID of the target completion ring for receiving asynchronous + * event completions. If this field is not valid, then the + * HWRM shall use the default completion ring of the function + * that is being configured as the target completion ring for + * providing any asynchronous event completions for that + * function. + * If this field is valid, then the HWRM shall use the + * completion ring identified by this ID as the target + * completion ring for providing any asynchronous event + * completions for the function that is being configured. + */ + uint16_t async_event_cr; + /* + * This value is the current MAC address requested by the VF + * driver to be configured on this VF. A value of + * 00-00-00-00-00-00 indicates no MAC address configuration + * is requested by the VF driver. + * The parent PF driver may reject or overwrite this + * MAC address. + */ + uint8_t dflt_mac_addr[6]; + uint32_t flags; + /* + * This bit requests that the firmware test to see if all the assets + * requested in this command (i.e. number of TX rings) are available. + * The firmware will return an error if the requested assets are + * not available. 
The firmware will NOT reserve the assets if they + * are available. + */ + #define HWRM_FUNC_VF_CFG_INPUT_FLAGS_TX_ASSETS_TEST \ + UINT32_C(0x1) + /* + * This bit requests that the firmware test to see if all the assets + * requested in this command (i.e. number of RX rings) are available. + * The firmware will return an error if the requested assets are + * not available. The firmware will NOT reserve the assets if they + * are available. + */ + #define HWRM_FUNC_VF_CFG_INPUT_FLAGS_RX_ASSETS_TEST \ + UINT32_C(0x2) + /* + * This bit requests that the firmware test to see if all the assets + * requested in this command (i.e. number of CMPL rings) are available. + * The firmware will return an error if the requested assets are + * not available. The firmware will NOT reserve the assets if they + * are available. + */ + #define HWRM_FUNC_VF_CFG_INPUT_FLAGS_CMPL_ASSETS_TEST \ + UINT32_C(0x4) + /* + * This bit requests that the firmware test to see if all the assets + * requested in this command (i.e. number of RSS ctx) are available. + * The firmware will return an error if the requested assets are + * not available. The firmware will NOT reserve the assets if they + * are available. + */ + #define HWRM_FUNC_VF_CFG_INPUT_FLAGS_RSSCOS_CTX_ASSETS_TEST \ + UINT32_C(0x8) + /* + * This bit requests that the firmware test to see if all the assets + * requested in this command (i.e. number of ring groups) are available. + * The firmware will return an error if the requested assets are + * not available. The firmware will NOT reserve the assets if they + * are available. + */ + #define HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST \ + UINT32_C(0x10) + /* + * This bit requests that the firmware test to see if all the assets + * requested in this command (i.e. number of stat ctx) are available. + * The firmware will return an error if the requested assets are + * not available. The firmware will NOT reserve the assets if they + * are available. + */ + #define HWRM_FUNC_VF_CFG_INPUT_FLAGS_STAT_CTX_ASSETS_TEST \ + UINT32_C(0x20) + /* + * This bit requests that the firmware test to see if all the assets + * requested in this command (i.e. number of VNICs) are available. + * The firmware will return an error if the requested assets are + * not available. The firmware will NOT reserve the assets if they + * are available. + */ + #define HWRM_FUNC_VF_CFG_INPUT_FLAGS_VNIC_ASSETS_TEST \ + UINT32_C(0x40) + /* + * This bit requests that the firmware test to see if all the assets + * requested in this command (i.e. number of L2 ctx) are available. + * The firmware will return an error if the requested assets are + * not available. The firmware will NOT reserve the assets if they + * are available. + */ + #define HWRM_FUNC_VF_CFG_INPUT_FLAGS_L2_CTX_ASSETS_TEST \ + UINT32_C(0x80) + /* The number of RSS/COS contexts requested for the VF. */ + uint16_t num_rsscos_ctxs; + /* The number of completion rings requested for the VF. */ + uint16_t num_cmpl_rings; + /* The number of transmit rings requested for the VF. */ + uint16_t num_tx_rings; + /* The number of receive rings requested for the VF. */ + uint16_t num_rx_rings; + /* The number of L2 contexts requested for the VF. */ + uint16_t num_l2_ctxs; + /* The number of vnics requested for the VF. */ + uint16_t num_vnics; + /* The number of statistic contexts requested for the VF. */ + uint16_t num_stat_ctxs; + /* The number of HW ring groups requested for the VF.
*/ + uint16_t num_hw_ring_grps; + uint8_t unused_0[4]; +} __attribute__((packed)); + +/* hwrm_func_vf_cfg_output (size:128b/16B) */ +struct hwrm_func_vf_cfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/******************* + * hwrm_func_qcaps * + *******************/ + + +/* hwrm_func_qcaps_input (size:192b/24B) */ +struct hwrm_func_qcaps_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* + * Function ID of the function that is being queried. + * 0xFF... (All Fs) if the query is for the requesting + * function. + */ + uint16_t fid; + uint8_t unused_0[6]; +} __attribute__((packed)); + +/* hwrm_func_qcaps_output (size:640b/80B) */ +struct hwrm_func_qcaps_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* + * FID value. This value is used to identify operations on the PCI + * bus as belonging to a particular PCI function. + */ + uint16_t fid; + /* + * Port ID of port that this function is associated with. + * Valid only for the PF. + * 0xFF... (All Fs) if this function is not associated with + * any port. + * 0xFF... (All Fs) if this function is called from a VF. + */ + uint16_t port_id; + uint32_t flags; + /* If 1, then Push mode is supported on this function. */ + #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PUSH_MODE_SUPPORTED \ + UINT32_C(0x1) + /* + * If 1, then the global MSI-X auto-masking is enabled for the + * device. + */ + #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_GLOBAL_MSIX_AUTOMASKING \ + UINT32_C(0x2) + /* + * If 1, then the Precision Time Protocol (PTP) processing + * is supported on this function. + * The HWRM should enable PTP on only a single Physical + * Function (PF) per port. 
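+ * + * As an illustrative sketch (the driver-side names here are + * hypothetical), a PF driver could gate its PTP setup on this + * capability after byte-swapping the response word: + * + * uint32_t flags = rte_le_to_cpu_32(resp->flags); + * if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) + * bnxt_ptp_setup(bp); + * + * where bnxt_ptp_setup() stands in for whatever PTP initialization + * the driver performs.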
+ */ + #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED \ + UINT32_C(0x4) + /* + * If 1, then RDMA over Converged Ethernet (RoCE) v1 + * is supported on this function. + */ + #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ROCE_V1_SUPPORTED \ + UINT32_C(0x8) + /* + * If 1, then RDMA over Converged Ethernet (RoCE) v2 + * is supported on this function. + */ + #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ROCE_V2_SUPPORTED \ + UINT32_C(0x10) + /* + * If 1, then control and configuration of WoL magic packet + * are supported on this function. + */ + #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_WOL_MAGICPKT_SUPPORTED \ + UINT32_C(0x20) + /* + * If 1, then control and configuration of bitmap pattern + * packet are supported on this function. + */ + #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_WOL_BMP_SUPPORTED \ + UINT32_C(0x40) + /* + * If set to 1, then the control and configuration of rate limit + * of an allocated TX ring on the queried function is supported. + */ + #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_TX_RING_RL_SUPPORTED \ + UINT32_C(0x80) + /* + * If 1, then control and configuration of minimum and + * maximum bandwidths are supported on the queried function. + */ + #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_TX_BW_CFG_SUPPORTED \ + UINT32_C(0x100) + /* + * If the query is for a VF, then this flag shall be ignored. + * If this query is for a PF and this flag is set to 1, + * then the PF has the capability to set the rate limits + * on the TX rings of its children VFs. + * If this query is for a PF and this flag is set to 0, then + * the PF does not have the capability to set the rate limits + * on the TX rings of its children VFs. + */ + #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_VF_TX_RING_RL_SUPPORTED \ + UINT32_C(0x200) + /* + * If the query is for a VF, then this flag shall be ignored. + * If this query is for a PF and this flag is set to 1, + * then the PF has the capability to set the minimum and/or + * maximum bandwidths for its children VFs. + * If this query is for a PF and this flag is set to 0, then + * the PF does not have the capability to set the minimum or + * maximum bandwidths for its children VFs. + */ + #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_VF_BW_CFG_SUPPORTED \ + UINT32_C(0x400) + /* + * Standard TX Ring mode is used for the allocation of TX ring + * and underlying scheduling resources that allow bandwidth + * reservation and limit settings on the queried function. + * If set to 1, then standard TX ring mode is supported + * on the queried function. + * If set to 0, then standard TX ring mode is not available + * on the queried function. + */ + #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_STD_TX_RING_MODE_SUPPORTED \ + UINT32_C(0x800) + /* + * If the query is for a VF, then this flag shall be ignored. + * If this query is for a PF and this flag is set to 1, + * then the PF has the capability to detect GENEVE tunnel + * flags. + */ + #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_GENEVE_TUN_FLAGS_SUPPORTED \ + UINT32_C(0x1000) + /* + * If the query is for a VF, then this flag shall be ignored. + * If this query is for a PF and this flag is set to 1, + * then the PF has the capability to detect NVGRE tunnel + * flags. + */ + #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_NVGRE_TUN_FLAGS_SUPPORTED \ + UINT32_C(0x2000) + /* + * If the query is for a VF, then this flag shall be ignored. + * If this query is for a PF and this flag is set to 1, + * then the PF has the capability to detect GRE tunnel + * flags.
+ */ + #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_GRE_TUN_FLAGS_SUPPORTED \ + UINT32_C(0x4000) + /* + * If the query is for a VF, then this flag shall be ignored. + * If this query is for a PF and this flag is set to 1, + * then the PF has the capability to detect MPLS tunnel + * flags. + */ + #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_MPLS_TUN_FLAGS_SUPPORTED \ + UINT32_C(0x8000) + /* + * If the query is for a VF, then this flag shall be ignored. + * If this query is for a PF and this flag is set to 1, + * then the PF has the capability to support PCIe stats. + */ + #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PCIE_STATS_SUPPORTED \ + UINT32_C(0x10000) + /* + * This value is the current MAC address configured for this + * function. A value of 00-00-00-00-00-00 indicates no + * MAC address is currently configured. + */ + uint8_t mac_address[6]; + /* + * The maximum number of RSS/COS contexts that can be + * allocated to the function. + */ + uint16_t max_rsscos_ctx; + /* + * The maximum number of completion rings that can be + * allocated to the function. + */ + uint16_t max_cmpl_rings; + /* + * The maximum number of transmit rings that can be + * allocated to the function. + */ + uint16_t max_tx_rings; + /* + * The maximum number of receive rings that can be + * allocated to the function. + */ + uint16_t max_rx_rings; + /* + * The maximum number of L2 contexts that can be + * allocated to the function. + */ + uint16_t max_l2_ctxs; + /* + * The maximum number of VNICs that can be + * allocated to the function. + */ + uint16_t max_vnics; + /* + * The identifier for the first VF enabled on a PF. This + * is valid only on the PF with SR-IOV enabled. + * 0xFF... (All Fs) if this command is called on a PF with + * SR-IOV disabled or on a VF. + */ + uint16_t first_vf_id; + /* + * The maximum number of VFs that can be + * allocated to the function. This is valid only on the + * PF with SR-IOV enabled. 0xFF... (All Fs) if this + * command is called on a PF with SR-IOV disabled or + * on a VF. + */ + uint16_t max_vfs; + /* + * The maximum number of statistic contexts that can be + * allocated to the function. + */ + uint16_t max_stat_ctx; + /* + * The maximum number of Encapsulation records that can be + * offloaded by this function. + */ + uint32_t max_encap_records; + /* + * The maximum number of decapsulation records that can + * be offloaded by this function. + */ + uint32_t max_decap_records; + /* + * The maximum number of Exact Match (EM) flows that can be + * offloaded by this function on the TX side. + */ + uint32_t max_tx_em_flows; + /* + * The maximum number of Wildcard Match (WM) flows that can + * be offloaded by this function on the TX side. + */ + uint32_t max_tx_wm_flows; + /* + * The maximum number of Exact Match (EM) flows that can be + * offloaded by this function on the RX side. + */ + uint32_t max_rx_em_flows; + /* + * The maximum number of Wildcard Match (WM) flows that can + * be offloaded by this function on the RX side. + */ + uint32_t max_rx_wm_flows; + /* + * The maximum number of multicast filters that can + * be supported by this function on the RX side. + */ + uint32_t max_mcast_filters; + /* + * The maximum value of flow_id that can be supported + * in completion records. + */ + uint32_t max_flow_id; + /* + * The maximum number of HW ring groups that can be + * supported on this function. + */ + uint32_t max_hw_ring_grps; + /* + * The maximum number of strict priority transmit rings + * that can be allocated to the function.
+ * This number indicates the maximum number of TX rings + * that can be assigned strict priorities out of the + * maximum number of TX rings that can be allocated + * (max_tx_rings) to the function. + */ + uint16_t max_sp_tx_rings; + uint8_t unused_0; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/****************** + * hwrm_func_qcfg * + ******************/ + + +/* hwrm_func_qcfg_input (size:192b/24B) */ +struct hwrm_func_qcfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* + * Function ID of the function that is being queried. + * 0xFF... (All Fs) if the query is for the requesting + * function. + */ + uint16_t fid; + uint8_t unused_0[6]; +} __attribute__((packed)); + +/* hwrm_func_qcfg_output (size:640b/80B) */ +struct hwrm_func_qcfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* + * FID value. This value is used to identify operations on the PCI + * bus as belonging to a particular PCI function. + */ + uint16_t fid; + /* + * Port ID of port that this function is associated with. + * 0xFF... (All Fs) if this function is not associated with + * any port. + */ + uint16_t port_id; + /* + * This value is the current VLAN setting for this + * function. The value of 0 for this field indicates + * no priority tagging or VLAN is used. + * This field's format is same as 802.1Q Tag's + * Tag Control Information (TCI) format that includes both + * Priority Code Point (PCP) and VLAN Identifier (VID). + */ + uint16_t vlan; + uint16_t flags; + /* + * If 1, then magic packet based Out-Of-Box WoL is enabled on + * the port associated with this function. + */ + #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_OOB_WOL_MAGICPKT_ENABLED \ + UINT32_C(0x1) + /* + * If 1, then bitmap pattern based Out-Of-Box WoL packet is enabled + * on the port associated with this function. + */ + #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_OOB_WOL_BMP_ENABLED \ + UINT32_C(0x2) + /* + * If set to 1, then FW based DCBX agent is enabled and running on + * the port associated with this function. + * If set to 0, then DCBX agent is not running in the firmware. 
+ */ + #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_FW_DCBX_AGENT_ENABLED \ + UINT32_C(0x4) + /* + * Standard TX Ring mode is used for the allocation of TX ring + * and underlying scheduling resources that allow bandwidth + * reservation and limit settings on the queried function. + * If set to 1, then standard TX ring mode is enabled + * on the queried function. + * If set to 0, then the standard TX ring mode is disabled + * on the queried function. In this extended TX ring resource + * mode, the minimum and maximum bandwidth settings are not + * supported to allow the allocation of TX rings to span multiple + * scheduler nodes. + */ + #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_STD_TX_RING_MODE_ENABLED \ + UINT32_C(0x8) + /* + * If set to 1, then FW based LLDP agent is enabled and running on + * the port associated with this function. + * If set to 0, then the LLDP agent is not running in the firmware. + */ + #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_FW_LLDP_AGENT_ENABLED \ + UINT32_C(0x10) + /* + * If set to 1, then multi-host mode is active for this function. + * If set to 0, then multi-host mode is inactive for this function + * or not applicable for this device. + */ + #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST \ + UINT32_C(0x20) + /* + * This value is the current MAC address configured for this + * function. A value of 00-00-00-00-00-00 indicates no + * MAC address is currently configured. + */ + uint8_t mac_address[6]; + /* + * This value is the current PCI ID of this + * function. If ARI is enabled, then it is + * Bus Number (8b):Function Number(8b). Otherwise, it is + * Bus Number (8b):Device Number (4b):Function Number(4b). + * If multi-host mode is active, the 4 lsb will indicate + * the PF index for this function. + */ + uint16_t pci_id; + /* + * The number of RSS/COS contexts currently + * allocated to the function. + */ + uint16_t alloc_rsscos_ctx; + /* + * The number of completion rings currently allocated to + * the function. This does not include the rings allocated + * to any children functions if any. + */ + uint16_t alloc_cmpl_rings; + /* + * The number of transmit rings currently allocated to + * the function. This does not include the rings allocated + * to any children functions if any. + */ + uint16_t alloc_tx_rings; + /* + * The number of receive rings currently allocated to + * the function. This does not include the rings allocated + * to any children functions if any. + */ + uint16_t alloc_rx_rings; + /* The allocated number of L2 contexts to the function. */ + uint16_t alloc_l2_ctx; + /* The allocated number of vnics to the function. */ + uint16_t alloc_vnics; + /* + * The maximum transmission unit of the function. + * For rings allocated on this function, this default + * value is used if ring MTU is not specified. + */ + uint16_t mtu; + /* + * The maximum receive unit of the function. + * For vnics allocated on this function, this default + * value is used if vnic MRU is not specified. + */ + uint16_t mru; + /* The statistics context assigned to a function. */ + uint16_t stat_ctx_id; + /* + * The HWRM shall return Unknown value for this field + * when this command is used to query VF's configuration.
+ */ + uint8_t port_partition_type; + /* Single physical function */ + #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_SPF UINT32_C(0x0) + /* Multiple physical functions */ + #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_MPFS UINT32_C(0x1) + /* Network Partitioning 1.0 */ + #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0 UINT32_C(0x2) + /* Network Partitioning 1.5 */ + #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5 UINT32_C(0x3) + /* Network Partitioning 2.0 */ + #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0 UINT32_C(0x4) + /* Unknown */ + #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_UNKNOWN \ + UINT32_C(0xff) + #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_LAST \ + HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_UNKNOWN + /* + * This field will indicate the number of physical functions on this port_partition. + * HWRM shall return unavail (i.e. a value of 0) for this field + * when this command is used to query VF's configuration or + * from older firmware that doesn't support this field. + */ + uint8_t port_pf_cnt; + /* number of PFs is not available */ + #define HWRM_FUNC_QCFG_OUTPUT_PORT_PF_CNT_UNAVAIL UINT32_C(0x0) + #define HWRM_FUNC_QCFG_OUTPUT_PORT_PF_CNT_LAST \ + HWRM_FUNC_QCFG_OUTPUT_PORT_PF_CNT_UNAVAIL + /* + * The default VNIC ID assigned to a function that is + * being queried. + */ + uint16_t dflt_vnic_id; + uint16_t max_mtu_configured; + /* + * Minimum BW allocated for this function. + * The HWRM will translate this value into byte counter and + * time interval used for the scheduler inside the device. + * A value of 0 indicates the minimum bandwidth is not + * configured. + */ + uint32_t min_bw; + /* The bandwidth value. */ + #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_SFT 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_SCALE_LAST \ + HWRM_FUNC_QCFG_OUTPUT_MIN_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_SFT 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_LAST \ + HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_INVALID + /* + * Maximum BW allocated for this function. + * The HWRM will translate this value into byte counter and + * time interval used for the scheduler inside the device. + * A value of 0 indicates that the maximum bandwidth is not + * configured.
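+ * + * Illustrative encoding example (using the MAX_BW defines that + * follow; a sketch, not normative text): a limit of 2500 Mbit/s + * would appear as + * + * (2500 << HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_SFT) | + * HWRM_FUNC_QCFG_OUTPUT_MAX_BW_SCALE_BITS | + * HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_MEGA + * + * i.e. a 28-bit bandwidth value, one scale bit (bits vs. bytes) and + * a 3-bit unit field in the top bits.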
+ */ + uint32_t max_bw; + /* The bandwidth value. */ + #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_SFT 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_SCALE_LAST \ + HWRM_FUNC_QCFG_OUTPUT_MAX_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_SFT 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_LAST \ + HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_INVALID + /* + * This value indicates the Edge virtual bridge mode for the + * domain that this function belongs to. + */ + uint8_t evb_mode; + /* No Edge Virtual Bridging (EVB) */ + #define HWRM_FUNC_QCFG_OUTPUT_EVB_MODE_NO_EVB UINT32_C(0x0) + /* Virtual Ethernet Bridge (VEB) */ + #define HWRM_FUNC_QCFG_OUTPUT_EVB_MODE_VEB UINT32_C(0x1) + /* Virtual Ethernet Port Aggregator (VEPA) */ + #define HWRM_FUNC_QCFG_OUTPUT_EVB_MODE_VEPA UINT32_C(0x2) + #define HWRM_FUNC_QCFG_OUTPUT_EVB_MODE_LAST \ + HWRM_FUNC_QCFG_OUTPUT_EVB_MODE_VEPA + uint8_t options; + /* + * This value indicates the PCIE device cache line size. + * The cache line size allows the DMA writes to terminate and + * start at the cache boundary. + */ + #define HWRM_FUNC_QCFG_OUTPUT_OPTIONS_CACHE_LINESIZE_MASK \ + UINT32_C(0x3) + #define HWRM_FUNC_QCFG_OUTPUT_OPTIONS_CACHE_LINESIZE_SFT 0 + /* Cache Line Size 64 bytes */ + #define HWRM_FUNC_QCFG_OUTPUT_OPTIONS_CACHE_LINESIZE_SIZE_64 \ + UINT32_C(0x0) + /* Cache Line Size 128 bytes */ + #define HWRM_FUNC_QCFG_OUTPUT_OPTIONS_CACHE_LINESIZE_SIZE_128 \ + UINT32_C(0x1) + #define HWRM_FUNC_QCFG_OUTPUT_OPTIONS_CACHE_LINESIZE_LAST \ + HWRM_FUNC_QCFG_OUTPUT_OPTIONS_CACHE_LINESIZE_SIZE_128 + /* Reserved for future. */ + #define HWRM_FUNC_QCFG_OUTPUT_OPTIONS_RSVD_MASK \ + UINT32_C(0xfc) + #define HWRM_FUNC_QCFG_OUTPUT_OPTIONS_RSVD_SFT 2 + /* + * The number of VFs that are allocated to the function. + * This is valid only on the PF with SR-IOV enabled. + * 0xFF... (All Fs) if this command is called on a PF with + * SR-IOV disabled or on a VF. + */ + uint16_t alloc_vfs; + /* + * The number of allocated multicast filters for this + * function on the RX side. + */ + uint32_t alloc_mcast_filters; + /* + * The number of allocated HW ring groups for this + * function. 
+ */ + uint32_t alloc_hw_ring_grps; + /* + * The number of strict priority transmit rings out of + * currently allocated TX rings to the function + * (alloc_tx_rings). + */ + uint16_t alloc_sp_tx_rings; + /* + * The number of statistics contexts + * currently reserved for the function. + */ + uint16_t alloc_stat_ctx; + /* + * This field specifies how many NQs are reserved for the PF. + * Remaining NQs that belong to the PF are available for VFs. + * Once a PF has created VFs, it cannot change how many NQs are + * reserved for itself (since the NQs must be contiguous in HW). + */ + uint16_t alloc_msix; + uint8_t unused_2[5]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/*********************** + * hwrm_func_vlan_qcfg * + ***********************/ + + +/* hwrm_func_vlan_qcfg_input (size:192b/24B) */ +struct hwrm_func_vlan_qcfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* + * Function ID of the function that is being + * configured. + * If set to 0xFF... (All Fs), then the configuration is + * for the requesting function. + */ + uint16_t fid; + uint8_t unused_0[6]; +} __attribute__((packed)); + +/* hwrm_func_vlan_qcfg_output (size:320b/40B) */ +struct hwrm_func_vlan_qcfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; + /* S-TAG VLAN identifier configured for the function. */ + uint16_t stag_vid; + /* S-TAG PCP value configured for the function. */ + uint8_t stag_pcp; + uint8_t unused_1; + /* + * S-TAG TPID value configured for the function. This field is specified in + * network byte order. + */ + uint16_t stag_tpid; + /* C-TAG VLAN identifier configured for the function. */ + uint16_t ctag_vid; + /* C-TAG PCP value configured for the function. 
*/ + uint8_t ctag_pcp; + uint8_t unused_2; + /* + * C-TAG TPID value configured for the function. This field is specified in + * network byte order. + */ + uint16_t ctag_tpid; + /* Future use. */ + uint32_t rsvd2; + /* Future use. */ + uint32_t rsvd3; + uint32_t unused_3; +} __attribute__((packed)); + +/********************** + * hwrm_func_vlan_cfg * + **********************/ + + +/* hwrm_func_vlan_cfg_input (size:384b/48B) */ +struct hwrm_func_vlan_cfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* + * Function ID of the function that is being + * configured. + * If set to 0xFF... (All Fs), then the configuration is + * for the requesting function. + */ + uint16_t fid; + uint8_t unused_0[2]; + uint32_t enables; + /* + * This bit must be '1' for the stag_vid field to be + * configured. + */ + #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_STAG_VID UINT32_C(0x1) + /* + * This bit must be '1' for the ctag_vid field to be + * configured. + */ + #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_CTAG_VID UINT32_C(0x2) + /* + * This bit must be '1' for the stag_pcp field to be + * configured. + */ + #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_STAG_PCP UINT32_C(0x4) + /* + * This bit must be '1' for the ctag_pcp field to be + * configured. + */ + #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_CTAG_PCP UINT32_C(0x8) + /* + * This bit must be '1' for the stag_tpid field to be + * configured. + */ + #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_STAG_TPID UINT32_C(0x10) + /* + * This bit must be '1' for the ctag_tpid field to be + * configured. + */ + #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_CTAG_TPID UINT32_C(0x20) + /* S-TAG VLAN identifier configured for the function. */ + uint16_t stag_vid; + /* S-TAG PCP value configured for the function. */ + uint8_t stag_pcp; + uint8_t unused_1; + /* + * S-TAG TPID value configured for the function. This field is specified in + * network byte order. + */ + uint16_t stag_tpid; + /* C-TAG VLAN identifier configured for the function. */ + uint16_t ctag_vid; + /* C-TAG PCP value configured for the function. */ + uint8_t ctag_pcp; + uint8_t unused_2; + /* + * C-TAG TPID value configured for the function. This field is specified in + * network byte order. + */ + uint16_t ctag_tpid; + /* Future use. */ + uint32_t rsvd1; + /* Future use. */ + uint32_t rsvd2; + uint8_t unused_3[4]; +} __attribute__((packed)); + +/* hwrm_func_vlan_cfg_output (size:128b/16B) */ +struct hwrm_func_vlan_cfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. 
*/ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/***************** + * hwrm_func_cfg * + *****************/ + + +/* hwrm_func_cfg_input (size:704b/88B) */ +struct hwrm_func_cfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* + * Function ID of the function that is being + * configured. + * If set to 0xFF... (All Fs), then the configuration is + * for the requesting function. + */ + uint16_t fid; + /* + * This field specifies how many NQs will be reserved for the PF. + * Remaining NQs that belong to the PF become available for VFs. + * Once a PF has created VFs, it cannot change how many NQs are + * reserved for itself (since the NQs must be contiguous in HW). + */ + uint16_t num_msix; + uint32_t flags; + /* + * When this bit is '1', the function is disabled with + * source MAC address check. + * This is an anti-spoofing check. If this flag is set, + * then the function shall be configured to disallow + * transmission of frames with the source MAC address that + * is configured for this function. + */ + #define HWRM_FUNC_CFG_INPUT_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE \ + UINT32_C(0x1) + /* + * When this bit is '1', the function is enabled with + * source MAC address check. + * This is an anti-spoofing check. If this flag is set, + * then the function shall be configured to allow + * transmission of frames with the source MAC address that + * is configured for this function. + */ + #define HWRM_FUNC_CFG_INPUT_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE \ + UINT32_C(0x2) + /* reserved. */ + #define HWRM_FUNC_CFG_INPUT_FLAGS_RSVD_MASK \ + UINT32_C(0x1fc) + #define HWRM_FUNC_CFG_INPUT_FLAGS_RSVD_SFT 2 + /* + * Standard TX Ring mode is used for the allocation of TX ring + * and underlying scheduling resources that allow bandwidth + * reservation and limit settings on the queried function. + * If set to 1, then standard TX ring mode is requested to be + * enabled on the function being configured. + */ + #define HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE \ + UINT32_C(0x200) + /* + * Standard TX Ring mode is used for the allocation of TX ring + * and underlying scheduling resources that allow bandwidth + * reservation and limit settings on the queried function.
+ * If set to 1, then the standard TX ring mode is requested to + * be disabled on the function being configured. In this extended + * TX ring resource mode, the minimum and maximum bandwidth settings + * are not supported to allow the allocation of TX rings to + * span multiple scheduler nodes. + */ + #define HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE \ + UINT32_C(0x400) + /* + * If this bit is set, virtual MAC address configured + * in this command will be persistent over warm boot. + */ + #define HWRM_FUNC_CFG_INPUT_FLAGS_VIRT_MAC_PERSIST \ + UINT32_C(0x800) + /* + * This bit only applies to the VF. If this bit is set, the statistic + * context counters will not be cleared when the statistic context is freed + * or a function reset is called on VF. This bit will be cleared when the PF + * is unloaded or a function reset is called on the PF. + */ + #define HWRM_FUNC_CFG_INPUT_FLAGS_NO_AUTOCLEAR_STATISTIC \ + UINT32_C(0x1000) + /* + * This bit requests that the firmware test to see if all the assets + * requested in this command (i.e. number of TX rings) are available. + * The firmware will return an error if the requested assets are + * not available. The firmware will NOT reserve the assets if they + * are available. + */ + #define HWRM_FUNC_CFG_INPUT_FLAGS_TX_ASSETS_TEST \ + UINT32_C(0x2000) + /* + * This bit requests that the firmware test to see if all the assets + * requested in this command (i.e. number of RX rings) are available. + * The firmware will return an error if the requested assets are + * not available. The firmware will NOT reserve the assets if they + * are available. + */ + #define HWRM_FUNC_CFG_INPUT_FLAGS_RX_ASSETS_TEST \ + UINT32_C(0x4000) + /* + * This bit requests that the firmware test to see if all the assets + * requested in this command (i.e. number of CMPL rings) are available. + * The firmware will return an error if the requested assets are + * not available. The firmware will NOT reserve the assets if they + * are available. + */ + #define HWRM_FUNC_CFG_INPUT_FLAGS_CMPL_ASSETS_TEST \ + UINT32_C(0x8000) + /* + * This bit requests that the firmware test to see if all the assets + * requested in this command (i.e. number of RSS ctx) are available. + * The firmware will return an error if the requested assets are + * not available. The firmware will NOT reserve the assets if they + * are available. + */ + #define HWRM_FUNC_CFG_INPUT_FLAGS_RSSCOS_CTX_ASSETS_TEST \ + UINT32_C(0x10000) + /* + * This bit requests that the firmware test to see if all the assets + * requested in this command (i.e. number of ring groups) are available. + * The firmware will return an error if the requested assets are + * not available. The firmware will NOT reserve the assets if they + * are available. + */ + #define HWRM_FUNC_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST \ + UINT32_C(0x20000) + /* + * This bit requests that the firmware test to see if all the assets + * requested in this command (i.e. number of stat ctx) are available. + * The firmware will return an error if the requested assets are + * not available. The firmware will NOT reserve the assets if they + * are available. + */ + #define HWRM_FUNC_CFG_INPUT_FLAGS_STAT_CTX_ASSETS_TEST \ + UINT32_C(0x40000) + /* + * This bit requests that the firmware test to see if all the assets + * requested in this command (i.e. number of VNICs) are available. + * The firmware will return an error if the requested assets are + * not available. The firmware will NOT reserve the assets if they + * are available.
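+ * + * Illustrative usage (a sketch of the intended probe-then-commit + * pattern, not a mandated sequence): a driver may first issue + * HWRM_FUNC_CFG with this flag set and the desired num_vnics to + * check availability, and only on success repeat the command + * without the test flag to actually reserve the assets.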
+ */ + #define HWRM_FUNC_CFG_INPUT_FLAGS_VNIC_ASSETS_TEST \ + UINT32_C(0x80000) + /* + * This bit requests that the firmware test to see if all the assets + * requested in this command (i.e. number of L2 ctx) are available. + * The firmware will return an error if the requested assets are + * not available. The firmware will NOT reserve the assets if they + * are available. + */ + #define HWRM_FUNC_CFG_INPUT_FLAGS_L2_CTX_ASSETS_TEST \ + UINT32_C(0x100000) + uint32_t enables; + /* + * This bit must be '1' for the mtu field to be + * configured. + */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_MTU \ + UINT32_C(0x1) + /* + * This bit must be '1' for the mru field to be + * configured. + */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_MRU \ + UINT32_C(0x2) + /* + * This bit must be '1' for the num_rsscos_ctxs field to be + * configured. + */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS \ + UINT32_C(0x4) + /* + * This bit must be '1' for the num_cmpl_rings field to be + * configured. + */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS \ + UINT32_C(0x8) + /* + * This bit must be '1' for the num_tx_rings field to be + * configured. + */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS \ + UINT32_C(0x10) + /* + * This bit must be '1' for the num_rx_rings field to be + * configured. + */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS \ + UINT32_C(0x20) + /* + * This bit must be '1' for the num_l2_ctxs field to be + * configured. + */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS \ + UINT32_C(0x40) + /* + * This bit must be '1' for the num_vnics field to be + * configured. + */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS \ + UINT32_C(0x80) + /* + * This bit must be '1' for the num_stat_ctxs field to be + * configured. + */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS \ + UINT32_C(0x100) + /* + * This bit must be '1' for the dflt_mac_addr field to be + * configured. + */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR \ + UINT32_C(0x200) + /* + * This bit must be '1' for the dflt_vlan field to be + * configured. + */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN \ + UINT32_C(0x400) + /* + * This bit must be '1' for the dflt_ip_addr field to be + * configured. + */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_IP_ADDR \ + UINT32_C(0x800) + /* + * This bit must be '1' for the min_bw field to be + * configured. + */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_MIN_BW \ + UINT32_C(0x1000) + /* + * This bit must be '1' for the max_bw field to be + * configured. + */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_MAX_BW \ + UINT32_C(0x2000) + /* + * This bit must be '1' for the async_event_cr field to be + * configured. + */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR \ + UINT32_C(0x4000) + /* + * This bit must be '1' for the vlan_antispoof_mode field to be + * configured. + */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE \ + UINT32_C(0x8000) + /* + * This bit must be '1' for the allowed_vlan_pris field to be + * configured. + */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_ALLOWED_VLAN_PRIS \ + UINT32_C(0x10000) + /* + * This bit must be '1' for the evb_mode field to be + * configured. + */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE \ + UINT32_C(0x20000) + /* + * This bit must be '1' for the num_mcast_filters field to be + * configured. + */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_MCAST_FILTERS \ + UINT32_C(0x40000) + /* + * This bit must be '1' for the num_hw_ring_grps field to be + * configured.
+ */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS \ + UINT32_C(0x80000) + /* + * This bit must be '1' for the cache_linesize field to be + * configured. + */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_CACHE_LINESIZE \ + UINT32_C(0x100000) + /* + * This bit must be '1' for the num_msix field to be + * configured. + */ + #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_MSIX \ + UINT32_C(0x200000) + /* + * The maximum transmission unit of the function. + * The HWRM should make sure that the mtu of + * the function does not exceed the mtu of the physical + * port that this function is associated with. + * + * In addition to configuring mtu per function, it is + * possible to configure mtu per transmit ring. + * By default, the mtu of each transmit ring associated + * with a function is equal to the mtu of the function. + * The HWRM should make sure that the mtu of each transmit + * ring that is assigned to a function has a valid mtu. + */ + uint16_t mtu; + /* + * The maximum receive unit of the function. + * The HWRM should make sure that the mru of + * the function does not exceed the mru of the physical + * port that this function is associated with. + * + * In addition to configuring mru per function, it is + * possible to configure mru per vnic. + * By default, the mru of each vnic associated + * with a function is equal to the mru of the function. + * The HWRM should make sure that the mru of each vnic + * that is assigned to a function has a valid mru. + */ + uint16_t mru; + /* + * The number of RSS/COS contexts requested for the + * function. + */ + uint16_t num_rsscos_ctxs; + /* + * The number of completion rings requested for the + * function. This does not include the rings allocated + * to any children functions if any. + */ + uint16_t num_cmpl_rings; + /* + * The number of transmit rings requested for the function. + * This does not include the rings allocated to any + * children functions if any. + */ + uint16_t num_tx_rings; + /* + * The number of receive rings requested for the function. + * This does not include the rings allocated + * to any children functions if any. + */ + uint16_t num_rx_rings; + /* The requested number of L2 contexts for the function. */ + uint16_t num_l2_ctxs; + /* The requested number of vnics for the function. */ + uint16_t num_vnics; + /* The requested number of statistic contexts for the function. */ + uint16_t num_stat_ctxs; + /* + * The number of HW ring groups that should + * be reserved for this function. + */ + uint16_t num_hw_ring_grps; + /* The default MAC address for the function being configured. */ + uint8_t dflt_mac_addr[6]; + /* + * The default VLAN for the function being configured. + * This field's format is same as 802.1Q Tag's + * Tag Control Information (TCI) format that includes both + * Priority Code Point (PCP) and VLAN Identifier (VID). + */ + uint16_t dflt_vlan; + /* + * The default IP address for the function being configured. + * This address is only used in enabling source property check. + */ + uint32_t dflt_ip_addr[4]; + /* + * Minimum BW allocated for this function. + * The HWRM will translate this value into byte counter and + * time interval used for the scheduler inside the device. + */ + uint32_t min_bw; + /* The bandwidth value. */ + #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_SFT 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_FUNC_CFG_INPUT_MIN_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. 
*/ + #define HWRM_FUNC_CFG_INPUT_MIN_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_FUNC_CFG_INPUT_MIN_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_FUNC_CFG_INPUT_MIN_BW_SCALE_LAST \ + HWRM_FUNC_CFG_INPUT_MIN_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_SFT 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_LAST \ + HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_INVALID + /* + * Maximum BW allocated for this function. + * The HWRM will translate this value into byte counter and + * time interval used for the scheduler inside the device. + */ + uint32_t max_bw; + /* The bandwidth value. */ + #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_SFT 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_FUNC_CFG_INPUT_MAX_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_FUNC_CFG_INPUT_MAX_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_FUNC_CFG_INPUT_MAX_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_FUNC_CFG_INPUT_MAX_BW_SCALE_LAST \ + HWRM_FUNC_CFG_INPUT_MAX_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_SFT 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_LAST \ + HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_INVALID + /* + * ID of the target completion ring for receiving asynchronous + * event completions. If this field is not valid, then the + * HWRM shall use the default completion ring of the function + * that is being configured as the target completion ring for + * providing any asynchronous event completions for that + * function. 
+ * If this field is valid, then the HWRM shall use the + * completion ring identified by this ID as the target + * completion ring for providing any asynchronous event + * completions for the function that is being configured. + */ + uint16_t async_event_cr; + /* VLAN Anti-spoofing mode. */ + uint8_t vlan_antispoof_mode; + /* No VLAN anti-spoofing checks are enabled */ + #define HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK \ + UINT32_C(0x0) + /* Validate VLAN against the configured VLAN(s) */ + #define HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN \ + UINT32_C(0x1) + /* Insert VLAN if it does not exist, otherwise discard */ + #define HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_INSERT_IF_VLANDNE \ + UINT32_C(0x2) + /* Insert VLAN if it does not exist, override VLAN if it exists */ + #define HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN \ + UINT32_C(0x3) + #define HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_LAST \ + HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN + /* + * This bit field defines VLAN PRIs that are allowed on + * this function. + * If nth bit is set, then VLAN PRI n is allowed on this + * function. + */ + uint8_t allowed_vlan_pris; + /* + * The HWRM shall allow a PF driver to change EVB mode for the + * partition it belongs to. + * The HWRM shall not allow a VF driver to change the EVB mode. + * The HWRM shall take into account the switching of EVB mode + * from one to another and reconfigure hardware resources + * appropriately. + * The switching from VEB to VEPA mode requires + * the disabling of the loopback traffic. Additionally, + * source knock outs are handled differently in VEB and VEPA + * modes. + */ + uint8_t evb_mode; + /* No Edge Virtual Bridging (EVB) */ + #define HWRM_FUNC_CFG_INPUT_EVB_MODE_NO_EVB UINT32_C(0x0) + /* Virtual Ethernet Bridge (VEB) */ + #define HWRM_FUNC_CFG_INPUT_EVB_MODE_VEB UINT32_C(0x1) + /* Virtual Ethernet Port Aggregator (VEPA) */ + #define HWRM_FUNC_CFG_INPUT_EVB_MODE_VEPA UINT32_C(0x2) + #define HWRM_FUNC_CFG_INPUT_EVB_MODE_LAST \ + HWRM_FUNC_CFG_INPUT_EVB_MODE_VEPA + uint8_t options; + /* + * This value indicates the PCIE device cache line size. + * The cache line size allows the DMA writes to terminate and + * start at the cache boundary. + */ + #define HWRM_FUNC_CFG_INPUT_OPTIONS_CACHE_LINESIZE_MASK \ + UINT32_C(0x3) + #define HWRM_FUNC_CFG_INPUT_OPTIONS_CACHE_LINESIZE_SFT 0 + /* Cache Line Size 64 bytes */ + #define HWRM_FUNC_CFG_INPUT_OPTIONS_CACHE_LINESIZE_SIZE_64 \ + UINT32_C(0x0) + /* Cache Line Size 128 bytes */ + #define HWRM_FUNC_CFG_INPUT_OPTIONS_CACHE_LINESIZE_SIZE_128 \ + UINT32_C(0x1) + #define HWRM_FUNC_CFG_INPUT_OPTIONS_CACHE_LINESIZE_LAST \ + HWRM_FUNC_CFG_INPUT_OPTIONS_CACHE_LINESIZE_SIZE_128 + /* Reserved for future. */ + #define HWRM_FUNC_CFG_INPUT_OPTIONS_RSVD_MASK \ + UINT32_C(0xfc) + #define HWRM_FUNC_CFG_INPUT_OPTIONS_RSVD_SFT 2 + /* + * The number of multicast filters that should + * be reserved for this function on the RX side. + */ + uint16_t num_mcast_filters; +} __attribute__((packed)); + +/* hwrm_func_cfg_output (size:128b/16B) */ +struct hwrm_func_cfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes.
*/ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/******************** + * hwrm_func_qstats * + ********************/ + + +/* hwrm_func_qstats_input (size:192b/24B) */ +struct hwrm_func_qstats_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* + * Function ID of the function that is being queried. + * 0xFF... (All Fs) if the query is for the requesting + * function. + */ + uint16_t fid; + uint8_t unused_0[6]; +} __attribute__((packed)); + +/* hwrm_func_qstats_output (size:1408b/176B) */ +struct hwrm_func_qstats_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* Number of transmitted unicast packets on the function. */ + uint64_t tx_ucast_pkts; + /* Number of transmitted multicast packets on the function. */ + uint64_t tx_mcast_pkts; + /* Number of transmitted broadcast packets on the function. */ + uint64_t tx_bcast_pkts; + /* + * Number of transmitted packets that were discarded due to + * internal NIC resource problems. For transmit, this + * can only happen if TMP is configured to allow dropping + * in HOL blocking conditions, which is not a normal + * configuration. + */ + uint64_t tx_discard_pkts; + /* + * Number of dropped packets on transmit path on the function. + * These are packets that have been marked for drop by + * the TE CFA block or are packets that exceeded the + * transmit MTU limit for the function. + */ + uint64_t tx_drop_pkts; + /* Number of transmitted bytes for unicast traffic on the function. */ + uint64_t tx_ucast_bytes; + /* Number of transmitted bytes for multicast traffic on the function. */ + uint64_t tx_mcast_bytes; + /* Number of transmitted bytes for broadcast traffic on the function. */ + uint64_t tx_bcast_bytes; + /* Number of received unicast packets on the function. */ + uint64_t rx_ucast_pkts; + /* Number of received multicast packets on the function. */ + uint64_t rx_mcast_pkts; + /* Number of received broadcast packets on the function. 
*/ + uint64_t rx_bcast_pkts; + /* + * Number of received packets that were discarded on the function + * due to resource limitations. This can happen for 3 reasons. + * # The BD used for the packet has a bad format. + * # There were no BDs available in the ring for the packet. + * # There were no BDs available on-chip for the packet. + */ + uint64_t rx_discard_pkts; + /* + * Number of dropped packets on received path on the function. + * These are packets that have been marked for drop by the + * RE CFA. + */ + uint64_t rx_drop_pkts; + /* Number of received bytes for unicast traffic on the function. */ + uint64_t rx_ucast_bytes; + /* Number of received bytes for multicast traffic on the function. */ + uint64_t rx_mcast_bytes; + /* Number of received bytes for broadcast traffic on the function. */ + uint64_t rx_bcast_bytes; + /* Number of aggregated unicast packets on the function. */ + uint64_t rx_agg_pkts; + /* Number of aggregated unicast bytes on the function. */ + uint64_t rx_agg_bytes; + /* Number of aggregation events on the function. */ + uint64_t rx_agg_events; + /* Number of aborted aggregations on the function. */ + uint64_t rx_agg_aborts; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/*********************** + * hwrm_func_clr_stats * + ***********************/ + + +/* hwrm_func_clr_stats_input (size:192b/24B) */ +struct hwrm_func_clr_stats_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* + * Function ID of the function. + * 0xFF... (All Fs) if the query is for the requesting + * function. + */ + uint16_t fid; + uint8_t unused_0[6]; +} __attribute__((packed)); + +/* hwrm_func_clr_stats_output (size:128b/16B) */ +struct hwrm_func_clr_stats_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. 
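The ordering contract on the trailing valid byte, described repeatedly in these outputs, implies a simple consumption pattern: poll valid, then fence, then read the payload. A minimal sketch against hwrm_func_qstats_output, assuming DPDK's rte_pause() and rte_io_rmb(); the helper name is hypothetical:

    #include <rte_pause.h>
    #include <rte_atomic.h>

    static inline void
    bnxt_wait_qstats_valid(volatile struct hwrm_func_qstats_output *resp)
    {
        /* firmware writes the response buffer with 'valid' last */
        while (resp->valid != 1)
            rte_pause();
        rte_io_rmb(); /* keep payload reads after the valid read */
    }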
+ * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/************************** + * hwrm_func_vf_resc_free * + **************************/ + + +/* hwrm_func_vf_resc_free_input (size:192b/24B) */ +struct hwrm_func_vf_resc_free_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* + * This value is used to identify a Virtual Function (VF). + * The scope of VF ID is local within a PF. + */ + uint16_t vf_id; + uint8_t unused_0[6]; +} __attribute__((packed)); + +/* hwrm_func_vf_resc_free_output (size:128b/16B) */ +struct hwrm_func_vf_resc_free_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/******************************* + * hwrm_func_vf_vnic_ids_query * + *******************************/ + + +/* hwrm_func_vf_vnic_ids_query_input (size:256b/32B) */ +struct hwrm_func_vf_vnic_ids_query_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* + * This value is used to identify a Virtual Function (VF). + * The scope of VF ID is local within a PF. 
+ */ + uint16_t vf_id; + uint8_t unused_0[2]; + /* Max number of vnic ids in vnic id table */ + uint32_t max_vnic_id_cnt; + /* This is the address for VF VNIC ID table */ + uint64_t vnic_id_tbl_addr; +} __attribute__((packed)); + +/* hwrm_func_vf_vnic_ids_query_output (size:128b/16B) */ +struct hwrm_func_vf_vnic_ids_query_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* + * Actual number of vnic ids + * + * Each VNIC ID is written as a 32-bit number. + */ + uint32_t vnic_id_cnt; + uint8_t unused_0[3]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/********************** + * hwrm_func_drv_rgtr * + **********************/ + + +/* hwrm_func_drv_rgtr_input (size:896b/112B) */ +struct hwrm_func_drv_rgtr_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t flags; + /* + * When this bit is '1', the function driver is requesting + * all requests from its children VF drivers to be + * forwarded to itself. + * This flag can only be set by the PF driver. + * If a VF driver sets this flag, it should be ignored + * by the HWRM. + */ + #define HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_ALL_MODE UINT32_C(0x1) + /* + * When this bit is '1', the function is requesting none of + * the requests from its children VF drivers to be + * forwarded to itself. + * This flag can only be set by the PF driver. + * If a VF driver sets this flag, it should be ignored + * by the HWRM. + */ + #define HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE UINT32_C(0x2) + /* + * When this bit is '1', then ver_maj_8b, ver_min_8b, ver_upd_8b + * fields shall be ignored and ver_maj, ver_min, ver_upd + * and ver_patch shall be used for the driver version information. + * When this bit is '0', then ver_maj_8b, ver_min_8b, ver_upd_8b + * fields shall be used for the driver version information and + * ver_maj, ver_min, ver_upd and ver_patch shall be ignored. + */ + #define HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_16BIT_VER_MODE UINT32_C(0x4) + uint32_t enables; + /* + * This bit must be '1' for the os_type field to be + * configured. 
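To use hwrm_func_vf_vnic_ids_query, the PF supplies a DMA-able table of max_vnic_id_cnt 32-bit entries at vnic_id_tbl_addr and reads vnic_id_cnt results back from it. A sketch using DPDK's rte_zmalloc()/rte_malloc_virt2iova(); the helper and its error handling are hypothetical:

    #include <rte_malloc.h>
    #include <rte_byteorder.h>

    /* back the query with a physically contiguous table of VNIC IDs */
    static uint32_t *
    bnxt_alloc_vnic_id_tbl(struct hwrm_func_vf_vnic_ids_query_input *req,
                           uint16_t vf_id, uint32_t max_cnt)
    {
        uint32_t *tbl = rte_zmalloc("vnic_ids", max_cnt * sizeof(*tbl), 0);

        if (tbl == NULL)
            return NULL;
        req->vf_id = rte_cpu_to_le_16(vf_id);
        req->max_vnic_id_cnt = rte_cpu_to_le_32(max_cnt);
        req->vnic_id_tbl_addr = rte_cpu_to_le_64(rte_malloc_virt2iova(tbl));
        return tbl;
    }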
+ */ + #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_OS_TYPE \ + UINT32_C(0x1) + /* + * This bit must be '1' for the ver field to be + * configured. + */ + #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER \ + UINT32_C(0x2) + /* + * This bit must be '1' for the timestamp field to be + * configured. + */ + #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_TIMESTAMP \ + UINT32_C(0x4) + /* + * This bit must be '1' for the vf_req_fwd field to be + * configured. + */ + #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD \ + UINT32_C(0x8) + /* + * This bit must be '1' for the async_event_fwd field to be + * configured. + */ + #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD \ + UINT32_C(0x10) + /* This value indicates the type of OS. The values are based on CIM_OperatingSystem.mof file as published by the DMTF. */ + uint16_t os_type; + /* Unknown */ + #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_UNKNOWN UINT32_C(0x0) + /* Other OS not listed below. */ + #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_OTHER UINT32_C(0x1) + /* MSDOS OS. */ + #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_MSDOS UINT32_C(0xe) + /* Windows OS. */ + #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_WINDOWS UINT32_C(0x12) + /* Solaris OS. */ + #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_SOLARIS UINT32_C(0x1d) + /* Linux OS. */ + #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_LINUX UINT32_C(0x24) + /* FreeBSD OS. */ + #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_FREEBSD UINT32_C(0x2a) + /* VMware ESXi OS. */ + #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_ESXI UINT32_C(0x68) + /* Microsoft Windows 8 64-bit OS. */ + #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_WIN864 UINT32_C(0x73) + /* Microsoft Windows Server 2012 R2 OS. */ + #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_WIN2012R2 UINT32_C(0x74) + /* UEFI driver. */ + #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_UEFI UINT32_C(0x8000) + #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_LAST \ + HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_UEFI + /* This is the 8bit major version of the driver. */ + uint8_t ver_maj_8b; + /* This is the 8bit minor version of the driver. */ + uint8_t ver_min_8b; + /* This is the 8bit update version of the driver. */ + uint8_t ver_upd_8b; + uint8_t unused_0[3]; + /* + * This is a 32-bit timestamp provided by the driver for + * keep alive. + * The timestamp is in multiples of 1ms. + */ + uint32_t timestamp; + uint8_t unused_1[4]; + /* + * This is a 256-bit bit mask provided by the PF driver for + * letting the HWRM know what commands issued by the VF driver + * to the HWRM should be forwarded to the PF driver. + * Nth bit refers to the Nth req_type. + * + * Setting Nth bit to 1 indicates that requests from the + * VF driver with req_type equal to N shall be forwarded to + * the parent PF driver. + * + * This field is not valid for the VF driver. + */ + uint32_t vf_req_fwd[8]; + /* + * This is a 256-bit bit mask provided by the function driver + * (PF or VF driver) to indicate the list of asynchronous event + * completions to be forwarded. + * + * Nth bit refers to the Nth event_id. + * + * Setting Nth bit to 1 by the function driver shall result in + * the HWRM forwarding asynchronous event completion with + * event_id equal to N. + * + * If all bits are set to 0 (value of 0), then the HWRM shall + * not forward any asynchronous event completion to this + * function driver. + */ + uint32_t async_event_fwd[8]; + /* This is the 16bit major version of the driver. */ + uint16_t ver_maj; + /* This is the 16bit minor version of the driver. */ + uint16_t ver_min; + /* This is the 16bit update version of the driver. 
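Putting the registration fields together (a hedged sketch, not the driver's actual code): a Linux PF driver advertising the wide version fields sets FLAGS_16BIT_VER_MODE along with ENABLES_OS_TYPE and ENABLES_VER; the version numbers below are placeholders only:

    #include <rte_byteorder.h>

    static void
    bnxt_fill_drv_rgtr(struct hwrm_func_drv_rgtr_input *req)
    {
        /* register as Linux with 16-bit version fields (sketch) */
        req->flags = rte_cpu_to_le_32(
            HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_16BIT_VER_MODE);
        req->enables = rte_cpu_to_le_32(
            HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_OS_TYPE |
            HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER);
        req->os_type = rte_cpu_to_le_16(
            HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_LINUX);
        req->ver_maj = rte_cpu_to_le_16(18); /* placeholder version */
        req->ver_min = rte_cpu_to_le_16(5);
        req->ver_upd = rte_cpu_to_le_16(0);
    }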
*/ + uint16_t ver_upd; + /* This is the 16bit patch version of the driver. */ + uint16_t ver_patch; +} __attribute__((packed)); + +/* hwrm_func_drv_rgtr_output (size:128b/16B) */ +struct hwrm_func_drv_rgtr_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/************************ + * hwrm_func_drv_unrgtr * + ************************/ + + +/* hwrm_func_drv_unrgtr_input (size:192b/24B) */ +struct hwrm_func_drv_unrgtr_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t flags; + /* + * When this bit is '1', the function driver is notifying + * the HWRM to prepare for the shutdown. + */ + #define HWRM_FUNC_DRV_UNRGTR_INPUT_FLAGS_PREPARE_FOR_SHUTDOWN \ + UINT32_C(0x1) + uint8_t unused_0[4]; +} __attribute__((packed)); + +/* hwrm_func_drv_unrgtr_output (size:128b/16B) */ +struct hwrm_func_drv_unrgtr_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/********************** + * hwrm_func_buf_rgtr * + **********************/ + + +/* hwrm_func_buf_rgtr_input (size:1024b/128B) */ +struct hwrm_func_buf_rgtr_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. 
This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t enables; + /* + * This bit must be '1' for the vf_id field to be + * configured. + */ + #define HWRM_FUNC_BUF_RGTR_INPUT_ENABLES_VF_ID UINT32_C(0x1) + /* + * This bit must be '1' for the err_buf_addr field to be + * configured. + */ + #define HWRM_FUNC_BUF_RGTR_INPUT_ENABLES_ERR_BUF_ADDR UINT32_C(0x2) + /* + * This value is used to identify a Virtual Function (VF). + * The scope of VF ID is local within a PF. + */ + uint16_t vf_id; + /* + * This field represents the number of pages used for request + * buffer(s). + */ + uint16_t req_buf_num_pages; + /* + * This field represents the page size used for request + * buffer(s). + */ + uint16_t req_buf_page_size; + /* 16 bytes */ + #define HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_16B UINT32_C(0x4) + /* 4 Kbytes */ + #define HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_4K UINT32_C(0xc) + /* 8 Kbytes */ + #define HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_8K UINT32_C(0xd) + /* 64 Kbytes */ + #define HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_64K UINT32_C(0x10) + /* 2 Mbytes */ + #define HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_2M UINT32_C(0x15) + /* 4 Mbytes */ + #define HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_4M UINT32_C(0x16) + /* 1 Gbytes */ + #define HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_1G UINT32_C(0x1e) + #define HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_LAST \ + HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_1G + /* The length of the request buffer per VF in bytes. */ + uint16_t req_buf_len; + /* The length of the response buffer in bytes. */ + uint16_t resp_buf_len; + uint8_t unused_0[2]; + /* This field represents the page address of page #0. */ + uint64_t req_buf_page_addr0; + /* This field represents the page address of page #1. */ + uint64_t req_buf_page_addr1; + /* This field represents the page address of page #2. */ + uint64_t req_buf_page_addr2; + /* This field represents the page address of page #3. */ + uint64_t req_buf_page_addr3; + /* This field represents the page address of page #4. */ + uint64_t req_buf_page_addr4; + /* This field represents the page address of page #5. */ + uint64_t req_buf_page_addr5; + /* This field represents the page address of page #6. */ + uint64_t req_buf_page_addr6; + /* This field represents the page address of page #7. */ + uint64_t req_buf_page_addr7; + /* This field represents the page address of page #8. */ + uint64_t req_buf_page_addr8; + /* This field represents the page address of page #9. */ + uint64_t req_buf_page_addr9; + /* + * This field is used to receive the error reporting from + * the chipset. Only applicable for PFs. + */ + uint64_t error_buf_addr; + /* + * This field is used to receive the response forwarded by the + * HWRM. + */ + uint64_t resp_buf_addr; +} __attribute__((packed)); + +/* hwrm_func_buf_rgtr_output (size:128b/16B) */ +struct hwrm_func_buf_rgtr_output { + /* The specific error status for the command. 
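Note the pattern in req_buf_page_size: each code is log2 of the page size in bytes (0x4 = 16 B, 0xc = 4 KB, 0x10 = 64 KB, 0x1e = 1 GB). Assuming a power-of-two page size, a driver can therefore derive the code directly; the helper below is a hypothetical sketch:

    static inline uint16_t
    bnxt_buf_pg_size_code(uint32_t pg_bytes)
    {
        /* e.g. 4096 -> 12 == HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_4K */
        return (uint16_t)__builtin_ctz(pg_bytes);
    }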
*/ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/************************ + * hwrm_func_buf_unrgtr * + ************************/ + + +/* hwrm_func_buf_unrgtr_input (size:192b/24B) */ +struct hwrm_func_buf_unrgtr_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t enables; + /* + * This bit must be '1' for the vf_id field to be + * configured. + */ + #define HWRM_FUNC_BUF_UNRGTR_INPUT_ENABLES_VF_ID UINT32_C(0x1) + /* + * This value is used to identify a Virtual Function (VF). + * The scope of VF ID is local within a PF. + */ + uint16_t vf_id; + uint8_t unused_0[2]; +} __attribute__((packed)); + +/* hwrm_func_buf_unrgtr_output (size:128b/16B) */ +struct hwrm_func_buf_unrgtr_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/********************** + * hwrm_func_drv_qver * + **********************/ + + +/* hwrm_func_drv_qver_input (size:192b/24B) */ +struct hwrm_func_drv_qver_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. 
+ */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Reserved for future use. */ + uint32_t reserved; + /* + * Function ID of the function that is being queried. + * 0xFF... (All Fs) if the query is for the requesting + * function. + */ + uint16_t fid; + uint8_t unused_0[2]; +} __attribute__((packed)); + +/* hwrm_func_drv_qver_output (size:192b/24B) */ +struct hwrm_func_drv_qver_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* This value indicates the type of OS. The values are based on CIM_OperatingSystem.mof file as published by the DMTF. */ + uint16_t os_type; + /* Unknown */ + #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_UNKNOWN UINT32_C(0x0) + /* Other OS not listed below. */ + #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_OTHER UINT32_C(0x1) + /* MSDOS OS. */ + #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_MSDOS UINT32_C(0xe) + /* Windows OS. */ + #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_WINDOWS UINT32_C(0x12) + /* Solaris OS. */ + #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_SOLARIS UINT32_C(0x1d) + /* Linux OS. */ + #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_LINUX UINT32_C(0x24) + /* FreeBSD OS. */ + #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_FREEBSD UINT32_C(0x2a) + /* VMware ESXi OS. */ + #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_ESXI UINT32_C(0x68) + /* Microsoft Windows 8 64-bit OS. */ + #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_WIN864 UINT32_C(0x73) + /* Microsoft Windows Server 2012 R2 OS. */ + #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_WIN2012R2 UINT32_C(0x74) + /* UEFI driver. */ + #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_UEFI UINT32_C(0x8000) + #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_LAST \ + HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_UEFI + /* This is the 8bit major version of the driver. */ + uint8_t ver_maj_8b; + /* This is the 8bit minor version of the driver. */ + uint8_t ver_min_8b; + /* This is the 8bit update version of the driver. */ + uint8_t ver_upd_8b; + uint8_t unused_0[2]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; + /* This is the 16bit major version of the driver. */ + uint16_t ver_maj; + /* This is the 16bit minor version of the driver. */ + uint16_t ver_min; + /* This is the 16bit update version of the driver. */ + uint16_t ver_upd; + /* This is the 16bit patch version of the driver. */ + uint16_t ver_patch; +} __attribute__((packed)); + +/**************************** + * hwrm_func_resource_qcaps * + ****************************/ + + +/* hwrm_func_resource_qcaps_input (size:192b/24B) */ +struct hwrm_func_resource_qcaps_input { + /* The HWRM command request type. 
*/ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* + * Function ID of the function that is being queried. + * 0xFF... (All Fs) if the query is for the requesting + * function. + */ + uint16_t fid; + uint8_t unused_0[6]; +} __attribute__((packed)); + +/* hwrm_func_resource_qcaps_output (size:448b/56B) */ +struct hwrm_func_resource_qcaps_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* Maximum guaranteed number of VFs supported by PF. Not applicable for VFs. */ + uint16_t max_vfs; + /* Maximum guaranteed number of MSI-X vectors supported by function */ + uint16_t max_msix; + /* Hint of strategy to be used by PF driver to reserve resources for its VF */ + uint16_t vf_reservation_strategy; + /* The PF driver should evenly divide its remaining resources among all VFs. */ + #define HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MAXIMAL \ + UINT32_C(0x0) + /* The PF driver should only reserve minimal resources for each VF. */ + #define HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MINIMAL \ + UINT32_C(0x1) + /* + * The PF driver should not reserve any resources for each VF until + * the VF interface is brought up.
+ */ + #define HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MINIMAL_STATIC \ + UINT32_C(0x2) + #define HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_LAST \ + HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MINIMAL_STATIC + /* Minimum guaranteed number of RSS/COS contexts */ + uint16_t min_rsscos_ctx; + /* Maximum non-guaranteed number of RSS/COS contexts */ + uint16_t max_rsscos_ctx; + /* Minimum guaranteed number of completion rings */ + uint16_t min_cmpl_rings; + /* Maximum non-guaranteed number of completion rings */ + uint16_t max_cmpl_rings; + /* Minimum guaranteed number of transmit rings */ + uint16_t min_tx_rings; + /* Maximum non-guaranteed number of transmit rings */ + uint16_t max_tx_rings; + /* Minimum guaranteed number of receive rings */ + uint16_t min_rx_rings; + /* Maximum non-guaranteed number of receive rings */ + uint16_t max_rx_rings; + /* Minimum guaranteed number of L2 contexts */ + uint16_t min_l2_ctxs; + /* Maximum non-guaranteed number of L2 contexts */ + uint16_t max_l2_ctxs; + /* Minimum guaranteed number of VNICs */ + uint16_t min_vnics; + /* Maximum non-guaranteed number of VNICs */ + uint16_t max_vnics; + /* Minimum guaranteed number of statistic contexts */ + uint16_t min_stat_ctx; + /* Maximum non-guaranteed number of statistic contexts */ + uint16_t max_stat_ctx; + /* Minimum guaranteed number of ring groups */ + uint16_t min_hw_ring_grps; + /* Maximum non-guaranteed number of ring groups */ + uint16_t max_hw_ring_grps; + /* + * Maximum number of inputs into the transmit scheduler for this function. + * The number of TX rings assigned to the function cannot exceed this value. + */ + uint16_t max_tx_scheduler_inputs; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/***************************** + * hwrm_func_vf_resource_cfg * + *****************************/ + + +/* hwrm_func_vf_resource_cfg_input (size:448b/56B) */ +struct hwrm_func_vf_resource_cfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. 
+ */ + uint64_t resp_addr; + /* VF ID that is being configured by PF */ + uint16_t vf_id; + /* Maximum guaranteed number of MSI-X vectors for the function */ + uint16_t max_msix; + /* Minimum guaranteed number of RSS/COS contexts */ + uint16_t min_rsscos_ctx; + /* Maximum non-guaranteed number of RSS/COS contexts */ + uint16_t max_rsscos_ctx; + /* Minimum guaranteed number of completion rings */ + uint16_t min_cmpl_rings; + /* Maximum non-guaranteed number of completion rings */ + uint16_t max_cmpl_rings; + /* Minimum guaranteed number of transmit rings */ + uint16_t min_tx_rings; + /* Maximum non-guaranteed number of transmit rings */ + uint16_t max_tx_rings; + /* Minimum guaranteed number of receive rings */ + uint16_t min_rx_rings; + /* Maximum non-guaranteed number of receive rings */ + uint16_t max_rx_rings; + /* Minimum guaranteed number of L2 contexts */ + uint16_t min_l2_ctxs; + /* Maximum non-guaranteed number of L2 contexts */ + uint16_t max_l2_ctxs; + /* Minimum guaranteed number of VNICs */ + uint16_t min_vnics; + /* Maximum non-guaranteed number of VNICs */ + uint16_t max_vnics; + /* Minimum guaranteed number of statistic contexts */ + uint16_t min_stat_ctx; + /* Maximum non-guaranteed number of statistic contexts */ + uint16_t max_stat_ctx; + /* Minimum guaranteed number of ring groups */ + uint16_t min_hw_ring_grps; + /* Maximum non-guaranteed number of ring groups */ + uint16_t max_hw_ring_grps; + uint8_t unused_0[4]; +} __attribute__((packed)); + +/* hwrm_func_vf_resource_cfg_output (size:256b/32B) */ +struct hwrm_func_vf_resource_cfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* Reserved number of RSS/COS contexts */ + uint16_t reserved_rsscos_ctx; + /* Reserved number of completion rings */ + uint16_t reserved_cmpl_rings; + /* Reserved number of transmit rings */ + uint16_t reserved_tx_rings; + /* Reserved number of receive rings */ + uint16_t reserved_rx_rings; + /* Reserved number of L2 contexts */ + uint16_t reserved_l2_ctxs; + /* Reserved number of VNICs */ + uint16_t reserved_vnics; + /* Reserved number of statistic contexts */ + uint16_t reserved_stat_ctx; + /* Reserved number of ring groups */ + uint16_t reserved_hw_ring_grps; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/********************************* + * hwrm_func_backing_store_qcaps * + *********************************/ + + +/* hwrm_func_backing_store_qcaps_input (size:128b/16B) */ +struct hwrm_func_backing_store_qcaps_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. 
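The qcaps/cfg pair above is meant to be used together: the PF reads the min/max resource window from hwrm_func_resource_qcaps_output and echoes a (possibly narrowed) window into hwrm_func_vf_resource_cfg_input for each VF. A hedged sketch; since both structures carry little-endian values on the wire, the fields can be copied through directly:

    /* seed a VF's resource window from the qcaps response (sketch) */
    static void
    bnxt_vf_resc_from_qcaps(struct hwrm_func_vf_resource_cfg_input *req,
                            const struct hwrm_func_resource_qcaps_output *caps,
                            uint16_t vf_id)
    {
        req->vf_id = rte_cpu_to_le_16(vf_id);
        req->min_tx_rings = caps->min_tx_rings;
        req->max_tx_rings = caps->max_tx_rings;
        req->min_rx_rings = caps->min_rx_rings;
        req->max_rx_rings = caps->max_rx_rings;
        req->min_cmpl_rings = caps->min_cmpl_rings;
        req->max_cmpl_rings = caps->max_cmpl_rings;
        /* ...and likewise for l2_ctxs, vnics, stat_ctx, hw_ring_grps */
    }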
+ */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; +} __attribute__((packed)); + +/* hwrm_func_backing_store_qcaps_output (size:512b/64B) */ +struct hwrm_func_backing_store_qcaps_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* Maximum number of QP context entries supported for this function. */ + uint32_t qp_max_entries; + /* + * Minimum number of QP context entries that are needed to be reserved + * for QP1 for the PF and its VFs. PF drivers must allocate at least + * this many QP context entries, even if RoCE will not be used. + */ + uint16_t qp_min_qp1_entries; + /* Maximum number of QP context entries that can be used for L2. */ + uint16_t qp_max_l2_entries; + /* Number of bytes that must be allocated for each context entry. */ + uint16_t qp_entry_size; + /* Maximum number of SRQ context entries that can be used for L2. */ + uint16_t srq_max_l2_entries; + /* Maximum number of SRQ context entries supported for this function. */ + uint32_t srq_max_entries; + /* Number of bytes that must be allocated for each context entry. */ + uint16_t srq_entry_size; + /* Maximum number of CQ context entries that can be used for L2. */ + uint16_t cq_max_l2_entries; + /* Maximum number of CQ context entries supported for this function. */ + uint32_t cq_max_entries; + /* Number of bytes that must be allocated for each context entry. */ + uint16_t cq_entry_size; + /* Maximum number of VNIC context entries supported for this function. */ + uint16_t vnic_max_vnic_entries; + /* Maximum number of Ring table context entries supported for this function. */ + uint16_t vnic_max_ring_table_entries; + /* Number of bytes that must be allocated for each context entry. */ + uint16_t vnic_entry_size; + /* Maximum number of statistic context entries supported for this function. */ + uint32_t stat_max_entries; + /* Number of bytes that must be allocated for each context entry. */ + uint16_t stat_entry_size; + /* Maximum number of TQM context entries supported per ring. */ + uint16_t tqm_max_entries_per_ring; + /* Number of bytes that must be allocated for each context entry. */ + uint16_t tqm_entry_size; + /* Number of bytes that must be allocated for each context entry. */ + uint16_t mrav_entry_size; + /* Maximum number of MR/AV context entries supported for this function. */ + uint32_t mrav_max_entries; + /* Maximum number of Timer context entries supported for this function. */ + uint32_t tim_max_entries; + /* Number of bytes that must be allocated for each context entry. */ + uint16_t tim_entry_size; + uint8_t unused_0; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. 
+ * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/******************************* + * hwrm_func_backing_store_cfg * + *******************************/ + + +/* hwrm_func_backing_store_cfg_input (size:2048b/256B) */ +struct hwrm_func_backing_store_cfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t flags; + /* + * When set, the firmware only uses on-chip resources and does not + * expect any backing store to be provided by the host driver. This + * mode provides minimal L2 functionality (e.g. limited L2 resources, + * no RoCE). + */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_FLAGS_PREBOOT_MODE \ + UINT32_C(0x1) + uint32_t enables; + /* + * This bit must be '1' for the qp fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP \ + UINT32_C(0x1) + /* + * This bit must be '1' for the srq fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_SRQ \ + UINT32_C(0x2) + /* + * This bit must be '1' for the cq fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_CQ \ + UINT32_C(0x4) + /* + * This bit must be '1' for the vnic fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_VNIC \ + UINT32_C(0x8) + /* + * This bit must be '1' for the stat fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_STAT \ + UINT32_C(0x10) + /* + * This bit must be '1' for the tqm_sp fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP \ + UINT32_C(0x20) + /* + * This bit must be '1' for the tqm_ring0 fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING0 \ + UINT32_C(0x40) + /* + * This bit must be '1' for the tqm_ring1 fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING1 \ + UINT32_C(0x80) + /* + * This bit must be '1' for the tqm_ring2 fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING2 \ + UINT32_C(0x100) + /* + * This bit must be '1' for the tqm_ring3 fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING3 \ + UINT32_C(0x200) + /* + * This bit must be '1' for the tqm_ring4 fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING4 \ + UINT32_C(0x400) + /* + * This bit must be '1' for the tqm_ring5 fields to be + * configured. 
+ */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING5 \ + UINT32_C(0x800) + /* + * This bit must be '1' for the tqm_ring6 fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING6 \ + UINT32_C(0x1000) + /* + * This bit must be '1' for the tqm_ring7 fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING7 \ + UINT32_C(0x2000) + /* + * This bit must be '1' for the mrav fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_MRAV \ + UINT32_C(0x4000) + /* + * This bit must be '1' for the tim fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TIM \ + UINT32_C(0x8000) + /* QPC page size and level. */ + uint8_t qpc_pg_size_qpc_lvl; + /* QPC PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_LVL_LVL_2 + /* QPC page size. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_PG_SIZE_PG_1G + /* SRQ page size and level. */ + uint8_t srq_pg_size_srq_lvl; + /* SRQ PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_LVL_LVL_2 + /* SRQ page size. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. 
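Each *_pg_size_*_lvl byte in this command packs the PBL indirection level into bits 3:0 and the page-size code into bits 7:4, so a configuration is a simple OR of one macro from each group. An illustrative sketch for the QPC byte (one level of indirection, 64 KB pages); the helper is hypothetical:

    static inline uint8_t
    bnxt_qpc_pg_byte(void)
    {
        /* 0x21: level 1 PBL in the low nibble, 64KB in the high nibble */
        return HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_LVL_LVL_1 |
               HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_PG_SIZE_PG_64K;
    }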
*/ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_PG_SIZE_PG_1G + /* CQ page size and level. */ + uint8_t cq_pg_size_cq_lvl; + /* CQ PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_LVL_LVL_2 + /* CQ page size. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_PG_SIZE_PG_1G + /* VNIC page size and level. */ + uint8_t vnic_pg_size_vnic_lvl; + /* VNIC PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_LVL_LVL_2 + /* VNIC page size. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. 
*/ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_PG_SIZE_PG_1G + /* Stat page size and level. */ + uint8_t stat_pg_size_stat_lvl; + /* Stat PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_LVL_LVL_2 + /* Stat page size. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_PG_SIZE_PG_1G + /* TQM slow path page size and level. */ + uint8_t tqm_sp_pg_size_tqm_sp_lvl; + /* TQM slow path PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_LVL_LVL_2 + /* TQM slow path page size. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. 
*/ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_PG_SIZE_PG_1G + /* TQM ring 0 page size and level. */ + uint8_t tqm_ring0_pg_size_tqm_ring0_lvl; + /* TQM ring 0 PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_LVL_LVL_2 + /* TQM ring 0 page size. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_PG_SIZE_PG_1G + /* TQM ring 1 page size and level. */ + uint8_t tqm_ring1_pg_size_tqm_ring1_lvl; + /* TQM ring 1 PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_LVL_LVL_2 + /* TQM ring 1 page size. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. 
*/ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_PG_SIZE_PG_1G + /* TQM ring 2 page size and level. */ + uint8_t tqm_ring2_pg_size_tqm_ring2_lvl; + /* TQM ring 2 PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_LVL_LVL_2 + /* TQM ring 2 page size. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_PG_SIZE_PG_1G + /* TQM ring 3 page size and level. */ + uint8_t tqm_ring3_pg_size_tqm_ring3_lvl; + /* TQM ring 3 PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_LVL_LVL_2 + /* TQM ring 3 page size. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. 
*/ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_PG_SIZE_PG_1G + /* TQM ring 4 page size and level. */ + uint8_t tqm_ring4_pg_size_tqm_ring4_lvl; + /* TQM ring 4 PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_LVL_LVL_2 + /* TQM ring 4 page size. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_PG_SIZE_PG_1G + /* TQM ring 5 page size and level. */ + uint8_t tqm_ring5_pg_size_tqm_ring5_lvl; + /* TQM ring 5 PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_LVL_LVL_2 + /* TQM ring 5 page size. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. 
*/ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_PG_SIZE_PG_1G + /* TQM ring 6 page size and level. */ + uint8_t tqm_ring6_pg_size_tqm_ring6_lvl; + /* TQM ring 6 PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_LVL_LVL_2 + /* TQM ring 6 page size. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_PG_SIZE_PG_1G + /* TQM ring 7 page size and level. */ + uint8_t tqm_ring7_pg_size_tqm_ring7_lvl; + /* TQM ring 7 PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_LVL_LVL_2 + /* TQM ring 7 page size. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. 
*/ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_PG_SIZE_PG_1G + /* MR/AV page size and level. */ + uint8_t mrav_pg_size_mrav_lvl; + /* MR/AV PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_LVL_LVL_2 + /* MR/AV page size. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_PG_SIZE_PG_1G + /* Timer page size and level. */ + uint8_t tim_pg_size_tim_lvl; + /* Timer PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_LVL_LVL_2 + /* Timer page size. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_PG_SIZE_PG_1G + /* QP page directory. 
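 *
 * (Illustrative sketch, not part of the generated header: each context
 * type below is described by a page directory base, an entry count and
 * an entry size, so configuring the QP backing store might look like
 *
 *     req.qpc_page_dir = qp_pbl_dma_addr;   // hypothetical DMA address
 *     req.qp_num_entries = 1024;            // placeholder count
 *     req.qp_entry_size = qp_ctx_size;      // as reported by firmware
 *
 * where `req` is a struct hwrm_func_backing_store_cfg_input, the
 * right-hand sides are placeholders, and the matching enables bit is
 * assumed to be set so the firmware consumes these fields.)
 *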
*/ + uint64_t qpc_page_dir; + /* SRQ page directory. */ + uint64_t srq_page_dir; + /* CQ page directory. */ + uint64_t cq_page_dir; + /* VNIC page directory. */ + uint64_t vnic_page_dir; + /* Stat page directory. */ + uint64_t stat_page_dir; + /* TQM slowpath page directory. */ + uint64_t tqm_sp_page_dir; + /* TQM ring 0 page directory. */ + uint64_t tqm_ring0_page_dir; + /* TQM ring 1 page directory. */ + uint64_t tqm_ring1_page_dir; + /* TQM ring 2 page directory. */ + uint64_t tqm_ring2_page_dir; + /* TQM ring 3 page directory. */ + uint64_t tqm_ring3_page_dir; + /* TQM ring 4 page directory. */ + uint64_t tqm_ring4_page_dir; + /* TQM ring 5 page directory. */ + uint64_t tqm_ring5_page_dir; + /* TQM ring 6 page directory. */ + uint64_t tqm_ring6_page_dir; + /* TQM ring 7 page directory. */ + uint64_t tqm_ring7_page_dir; + /* MR/AV page directory. */ + uint64_t mrav_page_dir; + /* Timer page directory. */ + uint64_t tim_page_dir; + /* Number of QPs. */ + uint32_t qp_num_entries; + /* Number of SRQs. */ + uint32_t srq_num_entries; + /* Number of CQs. */ + uint32_t cq_num_entries; + /* Number of Stats. */ + uint32_t stat_num_entries; + /* Number of TQM slowpath entries. */ + uint32_t tqm_sp_num_entries; + /* Number of TQM ring 0 entries. */ + uint32_t tqm_ring0_num_entries; + /* Number of TQM ring 1 entries. */ + uint32_t tqm_ring1_num_entries; + /* Number of TQM ring 2 entries. */ + uint32_t tqm_ring2_num_entries; + /* Number of TQM ring 3 entries. */ + uint32_t tqm_ring3_num_entries; + /* Number of TQM ring 4 entries. */ + uint32_t tqm_ring4_num_entries; + /* Number of TQM ring 5 entries. */ + uint32_t tqm_ring5_num_entries; + /* Number of TQM ring 6 entries. */ + uint32_t tqm_ring6_num_entries; + /* Number of TQM ring 7 entries. */ + uint32_t tqm_ring7_num_entries; + /* Number of MR/AV entries. */ + uint32_t mrav_num_entries; + /* Number of Timer entries. */ + uint32_t tim_num_entries; + /* Number of entries to reserve for QP1 */ + uint16_t qp_num_qp1_entries; + /* Number of entries to reserve for L2 */ + uint16_t qp_num_l2_entries; + /* Number of bytes that have been allocated for each context entry. */ + uint16_t qp_entry_size; + /* Number of entries to reserve for L2 */ + uint16_t srq_num_l2_entries; + /* Number of bytes that have been allocated for each context entry. */ + uint16_t srq_entry_size; + /* Number of entries to reserve for L2 */ + uint16_t cq_num_l2_entries; + /* Number of bytes that have been allocated for each context entry. */ + uint16_t cq_entry_size; + /* Number of entries to reserve for VNIC entries */ + uint16_t vnic_num_vnic_entries; + /* Number of entries to reserve for Ring table entries */ + uint16_t vnic_num_ring_table_entries; + /* Number of bytes that have been allocated for each context entry. */ + uint16_t vnic_entry_size; + /* Number of bytes that have been allocated for each context entry. */ + uint16_t stat_entry_size; + /* Number of bytes that have been allocated for each context entry. */ + uint16_t tqm_entry_size; + /* Number of bytes that have been allocated for each context entry. */ + uint16_t mrav_entry_size; + /* Number of bytes that have been allocated for each context entry. */ + uint16_t tim_entry_size; +} __attribute__((packed)); + +/* hwrm_func_backing_store_cfg_output (size:128b/16B) */ +struct hwrm_func_backing_store_cfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. 
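 *
 * (Illustrative sketch, not part of the generated header: per the
 * `valid` field semantics documented below, a driver polling this 16B
 * response would read the last byte before trusting the rest, e.g.
 *
 *     volatile struct hwrm_func_backing_store_cfg_output *resp = buf;
 *     while (!resp->valid)
 *             ;   // a real driver would bound this with a timeout
 *
 * where `buf` is the hypothetical buffer passed in resp_addr.)
 *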
*/ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/******************************** + * hwrm_func_backing_store_qcfg * + ********************************/ + + +/* hwrm_func_backing_store_qcfg_input (size:128b/16B) */ +struct hwrm_func_backing_store_qcfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer to which the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; +} __attribute__((packed)); + +/* hwrm_func_backing_store_qcfg_output (size:1920b/240B) */ +struct hwrm_func_backing_store_qcfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint32_t flags; + /* + * When set, the firmware only uses on-chip resources and does not + * expect any backing store to be provided by the host driver. This + * mode provides minimal L2 functionality (e.g. limited L2 resources, + * no RoCE). + */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_FLAGS_PREBOOT_MODE \ + UINT32_C(0x1) + uint8_t unused_0[4]; + /* + * This bit must be '1' for the qp fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_QP \ + UINT32_C(0x1) + /* + * This bit must be '1' for the srq fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_SRQ \ + UINT32_C(0x2) + /* + * This bit must be '1' for the cq fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_CQ \ + UINT32_C(0x4) + /* + * This bit must be '1' for the vnic fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_VNIC \ + UINT32_C(0x8) + /* + * This bit must be '1' for the stat fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_STAT \ + UINT32_C(0x10) + /* + * This bit must be '1' for the tqm_sp fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_TQM_SP \ + UINT32_C(0x20) + /* + * This bit must be '1' for the tqm_ring0 fields to be + * configured. 
+ */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_TQM_RING0 \ + UINT32_C(0x40) + /* + * This bit must be '1' for the tqm_ring1 fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_TQM_RING1 \ + UINT32_C(0x80) + /* + * This bit must be '1' for the tqm_ring2 fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_TQM_RING2 \ + UINT32_C(0x100) + /* + * This bit must be '1' for the tqm_ring3 fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_TQM_RING3 \ + UINT32_C(0x200) + /* + * This bit must be '1' for the tqm_ring4 fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_TQM_RING4 \ + UINT32_C(0x400) + /* + * This bit must be '1' for the tqm_ring5 fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_TQM_RING5 \ + UINT32_C(0x800) + /* + * This bit must be '1' for the tqm_ring6 fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_TQM_RING6 \ + UINT32_C(0x1000) + /* + * This bit must be '1' for the tqm_ring7 fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_TQM_RING7 \ + UINT32_C(0x2000) + /* + * This bit must be '1' for the mrav fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_MRAV \ + UINT32_C(0x4000) + /* + * This bit must be '1' for the tim fields to be + * configured. + */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_TIM \ + UINT32_C(0x8000) + /* QPC page size and level. */ + uint8_t qpc_pg_size_qpc_lvl; + /* QPC PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_LVL_LVL_2 + /* QPC page size. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_PG_SIZE_PG_1G + /* SRQ page size and level. */ + uint8_t srq_pg_size_srq_lvl; + /* SRQ PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_LVL_SFT 0 + /* PBL pointer is physical start address. 
*/ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_LVL_LVL_2 + /* SRQ page size. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_PG_SIZE_PG_1G + /* CQ page size and level. */ + uint8_t cq_pg_size_cq_lvl; + /* CQ PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_LVL_LVL_2 + /* CQ page size. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_PG_SIZE_PG_1G + /* VNIC page size and level. */ + uint8_t vnic_pg_size_vnic_lvl; + /* VNIC PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. 
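 *
 * (Illustrative sketch, not part of the generated header: a driver
 * decoding one of these bytes masks out the two subfields, e.g.
 *
 *     uint8_t lvl = resp->vnic_pg_size_vnic_lvl &
 *                   HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_LVL_MASK;
 *     uint8_t pg  = resp->vnic_pg_size_vnic_lvl &
 *                   HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_PG_SIZE_MASK;
 *
 * comparing `pg` against the VNIC_PG_SIZE_PG_* values defined below;
 * `resp` is a hypothetical struct hwrm_func_backing_store_qcfg_output
 * pointer.)
 *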
*/ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_LVL_LVL_2 + /* VNIC page size. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_PG_SIZE_PG_1G + /* Stat page size and level. */ + uint8_t stat_pg_size_stat_lvl; + /* Stat PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_LVL_LVL_2 + /* Stat page size. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_PG_SIZE_PG_1G + /* TQM slow path page size and level. */ + uint8_t tqm_sp_pg_size_tqm_sp_lvl; + /* TQM slow path PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. 
*/ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_LVL_LVL_2 + /* TQM slow path page size. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_PG_SIZE_PG_1G + /* TQM ring 0 page size and level. */ + uint8_t tqm_ring0_pg_size_tqm_ring0_lvl; + /* TQM ring 0 PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_LVL_LVL_2 + /* TQM ring 0 page size. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_PG_SIZE_PG_1G + /* TQM ring 1 page size and level. */ + uint8_t tqm_ring1_pg_size_tqm_ring1_lvl; + /* TQM ring 1 PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. 
*/ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_LVL_LVL_2 + /* TQM ring 1 page size. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_PG_SIZE_PG_1G + /* TQM ring 2 page size and level. */ + uint8_t tqm_ring2_pg_size_tqm_ring2_lvl; + /* TQM ring 2 PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_LVL_LVL_2 + /* TQM ring 2 page size. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_PG_SIZE_PG_1G + /* TQM ring 3 page size and level. */ + uint8_t tqm_ring3_pg_size_tqm_ring3_lvl; + /* TQM ring 3 PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. 
*/ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_LVL_LVL_2 + /* TQM ring 3 page size. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_PG_SIZE_PG_1G + /* TQM ring 4 page size and level. */ + uint8_t tqm_ring4_pg_size_tqm_ring4_lvl; + /* TQM ring 4 PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_LVL_LVL_2 + /* TQM ring 4 page size. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_PG_SIZE_PG_1G + /* TQM ring 5 page size and level. */ + uint8_t tqm_ring5_pg_size_tqm_ring5_lvl; + /* TQM ring 5 PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_LVL_SFT 0 + /* PBL pointer is physical start address. 
*/ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_LVL_LVL_2 + /* TQM ring 5 page size. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_PG_SIZE_PG_1G + /* TQM ring 6 page size and level. */ + uint8_t tqm_ring6_pg_size_tqm_ring6_lvl; + /* TQM ring 6 PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_LVL_LVL_2 + /* TQM ring 6 page size. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_PG_SIZE_PG_1G + /* TQM ring 7 page size and level. */ + uint8_t tqm_ring7_pg_size_tqm_ring7_lvl; + /* TQM ring 7 PBL indirect levels. 
*/ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_LVL_LVL_2 + /* TQM ring 7 page size. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_PG_SIZE_PG_1G + /* MR/AV page size and level. */ + uint8_t mrav_pg_size_mrav_lvl; + /* MR/AV PBL indirect levels. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_LVL_LVL_2 + /* MR/AV page size. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_PG_SIZE_PG_1G + /* Timer page size and level. */ + uint8_t tim_pg_size_tim_lvl; + /* Timer PBL indirect levels. 
*/ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_LVL_MASK \ + UINT32_C(0xf) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_LVL_SFT 0 + /* PBL pointer is physical start address. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_LVL_LVL_0 \ + UINT32_C(0x0) + /* PBL pointer points to PTE table. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_LVL_LVL_1 \ + UINT32_C(0x1) + /* PBL pointer points to PDE table with each entry pointing to PTE tables. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_LVL_LVL_2 \ + UINT32_C(0x2) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_LVL_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_LVL_LVL_2 + /* Timer page size. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_PG_SIZE_MASK \ + UINT32_C(0xf0) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_PG_SIZE_SFT 4 + /* 4KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_PG_SIZE_PG_4K \ + (UINT32_C(0x0) << 4) + /* 8KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_PG_SIZE_PG_8K \ + (UINT32_C(0x1) << 4) + /* 64KB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_PG_SIZE_PG_64K \ + (UINT32_C(0x2) << 4) + /* 2MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_PG_SIZE_PG_2M \ + (UINT32_C(0x3) << 4) + /* 8MB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_PG_SIZE_PG_8M \ + (UINT32_C(0x4) << 4) + /* 1GB. */ + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_PG_SIZE_PG_1G \ + (UINT32_C(0x5) << 4) + #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_PG_SIZE_LAST \ + HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_PG_SIZE_PG_1G + /* QP page directory. */ + uint64_t qpc_page_dir; + /* SRQ page directory. */ + uint64_t srq_page_dir; + /* CQ page directory. */ + uint64_t cq_page_dir; + /* VNIC page directory. */ + uint64_t vnic_page_dir; + /* Stat page directory. */ + uint64_t stat_page_dir; + /* TQM slowpath page directory. */ + uint64_t tqm_sp_page_dir; + /* TQM ring 0 page directory. */ + uint64_t tqm_ring0_page_dir; + /* TQM ring 1 page directory. */ + uint64_t tqm_ring1_page_dir; + /* TQM ring 2 page directory. */ + uint64_t tqm_ring2_page_dir; + /* TQM ring 3 page directory. */ + uint64_t tqm_ring3_page_dir; + /* TQM ring 4 page directory. */ + uint64_t tqm_ring4_page_dir; + /* TQM ring 5 page directory. */ + uint64_t tqm_ring5_page_dir; + /* TQM ring 6 page directory. */ + uint64_t tqm_ring6_page_dir; + /* TQM ring 7 page directory. */ + uint64_t tqm_ring7_page_dir; + /* MR/AV page directory. */ + uint64_t mrav_page_dir; + /* Timer page directory. */ + uint64_t tim_page_dir; + /* Number of entries to reserve for QP1 */ + uint16_t qp_num_qp1_entries; + /* Number of entries to reserve for L2 */ + uint16_t qp_num_l2_entries; + /* Number of QPs. */ + uint32_t qp_num_entries; + /* Number of SRQs. */ + uint32_t srq_num_entries; + /* Number of entries to reserve for L2 */ + uint16_t srq_num_l2_entries; + /* Number of entries to reserve for L2 */ + uint16_t cq_num_l2_entries; + /* Number of CQs. */ + uint32_t cq_num_entries; + /* Number of entries to reserve for VNIC entries */ + uint16_t vnic_num_vnic_entries; + /* Number of entries to reserve for Ring table entries */ + uint16_t vnic_num_ring_table_entries; + /* Number of Stats. */ + uint32_t stat_num_entries; + /* Number of TQM slowpath entries. */ + uint32_t tqm_sp_num_entries; + /* Number of TQM ring 0 entries. */ + uint32_t tqm_ring0_num_entries; + /* Number of TQM ring 1 entries. */ + uint32_t tqm_ring1_num_entries; + /* Number of TQM ring 2 entries. */ + uint32_t tqm_ring2_num_entries; + /* Number of TQM ring 3 entries. 
*/ + uint32_t tqm_ring3_num_entries; + /* Number of TQM ring 4 entries. */ + uint32_t tqm_ring4_num_entries; + /* Number of TQM ring 5 entries. */ + uint32_t tqm_ring5_num_entries; + /* Number of TQM ring 6 entries. */ + uint32_t tqm_ring6_num_entries; + /* Number of TQM ring 7 entries. */ + uint32_t tqm_ring7_num_entries; + /* Number of MR/AV entries. */ + uint32_t mrav_num_entries; + /* Number of Timer entries. */ + uint32_t tim_num_entries; + uint8_t unused_1[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/********************* + * hwrm_port_phy_cfg * + *********************/ + + +/* hwrm_port_phy_cfg_input (size:448b/56B) */ +struct hwrm_port_phy_cfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer to which the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t flags; + /* + * When this bit is set to '1', the PHY for the port shall + * be reset. + * + * # If this bit is set to 1, then the HWRM shall reset the + * PHY after applying PHY configuration changes specified + * in this command. + * # In order to guarantee that PHY configuration changes + * specified in this command take effect, the HWRM + * client should set this flag to 1. + * # If this bit is not set to 1, then the HWRM may reset + * the PHY depending on the current PHY configuration and + * settings specified in this command. + */ + #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY \ + UINT32_C(0x1) + /* Deprecated bit. Do not use. */ + #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_DEPRECATED \ + UINT32_C(0x2) + /* + * When this bit is set to '1', the link shall be forced to + * the force_link_speed value. + * + * When this bit is set to '1', the HWRM client should + * not enable any of the auto-negotiation related + * fields represented by auto_XXX fields in this command. + * When this bit is set to '1' and the HWRM client has + * enabled an auto_XXX field in this command, then the + * HWRM shall ignore the enabled auto_XXX field. + * + * When this bit is set to zero, the link + * shall be allowed to autoneg. + */ + #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE \ + UINT32_C(0x4) + /* + * When this bit is set to '1', the auto-negotiation process + * shall be restarted on the link. + */ + #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG \ + UINT32_C(0x8) + /* + * When this bit is set to '1', Energy Efficient Ethernet + * (EEE) is requested to be enabled on this link. 
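 *
 * (Illustrative sketch, not part of the generated header: forcing the
 * link, per the FORCE flag semantics above, would look like
 *
 *     cfg.flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE |
 *                 HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
 *
 * with none of the auto_XXX enables set, since the HWRM ignores them
 * while FORCE is set; `cfg` is a hypothetical
 * struct hwrm_port_phy_cfg_input.)
 *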
+ * If EEE is not supported on this port, then this flag + * shall be ignored by the HWRM. + */ + #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_ENABLE \ + UINT32_C(0x10) + /* + * When this bit is set to '1', Energy Efficient Ethernet + * (EEE) is requested to be disabled on this link. + * If EEE is not supported on this port, then this flag + * shall be ignored by the HWRM. + */ + #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_DISABLE \ + UINT32_C(0x20) + /* + * When this bit is set to '1' and EEE is enabled on this + * link, then TX LPI is requested to be enabled on the link. + * If EEE is not supported on this port, then this flag + * shall be ignored by the HWRM. + * If EEE is disabled on this port, then this flag shall be + * ignored by the HWRM. + */ + #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_TX_LPI_ENABLE \ + UINT32_C(0x40) + /* + * When this bit is set to '1' and EEE is enabled on this + * link, then TX LPI is requested to be disabled on the link. + * If EEE is not supported on this port, then this flag + * shall be ignored by the HWRM. + * If EEE is disabled on this port, then this flag shall be + * ignored by the HWRM. + */ + #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_TX_LPI_DISABLE \ + UINT32_C(0x80) + /* + * When set to 1, then the HWRM shall enable FEC autonegotiation + * on this port if supported. + * When set to 0, then this flag shall be ignored. + * If FEC autonegotiation is not supported, then the HWRM shall ignore this + * flag. + */ + #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_AUTONEG_ENABLE \ + UINT32_C(0x100) + /* + * When set to 1, then the HWRM shall disable FEC autonegotiation + * on this port if supported. + * When set to 0, then this flag shall be ignored. + * If FEC autonegotiation is not supported, then the HWRM shall ignore this + * flag. + */ + #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_AUTONEG_DISABLE \ + UINT32_C(0x200) + /* + * When set to 1, then the HWRM shall enable FEC CLAUSE 74 (Fire Code) + * on this port if supported. + * When set to 0, then this flag shall be ignored. + * If FEC CLAUSE 74 is not supported, then the HWRM shall ignore this + * flag. + */ + #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE74_ENABLE \ + UINT32_C(0x400) + /* + * When set to 1, then the HWRM shall disable FEC CLAUSE 74 (Fire Code) + * on this port if supported. + * When set to 0, then this flag shall be ignored. + * If FEC CLAUSE 74 is not supported, then the HWRM shall ignore this + * flag. + */ + #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE74_DISABLE \ + UINT32_C(0x800) + /* + * When set to 1, then the HWRM shall enable FEC CLAUSE 91 (Reed Solomon) + * on this port if supported. + * When set to 0, then this flag shall be ignored. + * If FEC CLAUSE 91 is not supported, then the HWRM shall ignore this + * flag. + */ + #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE91_ENABLE \ + UINT32_C(0x1000) + /* + * When set to 1, then the HWRM shall disable FEC CLAUSE 91 (Reed Solomon) + * on this port if supported. + * When set to 0, then this flag shall be ignored. + * If FEC CLAUSE 91 is not supported, then the HWRM shall ignore this + * flag. + */ + #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE91_DISABLE \ + UINT32_C(0x2000) + /* + * When this bit is set to '1', the link shall be forced to + * be taken down. + * + * # When this bit is set to '1', all other + * command input settings related to the link speed shall + * be ignored. + * Once the link state is forced down, it can be + * explicitly cleared from that state by setting this flag + * to '0'.
+ * # If this flag is set to '0', then the link shall be + * cleared from forced down state if the link is in forced + * down state. + * There may be conditions (e.g. out-of-band or sideband + * configuration changes for the link) outside the scope + * of the HWRM implementation that may clear forced down + * link state. + */ + #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN \ + UINT32_C(0x4000) + uint32_t enables; + /* + * This bit must be '1' for the auto_mode field to be + * configured. + */ + #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE \ + UINT32_C(0x1) + /* + * This bit must be '1' for the auto_duplex field to be + * configured. + */ + #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX \ + UINT32_C(0x2) + /* + * This bit must be '1' for the auto_pause field to be + * configured. + */ + #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE \ + UINT32_C(0x4) + /* + * This bit must be '1' for the auto_link_speed field to be + * configured. + */ + #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED \ + UINT32_C(0x8) + /* + * This bit must be '1' for the auto_link_speed_mask field to be + * configured. + */ + #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK \ + UINT32_C(0x10) + /* + * This bit must be '1' for the wirespeed field to be + * configured. + */ + #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_WIRESPEED \ + UINT32_C(0x20) + /* + * This bit must be '1' for the lpbk field to be + * configured. + */ + #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_LPBK \ + UINT32_C(0x40) + /* + * This bit must be '1' for the preemphasis field to be + * configured. + */ + #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_PREEMPHASIS \ + UINT32_C(0x80) + /* + * This bit must be '1' for the force_pause field to be + * configured. + */ + #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE \ + UINT32_C(0x100) + /* + * This bit must be '1' for the eee_link_speed_mask field to be + * configured. + */ + #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_EEE_LINK_SPEED_MASK \ + UINT32_C(0x200) + /* + * This bit must be '1' for the tx_lpi_timer field to be + * configured. + */ + #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_TX_LPI_TIMER \ + UINT32_C(0x400) + /* Port ID of port that is to be configured. */ + uint16_t port_id; + /* + * This is the speed that will be used if the force + * bit is '1'. If an unsupported speed is selected, an error + * will be generated.
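+ * + * The force values below encode the speed in 100 Mb/s units + * (e.g. 0xa is 1 Gb, 0x1f4 is 50 Gb), with 10 Mb carried as the + * special value 0xffff. As an illustrative sketch only (not part + * of this specification), a DPDK caller could force 25 Gb with: + * + * struct hwrm_port_phy_cfg_input req = { 0 }; + * req.flags = rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE); + * req.force_link_speed = rte_cpu_to_le_16( + * HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_25GB);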
+ */ + uint16_t force_link_speed; + /* 100Mb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100MB UINT32_C(0x1) + /* 1Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_1GB UINT32_C(0xa) + /* 2Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_2GB UINT32_C(0x14) + /* 2.5Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_2_5GB UINT32_C(0x19) + /* 10Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB UINT32_C(0x64) + /* 20Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_20GB UINT32_C(0xc8) + /* 25Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_25GB UINT32_C(0xfa) + /* 40Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB UINT32_C(0x190) + /* 50Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB UINT32_C(0x1f4) + /* 100Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB UINT32_C(0x3e8) + /* 10Mb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10MB UINT32_C(0xffff) + #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_LAST \ + HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10MB + /* + * This value is used to identify what autoneg mode is + * used when the link speed is not being forced. + */ + uint8_t auto_mode; + /* Disable autoneg or autoneg disabled. No speeds are selected. */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE UINT32_C(0x0) + /* Select all possible speeds for autoneg mode. */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS UINT32_C(0x1) + /* + * Select only the auto_link_speed speed for autoneg mode. This mode has + * been DEPRECATED. An HWRM client should not use this mode. + */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ONE_SPEED UINT32_C(0x2) + /* + * Select the auto_link_speed or any speed below that speed for autoneg. + * This mode has been DEPRECATED. An HWRM client should not use this mode. + */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ONE_OR_BELOW UINT32_C(0x3) + /* + * Select the speeds based on the corresponding link speed mask value + * that is provided. + */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK UINT32_C(0x4) + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_LAST \ + HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK + /* + * This is the duplex setting that will be used if the autoneg_mode + * is "one_speed" or "one_or_below". + */ + uint8_t auto_duplex; + /* Half Duplex will be requested. */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF UINT32_C(0x0) + /* Full duplex will be requested. */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL UINT32_C(0x1) + /* Both Half and Full duplex will be requested. */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH UINT32_C(0x2) + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_LAST \ + HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH + /* + * This value is used to configure the pause that will be + * used for autonegotiation. + * Add text on the usage of auto_pause and force_pause. + */ + uint8_t auto_pause; + /* + * When this bit is '1', Generation of tx pause messages + * has been requested. Disabled otherwise. + */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX \ + UINT32_C(0x1) + /* + * When this bit is '1', Reception of rx pause messages + * has been requested. Disabled otherwise. + */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX \ + UINT32_C(0x2) + /* + * When set to 1, the advertisement of pause is enabled.
+ * + * # When the auto_mode is not set to none and this flag is + * set to 1, then the auto_pause bits on this port are being + * advertised and autoneg pause results are being interpreted. + * # When the auto_mode is not set to none and this + * flag is set to 0, the pause is forced as indicated in + * force_pause, and also advertised as auto_pause bits, but + * the autoneg results are not interpreted since the pause + * configuration is being forced. + * # When the auto_mode is set to none and this flag is set to + * 1, auto_pause bits should be ignored and should be set to 0. + */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_AUTONEG_PAUSE \ + UINT32_C(0x4) + uint8_t unused_0; + /* + * This is the speed that will be used if the autoneg_mode + * is "one_speed" or "one_or_below". If an unsupported speed + * is selected, an error will be generated. + */ + uint16_t auto_link_speed; + /* 100Mb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB UINT32_C(0x1) + /* 1Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB UINT32_C(0xa) + /* 2Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2GB UINT32_C(0x14) + /* 2.5Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB UINT32_C(0x19) + /* 10Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_10GB UINT32_C(0x64) + /* 20Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB UINT32_C(0xc8) + /* 25Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB UINT32_C(0xfa) + /* 40Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_40GB UINT32_C(0x190) + /* 50Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_50GB UINT32_C(0x1f4) + /* 100Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100GB UINT32_C(0x3e8) + /* 10Mb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_10MB UINT32_C(0xffff) + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_LAST \ + HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_10MB + /* + * This is a mask of link speeds that will be used if + * autoneg_mode is "mask". If an unsupported speed is enabled, + * an error will be generated.
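+ * + * A hedged example of composing this mask (illustrative only): + * to autonegotiate either 10 Gb or 25 Gb, a client would set + * auto_mode to SPEED_MASK, set the AUTO_LINK_SPEED_MASK enables + * bit, and program: + * + * req.auto_link_speed_mask = rte_cpu_to_le_16( + * HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB | + * HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB);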
+ */ + uint16_t auto_link_speed_mask; + /* 100Mb link speed (Half-duplex) */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MBHD \ + UINT32_C(0x1) + /* 100Mb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB \ + UINT32_C(0x2) + /* 1Gb link speed (Half-duplex) */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GBHD \ + UINT32_C(0x4) + /* 1Gb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB \ + UINT32_C(0x8) + /* 2Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2GB \ + UINT32_C(0x10) + /* 2.5Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB \ + UINT32_C(0x20) + /* 10Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB \ + UINT32_C(0x40) + /* 20Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB \ + UINT32_C(0x80) + /* 25Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB \ + UINT32_C(0x100) + /* 40Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB \ + UINT32_C(0x200) + /* 50Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB \ + UINT32_C(0x400) + /* 100Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB \ + UINT32_C(0x800) + /* 10Mb link speed (Half-duplex) */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10MBHD \ + UINT32_C(0x1000) + /* 10Mb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10MB \ + UINT32_C(0x2000) + /* This value controls the wirespeed feature. */ + uint8_t wirespeed; + /* Wirespeed feature is disabled. */ + #define HWRM_PORT_PHY_CFG_INPUT_WIRESPEED_OFF UINT32_C(0x0) + /* Wirespeed feature is enabled. */ + #define HWRM_PORT_PHY_CFG_INPUT_WIRESPEED_ON UINT32_C(0x1) + #define HWRM_PORT_PHY_CFG_INPUT_WIRESPEED_LAST \ + HWRM_PORT_PHY_CFG_INPUT_WIRESPEED_ON + /* This value controls the loopback setting for the PHY. */ + uint8_t lpbk; + /* No loopback is selected. Normal operation. */ + #define HWRM_PORT_PHY_CFG_INPUT_LPBK_NONE UINT32_C(0x0) + /* + * The HW will be configured with local loopback such that + * host data is sent back to the host without modification. + */ + #define HWRM_PORT_PHY_CFG_INPUT_LPBK_LOCAL UINT32_C(0x1) + /* + * The HW will be configured with remote loopback such that + * port logic will send packets back out the transmitter that + * are received. + */ + #define HWRM_PORT_PHY_CFG_INPUT_LPBK_REMOTE UINT32_C(0x2) + /* + * The HW will be configured with external loopback such that + * host data is sent on the transmitter and based on the external + * loopback connection the data will be received without modification. + */ + #define HWRM_PORT_PHY_CFG_INPUT_LPBK_EXTERNAL UINT32_C(0x3) + #define HWRM_PORT_PHY_CFG_INPUT_LPBK_LAST \ + HWRM_PORT_PHY_CFG_INPUT_LPBK_EXTERNAL + /* + * This value is used to configure the pause that will be + * used for force mode. + */ + uint8_t force_pause; + /* + * When this bit is '1', Generation of tx pause messages + * is supported. Disabled otherwise. + */ + #define HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX UINT32_C(0x1) + /* + * When this bit is '1', Reception of rx pause messages + * is supported. Disabled otherwise. + */ + #define HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX UINT32_C(0x2) + uint8_t unused_1; + /* + * This value controls the pre-emphasis to be used for the + * link. Driver should not set this value (use + * enable.preemphasis = 0) unless driver is sure of setting.
+ * Normally HWRM FW will determine proper pre-emphasis. + */ + uint32_t preemphasis; + /* + * Setting for link speed mask that is used to + * advertise speeds during autonegotiation when EEE is enabled. + * This field is valid only when EEE is enabled. + * The speeds specified in this field shall be a subset of + * speeds specified in auto_link_speed_mask. + * If EEE is enabled, then at least one speed shall be provided + * in this mask. + */ + uint16_t eee_link_speed_mask; + /* Reserved */ + #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD1 \ + UINT32_C(0x1) + /* 100Mb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_100MB \ + UINT32_C(0x2) + /* Reserved */ + #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD2 \ + UINT32_C(0x4) + /* 1Gb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_1GB \ + UINT32_C(0x8) + /* Reserved */ + #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD3 \ + UINT32_C(0x10) + /* Reserved */ + #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD4 \ + UINT32_C(0x20) + /* 10Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_10GB \ + UINT32_C(0x40) + uint8_t unused_2[2]; + /* + * Requested setting of TX LPI timer in microseconds. + * This field is valid only when EEE is enabled and TX LPI is + * enabled. + */ + uint32_t tx_lpi_timer; + #define HWRM_PORT_PHY_CFG_INPUT_TX_LPI_TIMER_MASK UINT32_C(0xffffff) + #define HWRM_PORT_PHY_CFG_INPUT_TX_LPI_TIMER_SFT 0 + uint32_t unused_3; +} __attribute__((packed)); + +/* hwrm_port_phy_cfg_output (size:128b/16B) */ +struct hwrm_port_phy_cfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/* hwrm_port_phy_cfg_cmd_err (size:64b/8B) */ +struct hwrm_port_phy_cfg_cmd_err { + /* + * command specific error codes that go to + * the cmd_err field in Common HWRM Error Response. + */ + uint8_t code; + /* Unknown error */ + #define HWRM_PORT_PHY_CFG_CMD_ERR_CODE_UNKNOWN UINT32_C(0x0) + /* Unable to complete operation due to invalid speed */ + #define HWRM_PORT_PHY_CFG_CMD_ERR_CODE_ILLEGAL_SPEED UINT32_C(0x1) + /* + * retry the command since the phy is not ready. + * retry count is returned in opaque_0. + * This is only valid for the first command and + * this value will not change for successive calls. + * but if a 0 is returned at any time then this should + * be treated as an unrecoverable failure. + * + * retry interval in milliseconds is returned in opaque_1. + * This specifies the time that the user should wait before + * issuing the next port_phy_cfg command.
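+ * + * A sketch of the retry handling described above (illustrative + * only; send_port_phy_cfg is a hypothetical helper): + * + * for (i = 0; i < opaque_0; i++) { + * rc = send_port_phy_cfg(bp); + * if (rc != HWRM_PORT_PHY_CFG_CMD_ERR_CODE_RETRY) + * break; + * rte_delay_ms(opaque_1); + * }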
+ */ + #define HWRM_PORT_PHY_CFG_CMD_ERR_CODE_RETRY UINT32_C(0x2) + #define HWRM_PORT_PHY_CFG_CMD_ERR_CODE_LAST \ + HWRM_PORT_PHY_CFG_CMD_ERR_CODE_RETRY + uint8_t unused_0[7]; +} __attribute__((packed)); + +/********************** + * hwrm_port_phy_qcfg * + **********************/ + + +/* hwrm_port_phy_qcfg_input (size:192b/24B) */ +struct hwrm_port_phy_qcfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Port ID of port that is to be queried. */ + uint16_t port_id; + uint8_t unused_0[6]; +} __attribute__((packed)); + +/* hwrm_port_phy_qcfg_output (size:768b/96B) */ +struct hwrm_port_phy_qcfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* This value indicates the current link status. */ + uint8_t link; + /* There is no link or cable detected. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_NO_LINK UINT32_C(0x0) + /* There is no link, but a cable has been detected. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SIGNAL UINT32_C(0x1) + /* There is a link. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK UINT32_C(0x2) + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LAST \ + HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK + uint8_t unused_0; + /* This value indicates the current link speed of the connection. */ + uint16_t link_speed; + /* 100Mb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB UINT32_C(0x1) + /* 1Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB UINT32_C(0xa) + /* 2Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB UINT32_C(0x14) + /* 2.5Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB UINT32_C(0x19) + /* 10Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB UINT32_C(0x64) + /* 20Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB UINT32_C(0xc8) + /* 25Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB UINT32_C(0xfa) + /* 40Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB UINT32_C(0x190) + /* 50Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB UINT32_C(0x1f4) + /* 100Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB UINT32_C(0x3e8) + /* 10Mb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10MB UINT32_C(0xffff) + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_LAST \ + HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10MB + /* + * This value indicates the duplex of the current + * configuration.
+ */ + uint8_t duplex_cfg; + /* Half Duplex connection. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_CFG_HALF UINT32_C(0x0) + /* Full duplex connection. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_CFG_FULL UINT32_C(0x1) + #define HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_CFG_LAST \ + HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_CFG_FULL + /* + * This value is used to indicate the current + * pause configuration. When autoneg is enabled, this value + * represents the autoneg results of pause configuration. + */ + uint8_t pause; + /* + * When this bit is '1', Generation of tx pause messages + * is supported. Disabled otherwise. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX UINT32_C(0x1) + /* + * When this bit is '1', Reception of rx pause messages + * is supported. Disabled otherwise. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX UINT32_C(0x2) + /* + * The supported speeds for the port. This is a bit mask. + * For each speed that is supported, the corresponding + * bit will be set to '1'. + */ + uint16_t support_speeds; + /* 100Mb link speed (Half-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MBHD \ + UINT32_C(0x1) + /* 100Mb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MB \ + UINT32_C(0x2) + /* 1Gb link speed (Half-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GBHD \ + UINT32_C(0x4) + /* 1Gb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GB \ + UINT32_C(0x8) + /* 2Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2GB \ + UINT32_C(0x10) + /* 2.5Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB \ + UINT32_C(0x20) + /* 10Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10GB \ + UINT32_C(0x40) + /* 20Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB \ + UINT32_C(0x80) + /* 25Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB \ + UINT32_C(0x100) + /* 40Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_40GB \ + UINT32_C(0x200) + /* 50Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB \ + UINT32_C(0x400) + /* 100Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB \ + UINT32_C(0x800) + /* 10Mb link speed (Half-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10MBHD \ + UINT32_C(0x1000) + /* 10Mb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10MB \ + UINT32_C(0x2000) + /* + * Current setting of forced link speed. + * When the link speed is not being forced, this + * value shall be set to 0.
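+ * As with hwrm_port_phy_cfg_input.force_link_speed, the values + * below encode the speed in 100 Mb/s units, with 0xffff as a + * special value for 10 Mb.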
+ */ + uint16_t force_link_speed; + /* 100Mb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_100MB UINT32_C(0x1) + /* 1Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_1GB UINT32_C(0xa) + /* 2Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_2GB UINT32_C(0x14) + /* 2.5Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_2_5GB UINT32_C(0x19) + /* 10Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_10GB UINT32_C(0x64) + /* 20Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_20GB UINT32_C(0xc8) + /* 25Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_25GB UINT32_C(0xfa) + /* 40Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_40GB \ + UINT32_C(0x190) + /* 50Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_50GB \ + UINT32_C(0x1f4) + /* 100Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_100GB \ + UINT32_C(0x3e8) + /* 10Mb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_10MB \ + UINT32_C(0xffff) + #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_LAST \ + HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_10MB + /* Current setting of auto negotiation mode. */ + uint8_t auto_mode; + /* Disable autoneg or autoneg disabled. No speeds are selected. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE UINT32_C(0x0) + /* Select all possible speeds for autoneg mode. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_ALL_SPEEDS UINT32_C(0x1) + /* + * Select only the auto_link_speed speed for autoneg mode. This mode has + * been DEPRECATED. An HWRM client should not use this mode. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_ONE_SPEED UINT32_C(0x2) + /* + * Select the auto_link_speed or any speed below that speed for autoneg. + * This mode has been DEPRECATED. An HWRM client should not use this mode. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_ONE_OR_BELOW UINT32_C(0x3) + /* + * Select the speeds based on the corresponding link speed mask value + * that is provided. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_SPEED_MASK UINT32_C(0x4) + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_LAST \ + HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_SPEED_MASK + /* + * Current setting of pause autonegotiation. + * Move autoneg_pause flag here. + */ + uint8_t auto_pause; + /* + * When this bit is '1', Generation of tx pause messages + * has been requested. Disabled otherwise. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_PAUSE_TX \ + UINT32_C(0x1) + /* + * When this bit is '1', Reception of rx pause messages + * has been requested. Disabled otherwise. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_PAUSE_RX \ + UINT32_C(0x2) + /* + * When set to 1, the advertisement of pause is enabled. + * + * # When the auto_mode is not set to none and this flag is + * set to 1, then the auto_pause bits on this port are being + * advertised and autoneg pause results are being interpreted. + * # When the auto_mode is not set to none and this + * flag is set to 0, the pause is forced as indicated in + * force_pause, and also advertised as auto_pause bits, but + * the autoneg results are not interpreted since the pause + * configuration is being forced. + * # When the auto_mode is set to none and this flag is set to + * 1, auto_pause bits should be ignored and should be set to 0. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_PAUSE_AUTONEG_PAUSE \ + UINT32_C(0x4) + /* + * Current setting for auto_link_speed.
This field is only + * valid when auto_mode is set to "one_speed" or "one_or_below". + */ + uint16_t auto_link_speed; + /* 100Mb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_100MB UINT32_C(0x1) + /* 1Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_1GB UINT32_C(0xa) + /* 2Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_2GB UINT32_C(0x14) + /* 2.5Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_2_5GB UINT32_C(0x19) + /* 10Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_10GB UINT32_C(0x64) + /* 20Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_20GB UINT32_C(0xc8) + /* 25Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_25GB UINT32_C(0xfa) + /* 40Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_40GB UINT32_C(0x190) + /* 50Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_50GB UINT32_C(0x1f4) + /* 100Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_100GB UINT32_C(0x3e8) + /* 10Mb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_10MB \ + UINT32_C(0xffff) + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_LAST \ + HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_10MB + /* + * Current setting for auto_link_speed_mask that is used to + * advertise speeds during autonegotiation. + * This field is only valid when auto_mode is set to "mask". + * The speeds specified in this field shall be a subset of + * supported speeds on this port. + */ + uint16_t auto_link_speed_mask; + /* 100Mb link speed (Half-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_100MBHD \ + UINT32_C(0x1) + /* 100Mb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_100MB \ + UINT32_C(0x2) + /* 1Gb link speed (Half-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_1GBHD \ + UINT32_C(0x4) + /* 1Gb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_1GB \ + UINT32_C(0x8) + /* 2Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_2GB \ + UINT32_C(0x10) + /* 2.5Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_2_5GB \ + UINT32_C(0x20) + /* 10Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_10GB \ + UINT32_C(0x40) + /* 20Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_20GB \ + UINT32_C(0x80) + /* 25Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_25GB \ + UINT32_C(0x100) + /* 40Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_40GB \ + UINT32_C(0x200) + /* 50Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_50GB \ + UINT32_C(0x400) + /* 100Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_100GB \ + UINT32_C(0x800) + /* 10Mb link speed (Half-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_10MBHD \ + UINT32_C(0x1000) + /* 10Mb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_10MB \ + UINT32_C(0x2000) + /* Current setting for wirespeed. */ + uint8_t wirespeed; + /* Wirespeed feature is disabled. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_WIRESPEED_OFF UINT32_C(0x0) + /* Wirespeed feature is enabled. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_WIRESPEED_ON UINT32_C(0x1) + #define HWRM_PORT_PHY_QCFG_OUTPUT_WIRESPEED_LAST \ + HWRM_PORT_PHY_QCFG_OUTPUT_WIRESPEED_ON + /* Current setting for loopback.
*/ + uint8_t lpbk; + /* No loopback is selected. Normal operation. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_NONE UINT32_C(0x0) + /* + * The HW will be configured with local loopback such that + * host data is sent back to the host without modification. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_LOCAL UINT32_C(0x1) + /* + * The HW will be configured with remote loopback such that + * port logic will send packets back out the transmitter that + * are received. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_REMOTE UINT32_C(0x2) + /* + * The HW will be configured with external loopback such that + * host data is sent on the transmitter and based on the external + * loopback connection the data will be received without modification. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_EXTERNAL UINT32_C(0x3) + #define HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_LAST \ + HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_EXTERNAL + /* + * Current setting of forced pause. + * When the pause configuration is not being forced, then + * this value shall be set to 0. + */ + uint8_t force_pause; + /* + * When this bit is '1', Generation of tx pause messages + * is supported. Disabled otherwise. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_PAUSE_TX UINT32_C(0x1) + /* + * When this bit is '1', Reception of rx pause messages + * is supported. Disabled otherwise. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_PAUSE_RX UINT32_C(0x2) + /* + * This value indicates the current status of the optics module on + * this port. + */ + uint8_t module_status; + /* Module is inserted and accepted */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_NONE \ + UINT32_C(0x0) + /* Module is rejected and transmit side Laser is disabled. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_DISABLETX \ + UINT32_C(0x1) + /* Module mismatch warning. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_WARNINGMSG \ + UINT32_C(0x2) + /* Module is rejected and powered down. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_PWRDOWN \ + UINT32_C(0x3) + /* Module is not inserted. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_NOTINSERTED \ + UINT32_C(0x4) + /* Module status is not applicable. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_NOTAPPLICABLE \ + UINT32_C(0xff) + #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_LAST \ + HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_NOTAPPLICABLE + /* Current setting for preemphasis. */ + uint32_t preemphasis; + /* This field represents the major version of the PHY. */ + uint8_t phy_maj; + /* This field represents the minor version of the PHY. */ + uint8_t phy_min; + /* This field represents the build version of the PHY. */ + uint8_t phy_bld; + /* This value represents a PHY type.
*/ + uint8_t phy_type; + /* Unknown */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_UNKNOWN \ + UINT32_C(0x0) + /* BASE-CR */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASECR \ + UINT32_C(0x1) + /* BASE-KR4 (Deprecated) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR4 \ + UINT32_C(0x2) + /* BASE-LR */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASELR \ + UINT32_C(0x3) + /* BASE-SR */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASESR \ + UINT32_C(0x4) + /* BASE-KR2 (Deprecated) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR2 \ + UINT32_C(0x5) + /* BASE-KX */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKX \ + UINT32_C(0x6) + /* BASE-KR */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR \ + UINT32_C(0x7) + /* BASE-T */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET \ + UINT32_C(0x8) + /* EEE capable BASE-T */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE \ + UINT32_C(0x9) + /* SGMII connected external PHY */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_SGMIIEXTPHY \ + UINT32_C(0xa) + /* 25G_BASECR_CA_L */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_L \ + UINT32_C(0xb) + /* 25G_BASECR_CA_S */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_S \ + UINT32_C(0xc) + /* 25G_BASECR_CA_N */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_N \ + UINT32_C(0xd) + /* 25G_BASESR */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASESR \ + UINT32_C(0xe) + /* 100G_BASECR4 */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASECR4 \ + UINT32_C(0xf) + /* 100G_BASESR4 */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR4 \ + UINT32_C(0x10) + /* 100G_BASELR4 */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASELR4 \ + UINT32_C(0x11) + /* 100G_BASEER4 */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASEER4 \ + UINT32_C(0x12) + /* 100G_BASESR10 */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR10 \ + UINT32_C(0x13) + /* 40G_BASECR4 */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASECR4 \ + UINT32_C(0x14) + /* 40G_BASESR4 */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASESR4 \ + UINT32_C(0x15) + /* 40G_BASELR4 */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASELR4 \ + UINT32_C(0x16) + /* 40G_BASEER4 */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASEER4 \ + UINT32_C(0x17) + /* 40G_ACTIVE_CABLE */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_ACTIVE_CABLE \ + UINT32_C(0x18) + /* 1G_baseT */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASET \ + UINT32_C(0x19) + /* 1G_baseSX */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASESX \ + UINT32_C(0x1a) + /* 1G_baseCX */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASECX \ + UINT32_C(0x1b) + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_LAST \ + HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASECX + /* This value represents a media type. */ + uint8_t media_type; + /* Unknown */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_UNKNOWN UINT32_C(0x0) + /* Twisted Pair */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP UINT32_C(0x1) + /* Direct Attached Copper */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_DAC UINT32_C(0x2) + /* Fiber */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_FIBRE UINT32_C(0x3) + #define HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_LAST \ + HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_FIBRE + /* This value represents a transceiver type. 
*/ + uint8_t xcvr_pkg_type; + /* PHY and MAC are in the same package */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_PKG_TYPE_XCVR_INTERNAL \ + UINT32_C(0x1) + /* PHY and MAC are in different packages */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_PKG_TYPE_XCVR_EXTERNAL \ + UINT32_C(0x2) + #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_PKG_TYPE_LAST \ + HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_PKG_TYPE_XCVR_EXTERNAL + uint8_t eee_config_phy_addr; + /* This field represents PHY address. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_ADDR_MASK \ + UINT32_C(0x1f) + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_ADDR_SFT 0 + /* + * This field represents flags related to EEE configuration. + * These EEE configuration flags are valid only when the + * auto_mode is not set to none (in other words autonegotiation + * is enabled). + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_MASK \ + UINT32_C(0xe0) + #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_SFT 5 + /* + * When set to 1, Energy Efficient Ethernet (EEE) mode is enabled. + * Speeds for autoneg with EEE mode enabled + * are based on eee_link_speed_mask. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_EEE_ENABLED \ + UINT32_C(0x20) + /* + * This flag is valid only when eee_enabled is set to 1. + * + * # If eee_enabled is set to 0, then EEE mode is disabled + * and this flag shall be ignored. + * # If eee_enabled is set to 1 and this flag is set to 1, + * then Energy Efficient Ethernet (EEE) mode is enabled + * and in use. + * # If eee_enabled is set to 1 and this flag is set to 0, + * then Energy Efficient Ethernet (EEE) mode is enabled + * but is currently not in use. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_EEE_ACTIVE \ + UINT32_C(0x40) + /* + * This flag is valid only when eee_enabled is set to 1. + * + * # If eee_enabled is set to 0, then EEE mode is disabled + * and this flag shall be ignored. + * # If eee_enabled is set to 1 and this flag is set to 1, + * then Energy Efficient Ethernet (EEE) mode is enabled + * and TX LPI is enabled. + * # If eee_enabled is set to 1 and this flag is set to 0, + * then Energy Efficient Ethernet (EEE) mode is enabled + * but TX LPI is disabled. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_EEE_TX_LPI \ + UINT32_C(0x80) + /* + * When set to 1, the parallel detection is used to determine + * the speed of the link partner. + * + * Parallel detection is used when an autonegotiation capable + * device is connected to a link partner that is not capable + * of autonegotiation. + */ + uint8_t parallel_detect; + /* + * When set to 1, the parallel detection is used to determine + * the speed of the link partner. + * + * Parallel detection is used when an autonegotiation capable + * device is connected to a link partner that is not capable + * of autonegotiation. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PARALLEL_DETECT UINT32_C(0x1) + /* + * The advertised speeds for the port by the link partner. + * Each advertised speed will be set to '1'.
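+ * For example (illustrative only), a driver could test for a + * 25 Gb advertisement with: + * + * if (rte_le_to_cpu_16(resp->link_partner_adv_speeds) & + * HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_25GB) + * ...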
+ */ + uint16_t link_partner_adv_speeds; + /* 100Mb link speed (Half-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_100MBHD \ + UINT32_C(0x1) + /* 100Mb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_100MB \ + UINT32_C(0x2) + /* 1Gb link speed (Half-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_1GBHD \ + UINT32_C(0x4) + /* 1Gb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_1GB \ + UINT32_C(0x8) + /* 2Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_2GB \ + UINT32_C(0x10) + /* 2.5Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_2_5GB \ + UINT32_C(0x20) + /* 10Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_10GB \ + UINT32_C(0x40) + /* 20Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_20GB \ + UINT32_C(0x80) + /* 25Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_25GB \ + UINT32_C(0x100) + /* 40Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_40GB \ + UINT32_C(0x200) + /* 50Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_50GB \ + UINT32_C(0x400) + /* 100Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_100GB \ + UINT32_C(0x800) + /* 10Mb link speed (Half-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_10MBHD \ + UINT32_C(0x1000) + /* 10Mb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_10MB \ + UINT32_C(0x2000) + /* + * The advertised autoneg for the port by the link partner. + * This field is deprecated and should be set to 0. + */ + uint8_t link_partner_adv_auto_mode; + /* Disable autoneg or autoneg disabled. No speeds are selected. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_NONE \ + UINT32_C(0x0) + /* Select all possible speeds for autoneg mode. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS \ + UINT32_C(0x1) + /* + * Select only the auto_link_speed speed for autoneg mode. This mode has + * been DEPRECATED. An HWRM client should not use this mode. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED \ + UINT32_C(0x2) + /* + * Select the auto_link_speed or any speed below that speed for autoneg. + * This mode has been DEPRECATED. An HWRM client should not use this mode. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_ONE_OR_BELOW \ + UINT32_C(0x3) + /* + * Select the speeds based on the corresponding link speed mask value + * that is provided. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK \ + UINT32_C(0x4) + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_LAST \ + HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK + /* The advertised pause settings on the port by the link partner. */ + uint8_t link_partner_adv_pause; + /* + * When this bit is '1', Generation of tx pause messages + * is supported. Disabled otherwise. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_PAUSE_TX \ + UINT32_C(0x1) + /* + * When this bit is '1', Reception of rx pause messages + * is supported. Disabled otherwise. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_PAUSE_RX \ + UINT32_C(0x2) + /* + * Current setting for link speed mask that is used to + * advertise speeds during autonegotiation when EEE is enabled.
+ * This field is valid only when the eee_enabled flag is set to 1. + * The speeds specified in this field shall be a subset of + * speeds specified in auto_link_speed_mask. + */ + uint16_t adv_eee_link_speed_mask; + /* Reserved */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_RSVD1 \ + UINT32_C(0x1) + /* 100Mb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_100MB \ + UINT32_C(0x2) + /* Reserved */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_RSVD2 \ + UINT32_C(0x4) + /* 1Gb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_1GB \ + UINT32_C(0x8) + /* Reserved */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_RSVD3 \ + UINT32_C(0x10) + /* Reserved */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_RSVD4 \ + UINT32_C(0x20) + /* 10Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_10GB \ + UINT32_C(0x40) + /* + * Current setting for link speed mask that is advertised by + * the link partner when EEE is enabled. + * This field is valid only when the eee_enabled flag is set to 1. + */ + uint16_t link_partner_adv_eee_link_speed_mask; + /* Reserved */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD1 \ + UINT32_C(0x1) + /* 100Mb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_100MB \ + UINT32_C(0x2) + /* Reserved */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD2 \ + UINT32_C(0x4) + /* 1Gb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_1GB \ + UINT32_C(0x8) + /* Reserved */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD3 \ + UINT32_C(0x10) + /* Reserved */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD4 \ + UINT32_C(0x20) + /* 10Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_10GB \ + UINT32_C(0x40) + uint32_t xcvr_identifier_type_tx_lpi_timer; + /* + * Current setting of TX LPI timer in microseconds. + * This field is valid only when the eee_enabled flag is set to 1 + * and tx_lpi_enabled is set to 1. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_TX_LPI_TIMER_MASK \ + UINT32_C(0xffffff) + #define HWRM_PORT_PHY_QCFG_OUTPUT_TX_LPI_TIMER_SFT 0 + /* This value represents transceiver identifier type. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_MASK \ + UINT32_C(0xff000000) + #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_SFT 24 + /* Unknown */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_UNKNOWN \ + (UINT32_C(0x0) << 24) + /* SFP/SFP+/SFP28 */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_SFP \ + (UINT32_C(0x3) << 24) + /* QSFP */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFP \ + (UINT32_C(0xc) << 24) + /* QSFP+ */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFPPLUS \ + (UINT32_C(0xd) << 24) + /* QSFP28 */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFP28 \ + (UINT32_C(0x11) << 24) + #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_LAST \ + HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFP28 + /* + * This value represents the current configuration of + * Forward Error Correction (FEC) on the port. + */ + uint16_t fec_cfg; + /* + * When set to 1, then FEC is not supported on this port. If this flag + * is set to 1, then all other FEC configuration flags shall be ignored.
+ * When set to 0, then FEC is supported as indicated by other + * configuration flags. + * If no cable is attached and the HWRM does not yet know the FEC + * capability, then the HWRM shall set this flag to 1 when reporting + * FEC capability. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_NONE_SUPPORTED \ + UINT32_C(0x1) + /* + * When set to 1, then FEC autonegotiation is supported on this port. + * When set to 0, then FEC autonegotiation is not supported on this port. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_AUTONEG_SUPPORTED \ + UINT32_C(0x2) + /* + * When set to 1, then FEC autonegotiation is enabled on this port. + * When set to 0, then FEC autonegotiation is disabled if supported. + * This flag should be ignored if FEC autonegotiation is not supported on this port. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_AUTONEG_ENABLED \ + UINT32_C(0x4) + /* + * When set to 1, then FEC CLAUSE 74 (Fire Code) is supported on this port. + * When set to 0, then FEC CLAUSE 74 (Fire Code) is not supported on this port. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_CLAUSE74_SUPPORTED \ + UINT32_C(0x8) + /* + * When set to 1, then FEC CLAUSE 74 (Fire Code) is enabled on this port. + * When set to 0, then FEC CLAUSE 74 (Fire Code) is disabled if supported. + * This flag should be ignored if FEC CLAUSE 74 is not supported on this port. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_CLAUSE74_ENABLED \ + UINT32_C(0x10) + /* + * When set to 1, then FEC CLAUSE 91 (Reed Solomon) is supported on this port. + * When set to 0, then FEC CLAUSE 91 (Reed Solomon) is not supported on this port. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_CLAUSE91_SUPPORTED \ + UINT32_C(0x20) + /* + * When set to 1, then FEC CLAUSE 91 (Reed Solomon) is enabled on this port. + * When set to 0, then FEC CLAUSE 91 (Reed Solomon) is disabled if supported. + * This flag should be ignored if FEC CLAUSE 91 is not supported on this port. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_CLAUSE91_ENABLED \ + UINT32_C(0x40) + /* + * This value indicates the duplex of the current + * connection state. + */ + uint8_t duplex_state; + /* Half Duplex connection. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_STATE_HALF UINT32_C(0x0) + /* Full duplex connection. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_STATE_FULL UINT32_C(0x1) + #define HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_STATE_LAST \ + HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_STATE_FULL + /* Option flags fields. */ + uint8_t option_flags; + /* When this bit is '1', Media auto detect is enabled. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_OPTION_FLAGS_MEDIA_AUTO_DETECT \ + UINT32_C(0x1) + /* + * Up to 16 bytes of null padded ASCII string representing + * PHY vendor. + * If the string is set to null, then the vendor name is not + * available. + */ + char phy_vendor_name[16]; + /* + * Up to 16 bytes of null padded ASCII string that + * identifies vendor specific part number of the PHY. + * If the string is set to null, then the vendor specific + * part number is not available. + */ + char phy_vendor_partnumber[16]; + uint8_t unused_2[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last.
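+ * + * A hedged sketch of the consumer side (illustrative only, not + * part of this specification): a driver typically polls this + * byte until it reads as '1', then issues a read barrier before + * trusting the rest of the response: + * + * while (resp->valid != 1) + * rte_delay_us(1); + * rte_rmb();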
+ */ + uint8_t valid; +} __attribute__((packed)); + +/********************* + * hwrm_port_mac_cfg * + *********************/ + + +/* hwrm_port_mac_cfg_input (size:320b/40B) */ +struct hwrm_port_mac_cfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* + * In this field, there are a number of CoS mappings related flags + * that are used to configure CoS mappings and their corresponding + * priorities in the hardware. + * For the priorities of CoS mappings, the HWRM uses the following + * priority order (high to low) by default: + * # vlan pri + * # ip_dscp + * # tunnel_vlan_pri + * # default cos + * + * A subset of CoS mappings can be enabled. + * If a priority is not specified for an enabled CoS mapping, the + * priority will be assigned in the above order for the enabled CoS + * mappings. For example, if vlan_pri and ip_dscp CoS mappings are + * enabled and their priorities are not specified, the following + * priority order (high to low) will be used by the HWRM: + * # vlan_pri + * # ip_dscp + * # default cos + * + * vlan_pri CoS mapping together with default CoS with lower priority + * are enabled by default by the HWRM. + */ + uint32_t flags; + /* + * When this bit is '1', this command will configure + * the MAC to match the current link state of the PHY. + * If the link is not established on the PHY, then this + * bit has no effect. + */ + #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_MATCH_LINK \ + UINT32_C(0x1) + /* + * When this bit is set to '1', the inner VLAN PRI to CoS mapping + * is requested to be enabled. + */ + #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_VLAN_PRI2COS_ENABLE \ + UINT32_C(0x2) + /* + * When this bit is set to '1', tunnel VLAN PRI field to + * CoS mapping is requested to be enabled. + */ + #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_TUNNEL_PRI2COS_ENABLE \ + UINT32_C(0x4) + /* + * When this bit is set to '1', the IP DSCP to CoS mapping is + * requested to be enabled. + */ + #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_IP_DSCP2COS_ENABLE \ + UINT32_C(0x8) + /* + * When this bit is '1', the HWRM is requested to + * enable timestamp capture capability on the receive side + * of this port. + */ + #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_ENABLE \ + UINT32_C(0x10) + /* + * When this bit is '1', the HWRM is requested to + * disable timestamp capture capability on the receive side + * of this port. + */ + #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_DISABLE \ + UINT32_C(0x20) + /* + * When this bit is '1', the HWRM is requested to + * enable timestamp capture capability on the transmit side + * of this port. 
+ */ + #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_ENABLE \ + UINT32_C(0x40) + /* + * When this bit is '1', the HWRM is requested to + * disable timestamp capture capability on the transmit side + * of this port. + */ + #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_DISABLE \ + UINT32_C(0x80) + /* + * When this bit is '1', the Out-Of-Box WoL is requested to + * be enabled on this port. + */ + #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_OOB_WOL_ENABLE \ + UINT32_C(0x100) + /* + * When this bit is '1', the Out-Of-Box WoL is requested to + * be disabled on this port. + */ + #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_OOB_WOL_DISABLE \ + UINT32_C(0x200) + /* + * When this bit is set to '1', the inner VLAN PRI to CoS mapping + * is requested to be disabled. + */ + #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_VLAN_PRI2COS_DISABLE \ + UINT32_C(0x400) + /* + * When this bit is set to '1', tunnel VLAN PRI field to + * CoS mapping is requested to be disabled. + */ + #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_TUNNEL_PRI2COS_DISABLE \ + UINT32_C(0x800) + /* + * When this bit is set to '1', the IP DSCP to CoS mapping is + * requested to be disabled. + */ + #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_IP_DSCP2COS_DISABLE \ + UINT32_C(0x1000) + uint32_t enables; + /* + * This bit must be '1' for the ipg field to be + * configured. + */ + #define HWRM_PORT_MAC_CFG_INPUT_ENABLES_IPG \ + UINT32_C(0x1) + /* + * This bit must be '1' for the lpbk field to be + * configured. + */ + #define HWRM_PORT_MAC_CFG_INPUT_ENABLES_LPBK \ + UINT32_C(0x2) + /* + * This bit must be '1' for the vlan_pri2cos_map_pri field to be + * configured. + */ + #define HWRM_PORT_MAC_CFG_INPUT_ENABLES_VLAN_PRI2COS_MAP_PRI \ + UINT32_C(0x4) + /* + * This bit must be '1' for the tunnel_pri2cos_map_pri field to be + * configured. + */ + #define HWRM_PORT_MAC_CFG_INPUT_ENABLES_TUNNEL_PRI2COS_MAP_PRI \ + UINT32_C(0x10) + /* + * This bit must be '1' for the dscp2cos_map_pri field to be + * configured. + */ + #define HWRM_PORT_MAC_CFG_INPUT_ENABLES_DSCP2COS_MAP_PRI \ + UINT32_C(0x20) + /* + * This bit must be '1' for the rx_ts_capture_ptp_msg_type field to be + * configured. + */ + #define HWRM_PORT_MAC_CFG_INPUT_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE \ + UINT32_C(0x40) + /* + * This bit must be '1' for the tx_ts_capture_ptp_msg_type field to be + * configured. + */ + #define HWRM_PORT_MAC_CFG_INPUT_ENABLES_TX_TS_CAPTURE_PTP_MSG_TYPE \ + UINT32_C(0x80) + /* + * This bit must be '1' for the cos_field_cfg field to be + * configured. + */ + #define HWRM_PORT_MAC_CFG_INPUT_ENABLES_COS_FIELD_CFG \ + UINT32_C(0x100) + /* Port ID of port that is to be configured. */ + uint16_t port_id; + /* + * This value is used to configure the minimum IPG that will + * be sent between packets by this port. + */ + uint8_t ipg; + /* This value controls the loopback setting for the MAC. */ + uint8_t lpbk; + /* No loopback is selected. Normal operation. */ + #define HWRM_PORT_MAC_CFG_INPUT_LPBK_NONE UINT32_C(0x0) + /* + * The HW will be configured with local loopback such that + * host data is sent back to the host without modification. + */ + #define HWRM_PORT_MAC_CFG_INPUT_LPBK_LOCAL UINT32_C(0x1) + /* + * The HW will be configured with remote loopback such that + * port logic will send packets back out the transmitter that + * are received.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_LPBK_REMOTE UINT32_C(0x2)
+ #define HWRM_PORT_MAC_CFG_INPUT_LPBK_LAST \
+ HWRM_PORT_MAC_CFG_INPUT_LPBK_REMOTE
+ /*
+ * This value controls the priority setting of VLAN PRI to CoS
+ * mapping based on VLAN Tags of inner packet headers of
+ * tunneled packets or packet headers of non-tunneled packets.
+ *
+ * # Each XXX_pri variable shall have a unique priority value
+ * when it is being specified.
+ * # When comparing priorities of mappings, higher value
+ * indicates higher priority.
+ * For example, a value of 0-3 is returned where 0 is
+ * the lowest priority and 3 is the highest priority.
+ */
+ uint8_t vlan_pri2cos_map_pri;
+ /* Reserved field. */
+ uint8_t reserved1;
+ /*
+ * This value controls the priority setting of VLAN PRI to CoS
+ * mapping based on VLAN Tags of tunneled header.
+ * This mapping only applies when tunneled headers
+ * are present.
+ *
+ * # Each XXX_pri variable shall have a unique priority value
+ * when it is being specified.
+ * # When comparing priorities of mappings, higher value
+ * indicates higher priority.
+ * For example, a value of 0-3 is returned where 0 is
+ * the lowest priority and 3 is the highest priority.
+ */
+ uint8_t tunnel_pri2cos_map_pri;
+ /*
+ * This value controls the priority setting of IP DSCP to CoS
+ * mapping based on inner IP header of tunneled packets or
+ * IP header of non-tunneled packets.
+ *
+ * # Each XXX_pri variable shall have a unique priority value
+ * when it is being specified.
+ * # When comparing priorities of mappings, higher value
+ * indicates higher priority.
+ * For example, a value of 0-3 is returned where 0 is
+ * the lowest priority and 3 is the highest priority.
+ */
+ uint8_t dscp2pri_map_pri;
+ /*
+ * This is a 16-bit bit mask that is used to request a
+ * specific configuration of time stamp capture of PTP messages
+ * on the receive side of this port.
+ * This field shall be ignored if the ptp_rx_ts_capture_enable
+ * flag is not set in this command.
+ * Otherwise, if bit 'i' is set, then the HWRM is being
+ * requested to configure the receive side of the port to
+ * capture the time stamp of every received PTP message
+ * with messageType field value set to i.
+ */
+ uint16_t rx_ts_capture_ptp_msg_type;
+ /*
+ * This is a 16-bit bit mask that is used to request a
+ * specific configuration of time stamp capture of PTP messages
+ * on the transmit side of this port.
+ * This field shall be ignored if the ptp_tx_ts_capture_enable
+ * flag is not set in this command.
+ * Otherwise, if bit 'i' is set, then the HWRM is being
+ * requested to configure the transmit side of the port to
+ * capture the time stamp of every transmitted PTP message
+ * with messageType field value set to i.
+ */
+ uint16_t tx_ts_capture_ptp_msg_type;
+ /* Configuration of CoS fields. */
+ uint8_t cos_field_cfg;
+ /* Reserved */
+ #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_RSVD1 \
+ UINT32_C(0x1)
+ /*
+ * This field is used to specify selection of VLAN PRI value
+ * based on whether one or two VLAN Tags are present in
+ * the inner packet headers of tunneled packets or
+ * non-tunneled packets.
+ * This field is valid only if inner VLAN PRI to CoS mapping
+ * is enabled.
+ * If VLAN PRI to CoS mapping is not enabled, then this
+ * field shall be ignored.
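+ *
+ * A minimal sketch of how the _MASK/_SFT pairs below are intended
+ * to be combined when composing cos_field_cfg (illustrative only):
+ *
+ * uint8_t cfg = 0;
+ * cfg &= (uint8_t)~HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_VLAN_PRI_SEL_MASK;
+ * cfg |= HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_VLAN_PRI_SEL_OUTERMOST;
+ * req.cos_field_cfg = cfg;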
+ */ + #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_VLAN_PRI_SEL_MASK \ + UINT32_C(0x6) + #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_VLAN_PRI_SEL_SFT \ + 1 + /* + * Select inner VLAN PRI when 1 or 2 VLAN Tags are + * present in the inner packet headers + */ + #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_VLAN_PRI_SEL_INNERMOST \ + (UINT32_C(0x0) << 1) + /* + * Select outer VLAN Tag PRI when 2 VLAN Tags are + * present in the inner packet headers. + * No VLAN PRI shall be selected for this configuration + * if only one VLAN Tag is present in the inner + * packet headers. + */ + #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_VLAN_PRI_SEL_OUTER \ + (UINT32_C(0x1) << 1) + /* + * Select outermost VLAN PRI when 1 or 2 VLAN Tags + * are present in the inner packet headers + */ + #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_VLAN_PRI_SEL_OUTERMOST \ + (UINT32_C(0x2) << 1) + /* Unspecified */ + #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED \ + (UINT32_C(0x3) << 1) + #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_VLAN_PRI_SEL_LAST \ + HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED + /* + * This field is used to specify selection of tunnel VLAN + * PRI value based on whether one or two VLAN Tags are + * present in tunnel headers. + * This field is valid only if tunnel VLAN PRI to CoS mapping + * is enabled. + * If tunnel VLAN PRI to CoS mapping is not enabled, then this + * field shall be ignored. + */ + #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_MASK \ + UINT32_C(0x18) + #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_SFT \ + 3 + /* + * Select inner VLAN PRI when 1 or 2 VLAN Tags are + * present in the tunnel packet headers + */ + #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_INNERMOST \ + (UINT32_C(0x0) << 3) + /* + * Select outer VLAN Tag PRI when 2 VLAN Tags are + * present in the tunnel packet headers. + * No tunnel VLAN PRI shall be selected for this + * configuration if only one VLAN Tag is present in + * the tunnel packet headers. + */ + #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTER \ + (UINT32_C(0x1) << 3) + /* + * Select outermost VLAN PRI when 1 or 2 VLAN Tags + * are present in the tunnel packet headers + */ + #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTERMOST \ + (UINT32_C(0x2) << 3) + /* Unspecified */ + #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED \ + (UINT32_C(0x3) << 3) + #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_LAST \ + HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED + /* + * This field shall be used to provide default CoS value + * that has been configured on this port. + * This field is valid only if default CoS mapping + * is enabled. + * If default CoS mapping is not enabled, then this + * field shall be ignored. + */ + #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_DEFAULT_COS_MASK \ + UINT32_C(0xe0) + #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_DEFAULT_COS_SFT \ + 5 + uint8_t unused_0[3]; +} __attribute__((packed)); + +/* hwrm_port_mac_cfg_output (size:128b/16B) */ +struct hwrm_port_mac_cfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. 
*/ + uint16_t resp_len; + /* + * This is the configured maximum length of Ethernet packet + * payload that is allowed to be received on the port. + * This value does not include the number of bytes used by + * Ethernet header and trailer (CRC). + */ + uint16_t mru; + /* + * This is the configured maximum length of Ethernet packet + * payload that is allowed to be transmitted on the port. + * This value does not include the number of bytes used by + * Ethernet header and trailer (CRC). + */ + uint16_t mtu; + /* Current configuration of the IPG value. */ + uint8_t ipg; + /* Current value of the loopback value. */ + uint8_t lpbk; + /* No loopback is selected. Normal operation. */ + #define HWRM_PORT_MAC_CFG_OUTPUT_LPBK_NONE UINT32_C(0x0) + /* + * The HW will be configured with local loopback such that + * host data is sent back to the host without modification. + */ + #define HWRM_PORT_MAC_CFG_OUTPUT_LPBK_LOCAL UINT32_C(0x1) + /* + * The HW will be configured with remote loopback such that + * port logic will send packets back out the transmitter that + * are received. + */ + #define HWRM_PORT_MAC_CFG_OUTPUT_LPBK_REMOTE UINT32_C(0x2) + #define HWRM_PORT_MAC_CFG_OUTPUT_LPBK_LAST \ + HWRM_PORT_MAC_CFG_OUTPUT_LPBK_REMOTE + uint8_t unused_0; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/********************** + * hwrm_port_mac_qcfg * + **********************/ + + +/* hwrm_port_mac_qcfg_input (size:192b/24B) */ +struct hwrm_port_mac_qcfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Port ID of port that is to be configured. */ + uint16_t port_id; + uint8_t unused_0[6]; +} __attribute__((packed)); + +/* hwrm_port_mac_qcfg_output (size:192b/24B) */ +struct hwrm_port_mac_qcfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* + * This is the configured maximum length of Ethernet packet + * payload that is allowed to be received on the port. + * This value does not include the number of bytes used by the + * Ethernet header and trailer (CRC). 
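+ *
+ * For example (assuming an untagged frame with a 14-byte Ethernet
+ * header and a 4-byte CRC), an mru of 1500 permits at most
+ * 14 + 1500 + 4 = 1518 bytes on the wire.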
+ */
+ uint16_t mru;
+ /*
+ * This is the configured maximum length of Ethernet packet
+ * payload that is allowed to be transmitted on the port.
+ * This value does not include the number of bytes used by the
+ * Ethernet header and trailer (CRC).
+ */
+ uint16_t mtu;
+ /*
+ * The minimum IPG that will
+ * be sent between packets by this port.
+ */
+ uint8_t ipg;
+ /* The loopback setting for the MAC. */
+ uint8_t lpbk;
+ /* No loopback is selected. Normal operation. */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_LPBK_NONE UINT32_C(0x0)
+ /*
+ * The HW will be configured with local loopback such that
+ * host data is sent back to the host without modification.
+ */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_LPBK_LOCAL UINT32_C(0x1)
+ /*
+ * The HW will be configured with remote loopback such that
+ * port logic will send packets back out the transmitter that
+ * are received.
+ */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_LPBK_REMOTE UINT32_C(0x2)
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_LPBK_LAST \
+ HWRM_PORT_MAC_QCFG_OUTPUT_LPBK_REMOTE
+ /*
+ * Priority setting for VLAN PRI to CoS mapping.
+ * # Each XXX_pri variable shall have a unique priority value
+ * when it is being used.
+ * # When comparing priorities of mappings, higher value
+ * indicates higher priority.
+ * For example, a value of 0-3 is returned where 0 is
+ * the lowest priority and 3 is the highest priority.
+ * # If the corresponding CoS mapping is not enabled, then this
+ * field should be ignored.
+ * # This value indicates the normalized priority value retained
+ * in the HWRM.
+ */
+ uint8_t vlan_pri2cos_map_pri;
+ /*
+ * In this field, a number of CoS mappings related flags
+ * are used to indicate configured CoS mappings.
+ */
+ uint8_t flags;
+ /*
+ * When this bit is set to '1', the inner VLAN PRI to CoS mapping
+ * is enabled.
+ */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_FLAGS_VLAN_PRI2COS_ENABLE \
+ UINT32_C(0x1)
+ /*
+ * When this bit is set to '1', tunnel VLAN PRI field to
+ * CoS mapping is enabled.
+ */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_FLAGS_TUNNEL_PRI2COS_ENABLE \
+ UINT32_C(0x2)
+ /*
+ * When this bit is set to '1', the IP DSCP to CoS mapping is
+ * enabled.
+ */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_FLAGS_IP_DSCP2COS_ENABLE \
+ UINT32_C(0x4)
+ /*
+ * When this bit is '1', the Out-Of-Box WoL is enabled on this
+ * port.
+ */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_FLAGS_OOB_WOL_ENABLE \
+ UINT32_C(0x8)
+ /* When this bit is '1', PTP is enabled for RX on this port. */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_FLAGS_PTP_RX_TS_CAPTURE_ENABLE \
+ UINT32_C(0x10)
+ /* When this bit is '1', PTP is enabled for TX on this port. */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_FLAGS_PTP_TX_TS_CAPTURE_ENABLE \
+ UINT32_C(0x20)
+ /*
+ * Priority setting for tunnel VLAN PRI to CoS mapping.
+ * # Each XXX_pri variable shall have a unique priority value
+ * when it is being used.
+ * # When comparing priorities of mappings, higher value
+ * indicates higher priority.
+ * For example, a value of 0-3 is returned where 0 is
+ * the lowest priority and 3 is the highest priority.
+ * # If the corresponding CoS mapping is not enabled, then this
+ * field should be ignored.
+ * # This value indicates the normalized priority value retained
+ * in the HWRM.
+ */
+ uint8_t tunnel_pri2cos_map_pri;
+ /*
+ * Priority setting for DSCP to PRI mapping.
+ * # Each XXX_pri variable shall have a unique priority value
+ * when it is being used.
+ * # When comparing priorities of mappings, higher value
+ * indicates higher priority.
+ * For example, a value of 0-3 is returned where 0 is
+ * the lowest priority and 3 is the highest priority.
+ * # If the corresponding CoS mapping is not enabled, then this
+ * field should be ignored.
+ * # This value indicates the normalized priority value retained
+ * in the HWRM.
+ */
+ uint8_t dscp2pri_map_pri;
+ /*
+ * This is a 16-bit bit mask that represents the
+ * current configuration of time stamp capture of PTP messages
+ * on the receive side of this port.
+ * If bit 'i' is set, then the receive side of the port
+ * is configured to capture the time stamp of every
+ * received PTP message with messageType field value set
+ * to i.
+ * If all bits are set to 0 (i.e. field value set 0),
+ * then the receive side of the port is not configured
+ * to capture timestamp for PTP messages.
+ * If all bits are set to 1, then the receive side of the
+ * port is configured to capture timestamp for all PTP
+ * messages.
+ */
+ uint16_t rx_ts_capture_ptp_msg_type;
+ /*
+ * This is a 16-bit bit mask that represents the
+ * current configuration of time stamp capture of PTP messages
+ * on the transmit side of this port.
+ * If bit 'i' is set, then the transmit side of the port
+ * is configured to capture the time stamp of every
+ * transmitted PTP message with messageType field value set
+ * to i.
+ * If all bits are set to 0 (i.e. field value set 0),
+ * then the transmit side of the port is not configured
+ * to capture timestamp for PTP messages.
+ * If all bits are set to 1, then the transmit side of the
+ * port is configured to capture timestamp for all PTP
+ * messages.
+ */
+ uint16_t tx_ts_capture_ptp_msg_type;
+ /* Configuration of CoS fields. */
+ uint8_t cos_field_cfg;
+ /* Reserved */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_RSVD \
+ UINT32_C(0x1)
+ /*
+ * This field is used for selecting VLAN PRI value
+ * based on whether one or two VLAN Tags are present in
+ * the inner packet headers of tunneled packets or
+ * non-tunneled packets.
+ */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_VLAN_PRI_SEL_MASK \
+ UINT32_C(0x6)
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_VLAN_PRI_SEL_SFT \
+ 1
+ /*
+ * Select inner VLAN PRI when 1 or 2 VLAN Tags are
+ * present in the inner packet headers
+ */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_VLAN_PRI_SEL_INNERMOST \
+ (UINT32_C(0x0) << 1)
+ /*
+ * Select outer VLAN Tag PRI when 2 VLAN Tags are
+ * present in the inner packet headers.
+ * No VLAN PRI is selected for this configuration
+ * if only one VLAN Tag is present in the inner
+ * packet headers.
+ */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_VLAN_PRI_SEL_OUTER \
+ (UINT32_C(0x1) << 1)
+ /*
+ * Select outermost VLAN PRI when 1 or 2 VLAN Tags
+ * are present in the inner packet headers
+ */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_VLAN_PRI_SEL_OUTERMOST \
+ (UINT32_C(0x2) << 1)
+ /* Unspecified */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED \
+ (UINT32_C(0x3) << 1)
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_VLAN_PRI_SEL_LAST \
+ HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED
+ /*
+ * This field is used for selecting tunnel VLAN PRI value
+ * based on whether one or two VLAN Tags are present in
+ * the tunnel headers of tunneled packets. This selection
+ * does not apply to non-tunneled packets.
+ */ + #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_MASK \ + UINT32_C(0x18) + #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_SFT \ + 3 + /* + * Select inner VLAN PRI when 1 or 2 VLAN Tags are + * present in the tunnel packet headers + */ + #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_INNERMOST \ + (UINT32_C(0x0) << 3) + /* + * Select outer VLAN Tag PRI when 2 VLAN Tags are + * present in the tunnel packet headers. + * No VLAN PRI is selected for this configuration + * if only one VLAN Tag is present in the tunnel + * packet headers. + */ + #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTER \ + (UINT32_C(0x1) << 3) + /* + * Select outermost VLAN PRI when 1 or 2 VLAN Tags + * are present in the tunnel packet headers + */ + #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTERMOST \ + (UINT32_C(0x2) << 3) + /* Unspecified */ + #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED \ + (UINT32_C(0x3) << 3) + #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_LAST \ + HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED + /* + * This field is used to provide default CoS value that + * has been configured on this port. + */ + #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_DEFAULT_COS_MASK \ + UINT32_C(0xe0) + #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_DEFAULT_COS_SFT \ + 5 + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/************************** + * hwrm_port_mac_ptp_qcfg * + **************************/ + + +/* hwrm_port_mac_ptp_qcfg_input (size:192b/24B) */ +struct hwrm_port_mac_ptp_qcfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Port ID of port that is being queried. */ + uint16_t port_id; + uint8_t unused_0[6]; +} __attribute__((packed)); + +/* hwrm_port_mac_ptp_qcfg_output (size:640b/80B) */ +struct hwrm_port_mac_ptp_qcfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* + * In this field, a number of PTP related flags + * are used to indicate configured PTP capabilities. 
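+ *
+ * A sketch of how a driver might dispatch on the capability bits
+ * defined just below, once the response is valid (illustrative;
+ * resp is assumed to point at a completed
+ * hwrm_port_mac_ptp_qcfg_output):
+ *
+ * if (resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS) {
+ * // read timestamps via the register offsets reported below
+ * } else if (resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_HWRM_ACCESS) {
+ * // fall back to the hwrm_port_ts_query command
+ * }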
+ */ + uint8_t flags; + /* + * When this bit is set to '1', the PTP related registers are + * directly accessible by the host. + */ + #define HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS \ + UINT32_C(0x1) + /* + * When this bit is set to '1', the PTP information is accessible + * via HWRM commands. + */ + #define HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_HWRM_ACCESS \ + UINT32_C(0x2) + uint8_t unused_0[3]; + /* Offset of the PTP register for the lower 32 bits of timestamp for RX. */ + uint32_t rx_ts_reg_off_lower; + /* Offset of the PTP register for the upper 32 bits of timestamp for RX. */ + uint32_t rx_ts_reg_off_upper; + /* Offset of the PTP register for the sequence ID for RX. */ + uint32_t rx_ts_reg_off_seq_id; + /* Offset of the first PTP source ID for RX. */ + uint32_t rx_ts_reg_off_src_id_0; + /* Offset of the second PTP source ID for RX. */ + uint32_t rx_ts_reg_off_src_id_1; + /* Offset of the third PTP source ID for RX. */ + uint32_t rx_ts_reg_off_src_id_2; + /* Offset of the domain ID for RX. */ + uint32_t rx_ts_reg_off_domain_id; + /* Offset of the PTP FIFO register for RX. */ + uint32_t rx_ts_reg_off_fifo; + /* Offset of the PTP advance FIFO register for RX. */ + uint32_t rx_ts_reg_off_fifo_adv; + /* PTP timestamp granularity for RX. */ + uint32_t rx_ts_reg_off_granularity; + /* Offset of the PTP register for the lower 32 bits of timestamp for TX. */ + uint32_t tx_ts_reg_off_lower; + /* Offset of the PTP register for the upper 32 bits of timestamp for TX. */ + uint32_t tx_ts_reg_off_upper; + /* Offset of the PTP register for the sequence ID for TX. */ + uint32_t tx_ts_reg_off_seq_id; + /* Offset of the PTP FIFO register for TX. */ + uint32_t tx_ts_reg_off_fifo; + /* PTP timestamp granularity for TX. */ + uint32_t tx_ts_reg_off_granularity; + uint8_t unused_1[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/******************** + * hwrm_port_qstats * + ********************/ + + +/* hwrm_port_qstats_input (size:320b/40B) */ +struct hwrm_port_qstats_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Port ID of port that is being queried. 
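+ *
+ * Usage sketch for this request (assumptions: the statistics
+ * buffers were allocated from an rte_memzone so their IOVA is
+ * known; tx_mz/rx_mz are hypothetical names and the send step is
+ * omitted):
+ *
+ * struct hwrm_port_qstats_input req = { 0 };
+ * req.port_id = rte_cpu_to_le_16(port_id);
+ * req.tx_stat_host_addr = rte_cpu_to_le_64(tx_mz->iova);
+ * req.rx_stat_host_addr = rte_cpu_to_le_64(rx_mz->iova);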
*/ + uint16_t port_id; + uint8_t unused_0[6]; + /* + * This is the host address where + * Tx port statistics will be stored + */ + uint64_t tx_stat_host_addr; + /* + * This is the host address where + * Rx port statistics will be stored + */ + uint64_t rx_stat_host_addr; +} __attribute__((packed)); + +/* hwrm_port_qstats_output (size:128b/16B) */ +struct hwrm_port_qstats_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* The size of TX port statistics block in bytes. */ + uint16_t tx_stat_size; + /* The size of RX port statistics block in bytes. */ + uint16_t rx_stat_size; + uint8_t unused_0[3]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/************************ + * hwrm_port_qstats_ext * + ************************/ + + +/* hwrm_port_qstats_ext_input (size:320b/40B) */ +struct hwrm_port_qstats_ext_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Port ID of port that is being queried. */ + uint16_t port_id; + /* + * The size of TX port extended + * statistics block in bytes. + */ + uint16_t tx_stat_size; + /* + * The size of RX port extended + * statistics block in bytes + */ + uint16_t rx_stat_size; + uint8_t unused_0[2]; + /* + * This is the host address where + * Tx port statistics will be stored + */ + uint64_t tx_stat_host_addr; + /* + * This is the host address where + * Rx port statistics will be stored + */ + uint64_t rx_stat_host_addr; +} __attribute__((packed)); + +/* hwrm_port_qstats_ext_output (size:128b/16B) */ +struct hwrm_port_qstats_ext_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* The size of TX port statistics block in bytes. */ + uint16_t tx_stat_size; + /* The size of RX port statistics block in bytes. */ + uint16_t rx_stat_size; + uint8_t unused_0[3]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. 
This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/************************* + * hwrm_port_lpbk_qstats * + *************************/ + + +/* hwrm_port_lpbk_qstats_input (size:128b/16B) */ +struct hwrm_port_lpbk_qstats_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; +} __attribute__((packed)); + +/* hwrm_port_lpbk_qstats_output (size:768b/96B) */ +struct hwrm_port_lpbk_qstats_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* Number of transmitted unicast frames */ + uint64_t lpbk_ucast_frames; + /* Number of transmitted multicast frames */ + uint64_t lpbk_mcast_frames; + /* Number of transmitted broadcast frames */ + uint64_t lpbk_bcast_frames; + /* Number of transmitted bytes for unicast traffic */ + uint64_t lpbk_ucast_bytes; + /* Number of transmitted bytes for multicast traffic */ + uint64_t lpbk_mcast_bytes; + /* Number of transmitted bytes for broadcast traffic */ + uint64_t lpbk_bcast_bytes; + /* Total Tx Drops for loopback traffic reported by STATS block */ + uint64_t tx_stat_discard; + /* Total Tx Error Drops for loopback traffic reported by STATS block */ + uint64_t tx_stat_error; + /* Total Rx Drops for loopback traffic reported by STATS block */ + uint64_t rx_stat_discard; + /* Total Rx Error Drops for loopback traffic reported by STATS block */ + uint64_t rx_stat_error; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/*********************** + * hwrm_port_clr_stats * + ***********************/ + + +/* hwrm_port_clr_stats_input (size:192b/24B) */ +struct hwrm_port_clr_stats_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. 
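+ *
+ * Illustrative request setup for this command (HWRM_PORT_CLR_STATS
+ * is assumed to be the request-type constant defined elsewhere in
+ * this header; completion handling is omitted):
+ *
+ * struct hwrm_port_clr_stats_input req = { 0 };
+ * req.req_type = rte_cpu_to_le_16(HWRM_PORT_CLR_STATS);
+ * req.port_id = rte_cpu_to_le_16(port_id);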
+ */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Port ID of port that is being queried. */ + uint16_t port_id; + uint8_t unused_0[6]; +} __attribute__((packed)); + +/* hwrm_port_clr_stats_output (size:128b/16B) */ +struct hwrm_port_clr_stats_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/**************************** + * hwrm_port_lpbk_clr_stats * + ****************************/ + + +/* hwrm_port_lpbk_clr_stats_input (size:128b/16B) */ +struct hwrm_port_lpbk_clr_stats_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; +} __attribute__((packed)); + +/* hwrm_port_lpbk_clr_stats_output (size:128b/16B) */ +struct hwrm_port_lpbk_clr_stats_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. 
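+ *
+ * A common consumer-side pattern that relies on this ordering
+ * guarantee (a sketch; resp_buf, the timeout policy, and the use
+ * of DPDK's rte_io_rmb()/rte_delay_us() are assumptions):
+ *
+ * volatile struct hwrm_port_lpbk_clr_stats_output *resp = resp_buf;
+ * while (timeout--) {
+ * if (resp->valid) {
+ * rte_io_rmb(); // valid is written last: rest of response is now safe
+ * break;
+ * }
+ * rte_delay_us(1);
+ * }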
+ */ + uint8_t valid; +} __attribute__((packed)); + +/********************** + * hwrm_port_ts_query * + **********************/ + + +/* hwrm_port_ts_query_input (size:192b/24B) */ +struct hwrm_port_ts_query_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t flags; + /* + * Enumeration denoting the RX, TX type of the resource. + * This enumeration is used for resources that are similar for both + * TX and RX paths of the chip. + */ + #define HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH UINT32_C(0x1) + /* tx path */ + #define HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_TX UINT32_C(0x0) + /* rx path */ + #define HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_RX UINT32_C(0x1) + #define HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_LAST \ + HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_RX + /* Port ID of port that is being queried. */ + uint16_t port_id; + uint8_t unused_0[2]; +} __attribute__((packed)); + +/* hwrm_port_ts_query_output (size:192b/24B) */ +struct hwrm_port_ts_query_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* Timestamp value of PTP message captured. */ + uint64_t ptp_msg_ts; + /* Sequence ID of the PTP message captured. */ + uint16_t ptp_msg_seqid; + uint8_t unused_0[5]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/*********************** + * hwrm_port_phy_qcaps * + ***********************/ + + +/* hwrm_port_phy_qcaps_input (size:192b/24B) */ +struct hwrm_port_phy_qcaps_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. 
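+ *
+ * For example (a driver-side convention, not mandated by the HWRM),
+ * a monotonically increasing counter suffices to match completions
+ * to requests; bp->hwrm_cmd_seq is a hypothetical per-device field:
+ *
+ * req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);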
+ */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Port ID of port that is being queried. */ + uint16_t port_id; + uint8_t unused_0[6]; +} __attribute__((packed)); + +/* hwrm_port_phy_qcaps_output (size:192b/24B) */ +struct hwrm_port_phy_qcaps_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* PHY capability flags */ + uint8_t flags; + /* + * If set to 1, then this field indicates that the + * link is capable of supporting EEE. + */ + #define HWRM_PORT_PHY_QCAPS_OUTPUT_FLAGS_EEE_SUPPORTED \ + UINT32_C(0x1) + /* + * If set to 1, then this field indicates that the + * PHY is capable of supporting external loopback. + */ + #define HWRM_PORT_PHY_QCAPS_OUTPUT_FLAGS_EXTERNAL_LPBK_SUPPORTED \ + UINT32_C(0x2) + /* + * Reserved field. The HWRM shall set this field to 0. + * An HWRM client shall ignore this field. + */ + #define HWRM_PORT_PHY_QCAPS_OUTPUT_FLAGS_RSVD1_MASK \ + UINT32_C(0xfc) + #define HWRM_PORT_PHY_QCAPS_OUTPUT_FLAGS_RSVD1_SFT 2 + /* Number of front panel ports for this device. */ + uint8_t port_cnt; + /* Not supported or unknown */ + #define HWRM_PORT_PHY_QCAPS_OUTPUT_PORT_CNT_UNKNOWN UINT32_C(0x0) + /* single port device */ + #define HWRM_PORT_PHY_QCAPS_OUTPUT_PORT_CNT_1 UINT32_C(0x1) + /* 2-port device */ + #define HWRM_PORT_PHY_QCAPS_OUTPUT_PORT_CNT_2 UINT32_C(0x2) + /* 3-port device */ + #define HWRM_PORT_PHY_QCAPS_OUTPUT_PORT_CNT_3 UINT32_C(0x3) + /* 4-port device */ + #define HWRM_PORT_PHY_QCAPS_OUTPUT_PORT_CNT_4 UINT32_C(0x4) + #define HWRM_PORT_PHY_QCAPS_OUTPUT_PORT_CNT_LAST \ + HWRM_PORT_PHY_QCAPS_OUTPUT_PORT_CNT_4 + /* + * This is a bit mask to indicate what speeds are supported + * as forced speeds on this link. + * For each speed that can be forced on this link, the + * corresponding mask bit shall be set to '1'. 
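+ *
+ * e.g. (illustrative) testing whether 25Gb may be forced on the
+ * link, given a completed response resp:
+ *
+ * uint16_t speeds = rte_le_to_cpu_16(resp->supported_speeds_force_mode);
+ * if (speeds & HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_25GB)
+ * force_25g_supported = 1;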
+ */
+ uint16_t supported_speeds_force_mode;
+ /* 100Mb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_100MBHD \
+ UINT32_C(0x1)
+ /* 100Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_100MB \
+ UINT32_C(0x2)
+ /* 1Gb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_1GBHD \
+ UINT32_C(0x4)
+ /* 1Gb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_1GB \
+ UINT32_C(0x8)
+ /* 2Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_2GB \
+ UINT32_C(0x10)
+ /* 2.5Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_2_5GB \
+ UINT32_C(0x20)
+ /* 10Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_10GB \
+ UINT32_C(0x40)
+ /* 20Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_20GB \
+ UINT32_C(0x80)
+ /* 25Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_25GB \
+ UINT32_C(0x100)
+ /* 40Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_40GB \
+ UINT32_C(0x200)
+ /* 50Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_50GB \
+ UINT32_C(0x400)
+ /* 100Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_100GB \
+ UINT32_C(0x800)
+ /* 10Mb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_10MBHD \
+ UINT32_C(0x1000)
+ /* 10Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_10MB \
+ UINT32_C(0x2000)
+ /*
+ * This is a bit mask to indicate what speeds are supported
+ * for autonegotiation on this link.
+ * For each speed that can be autonegotiated on this link, the
+ * corresponding mask bit shall be set to '1'.
+ */
+ uint16_t supported_speeds_auto_mode;
+ /* 100Mb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_100MBHD \
+ UINT32_C(0x1)
+ /* 100Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_100MB \
+ UINT32_C(0x2)
+ /* 1Gb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_1GBHD \
+ UINT32_C(0x4)
+ /* 1Gb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_1GB \
+ UINT32_C(0x8)
+ /* 2Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_2GB \
+ UINT32_C(0x10)
+ /* 2.5Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_2_5GB \
+ UINT32_C(0x20)
+ /* 10Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_10GB \
+ UINT32_C(0x40)
+ /* 20Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_20GB \
+ UINT32_C(0x80)
+ /* 25Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_25GB \
+ UINT32_C(0x100)
+ /* 40Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_40GB \
+ UINT32_C(0x200)
+ /* 50Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_50GB \
+ UINT32_C(0x400)
+ /* 100Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_100GB \
+ UINT32_C(0x800)
+ /* 10Mb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_10MBHD \
+ UINT32_C(0x1000)
+ /* 10Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_10MB \
+ UINT32_C(0x2000)
+ /*
+ * This is a bit mask to indicate what speeds are supported
+ * for EEE on this link.
+ * For each speed that can be autonegotiated when EEE is enabled
+ * on this link, the corresponding mask bit shall be set to '1'.
+ * This field is only valid when eee_supported is set to '1'.
+ */
+ uint16_t supported_speeds_eee_mode;
+ /* Reserved */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_EEE_MODE_RSVD1 \
+ UINT32_C(0x1)
+ /* 100Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_EEE_MODE_100MB \
+ UINT32_C(0x2)
+ /* Reserved */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_EEE_MODE_RSVD2 \
+ UINT32_C(0x4)
+ /* 1Gb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_EEE_MODE_1GB \
+ UINT32_C(0x8)
+ /* Reserved */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_EEE_MODE_RSVD3 \
+ UINT32_C(0x10)
+ /* Reserved */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_EEE_MODE_RSVD4 \
+ UINT32_C(0x20)
+ /* 10Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_EEE_MODE_10GB \
+ UINT32_C(0x40)
+ uint32_t tx_lpi_timer_low;
+ /*
+ * The lowest value of TX LPI timer that can be set on this link
+ * when EEE is enabled. This value is in microseconds.
+ * This field is valid only when eee_supported is set to '1'.
+ */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_TX_LPI_TIMER_LOW_MASK \
+ UINT32_C(0xffffff)
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_TX_LPI_TIMER_LOW_SFT 0
+ /*
+ * Reserved field. The HWRM shall set this field to 0.
+ * An HWRM client shall ignore this field.
+ */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_RSVD2_MASK \
+ UINT32_C(0xff000000)
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_RSVD2_SFT 24
+ uint32_t valid_tx_lpi_timer_high;
+ /*
+ * The highest value of TX LPI timer that can be set on this link
+ * when EEE is enabled. This value is in microseconds.
+ * This field is valid only when eee_supported is set to '1'.
+ */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_TX_LPI_TIMER_HIGH_MASK \
+ UINT32_C(0xffffff)
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_TX_LPI_TIMER_HIGH_SFT 0
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_VALID_MASK \
+ UINT32_C(0xff000000)
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_VALID_SFT 24
+} __attribute__((packed));
+
+/***************************
+ * hwrm_port_phy_i2c_write *
+ ***************************/
+
+
+/* hwrm_port_phy_i2c_write_input (size:832b/104B) */
+struct hwrm_port_phy_i2c_write_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t flags;
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the page_offset field to be
+ * configured.
+ */
+ #define HWRM_PORT_PHY_I2C_WRITE_INPUT_ENABLES_PAGE_OFFSET \
+ UINT32_C(0x1)
+ /* Port ID of port. */
+ uint16_t port_id;
+ /* 8-bit I2C slave address. */
+ uint8_t i2c_slave_addr;
+ uint8_t unused_0;
+ /* The page number that is being accessed over I2C. */
+ uint16_t page_number;
+ /* Offset within the page that is being accessed over I2C. */
+ uint16_t page_offset;
+ /*
+ * Length of data to write, in bytes starting at the offset
+ * specified above. If the offset is not specified, then
+ * the data shall be written from the beginning of the page.
+ */
+ uint8_t data_length;
+ uint8_t unused_1[7];
+ /* Up to 64B of data. */
+ uint32_t data[16];
+} __attribute__((packed));
+
+/* hwrm_port_phy_i2c_write_output (size:128b/16B) */
+struct hwrm_port_phy_i2c_write_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
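+ *
+ * Illustrative use of the write request defined above: write two
+ * bytes at offset 0x7f of page 0 on a module at I2C address 0xa0
+ * (all values are examples; src_bytes and the send step are
+ * assumptions):
+ *
+ * struct hwrm_port_phy_i2c_write_input req = { 0 };
+ * req.port_id = rte_cpu_to_le_16(port_id);
+ * req.i2c_slave_addr = 0xa0;
+ * req.page_number = rte_cpu_to_le_16(0);
+ * req.enables = rte_cpu_to_le_32(
+ * HWRM_PORT_PHY_I2C_WRITE_INPUT_ENABLES_PAGE_OFFSET);
+ * req.page_offset = rte_cpu_to_le_16(0x7f);
+ * req.data_length = 2;
+ * memcpy(req.data, src_bytes, 2);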
+ */ + uint8_t valid; +} __attribute__((packed)); + +/************************** + * hwrm_port_phy_i2c_read * + **************************/ + + +/* hwrm_port_phy_i2c_read_input (size:320b/40B) */ +struct hwrm_port_phy_i2c_read_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t flags; + uint32_t enables; + /* + * This bit must be '1' for the page_offset field to be + * configured. + */ + #define HWRM_PORT_PHY_I2C_READ_INPUT_ENABLES_PAGE_OFFSET \ + UINT32_C(0x1) + /* Port ID of port. */ + uint16_t port_id; + /* 8-bit I2C slave address. */ + uint8_t i2c_slave_addr; + uint8_t unused_0; + /* The page number that is being accessed over I2C. */ + uint16_t page_number; + /* Offset within the page that is being accessed over I2C. */ + uint16_t page_offset; + /* + * Length of data to read, in bytes starting at the offset + * specified above. If the offset is not specified, then + * the data shall be read from the beginning of the page. + */ + uint8_t data_length; + uint8_t unused_1[7]; +} __attribute__((packed)); + +/* hwrm_port_phy_i2c_read_output (size:640b/80B) */ +struct hwrm_port_phy_i2c_read_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* Up to 64B of data. */ + uint32_t data[16]; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/********************* + * hwrm_port_led_cfg * + *********************/ + + +/* hwrm_port_led_cfg_input (size:512b/64B) */ +struct hwrm_port_led_cfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. 
+ */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t enables; + /* + * This bit must be '1' for the led0_id field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_ID \ + UINT32_C(0x1) + /* + * This bit must be '1' for the led0_state field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_STATE \ + UINT32_C(0x2) + /* + * This bit must be '1' for the led0_color field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_COLOR \ + UINT32_C(0x4) + /* + * This bit must be '1' for the led0_blink_on field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_BLINK_ON \ + UINT32_C(0x8) + /* + * This bit must be '1' for the led0_blink_off field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_BLINK_OFF \ + UINT32_C(0x10) + /* + * This bit must be '1' for the led0_group_id field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_GROUP_ID \ + UINT32_C(0x20) + /* + * This bit must be '1' for the led1_id field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_ID \ + UINT32_C(0x40) + /* + * This bit must be '1' for the led1_state field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_STATE \ + UINT32_C(0x80) + /* + * This bit must be '1' for the led1_color field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_COLOR \ + UINT32_C(0x100) + /* + * This bit must be '1' for the led1_blink_on field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_BLINK_ON \ + UINT32_C(0x200) + /* + * This bit must be '1' for the led1_blink_off field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_BLINK_OFF \ + UINT32_C(0x400) + /* + * This bit must be '1' for the led1_group_id field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_GROUP_ID \ + UINT32_C(0x800) + /* + * This bit must be '1' for the led2_id field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_ID \ + UINT32_C(0x1000) + /* + * This bit must be '1' for the led2_state field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_STATE \ + UINT32_C(0x2000) + /* + * This bit must be '1' for the led2_color field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_COLOR \ + UINT32_C(0x4000) + /* + * This bit must be '1' for the led2_blink_on field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_BLINK_ON \ + UINT32_C(0x8000) + /* + * This bit must be '1' for the led2_blink_off field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_BLINK_OFF \ + UINT32_C(0x10000) + /* + * This bit must be '1' for the led2_group_id field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_GROUP_ID \ + UINT32_C(0x20000) + /* + * This bit must be '1' for the led3_id field to be + * configured. 
+ * configured.
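+ *
+ * Putting the enables bits together (a sketch with example values
+ * only; the LED state and color encodings are defined below):
+ *
+ * struct hwrm_port_led_cfg_input req = { 0 };
+ * req.port_id = rte_cpu_to_le_16(port_id);
+ * req.num_leds = 1;
+ * req.led0_id = 0;
+ * req.led0_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINK;
+ * req.led0_color = HWRM_PORT_LED_CFG_INPUT_LED0_COLOR_GREEN;
+ * req.led0_blink_on = rte_cpu_to_le_16(500); // ms on
+ * req.led0_blink_off = rte_cpu_to_le_16(500); // ms off
+ * req.enables = rte_cpu_to_le_32(
+ * HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_ID |
+ * HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_STATE |
+ * HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_COLOR |
+ * HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_BLINK_ON |
+ * HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_BLINK_OFF);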
+ */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_ID \ + UINT32_C(0x40000) + /* + * This bit must be '1' for the led3_state field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_STATE \ + UINT32_C(0x80000) + /* + * This bit must be '1' for the led3_color field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_COLOR \ + UINT32_C(0x100000) + /* + * This bit must be '1' for the led3_blink_on field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_BLINK_ON \ + UINT32_C(0x200000) + /* + * This bit must be '1' for the led3_blink_off field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_BLINK_OFF \ + UINT32_C(0x400000) + /* + * This bit must be '1' for the led3_group_id field to be + * configured. + */ + #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_GROUP_ID \ + UINT32_C(0x800000) + /* Port ID of port whose LEDs are configured. */ + uint16_t port_id; + /* + * The number of LEDs that are being configured. + * Up to 4 LEDs can be configured with this command. + */ + uint8_t num_leds; + /* Reserved field. */ + uint8_t rsvd; + /* An identifier for the LED #0. */ + uint8_t led0_id; + /* The requested state of the LED #0. */ + uint8_t led0_state; + /* Default state of the LED */ + #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_DEFAULT UINT32_C(0x0) + /* Off */ + #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_OFF UINT32_C(0x1) + /* On */ + #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_ON UINT32_C(0x2) + /* Blink */ + #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINK UINT32_C(0x3) + /* Blink Alternately */ + #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT UINT32_C(0x4) + #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_LAST \ + HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT + /* The requested color of LED #0. */ + uint8_t led0_color; + /* Default */ + #define HWRM_PORT_LED_CFG_INPUT_LED0_COLOR_DEFAULT UINT32_C(0x0) + /* Amber */ + #define HWRM_PORT_LED_CFG_INPUT_LED0_COLOR_AMBER UINT32_C(0x1) + /* Green */ + #define HWRM_PORT_LED_CFG_INPUT_LED0_COLOR_GREEN UINT32_C(0x2) + /* Green or Amber */ + #define HWRM_PORT_LED_CFG_INPUT_LED0_COLOR_GREENAMBER UINT32_C(0x3) + #define HWRM_PORT_LED_CFG_INPUT_LED0_COLOR_LAST \ + HWRM_PORT_LED_CFG_INPUT_LED0_COLOR_GREENAMBER + uint8_t unused_0; + /* + * If the LED #0 state is "blink" or "blinkalt", then + * this field represents the requested time in milliseconds + * to keep LED on between cycles. + */ + uint16_t led0_blink_on; + /* + * If the LED #0 state is "blink" or "blinkalt", then + * this field represents the requested time in milliseconds + * to keep LED off between cycles. + */ + uint16_t led0_blink_off; + /* + * An identifier for the group of LEDs that LED #0 belongs + * to. + * If set to 0, then the LED #0 shall not be grouped and + * shall be treated as an individual resource. + * For all other non-zero values of this field, LED #0 shall + * be grouped together with the LEDs with the same group ID + * value. + */ + uint8_t led0_group_id; + /* Reserved field. */ + uint8_t rsvd0; + /* An identifier for the LED #1. */ + uint8_t led1_id; + /* The requested state of the LED #1. 
*/ + uint8_t led1_state; + /* Default state of the LED */ + #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_DEFAULT UINT32_C(0x0) + /* Off */ + #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_OFF UINT32_C(0x1) + /* On */ + #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_ON UINT32_C(0x2) + /* Blink */ + #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_BLINK UINT32_C(0x3) + /* Blink Alternately */ + #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_BLINKALT UINT32_C(0x4) + #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_LAST \ + HWRM_PORT_LED_CFG_INPUT_LED1_STATE_BLINKALT + /* The requested color of LED #1. */ + uint8_t led1_color; + /* Default */ + #define HWRM_PORT_LED_CFG_INPUT_LED1_COLOR_DEFAULT UINT32_C(0x0) + /* Amber */ + #define HWRM_PORT_LED_CFG_INPUT_LED1_COLOR_AMBER UINT32_C(0x1) + /* Green */ + #define HWRM_PORT_LED_CFG_INPUT_LED1_COLOR_GREEN UINT32_C(0x2) + /* Green or Amber */ + #define HWRM_PORT_LED_CFG_INPUT_LED1_COLOR_GREENAMBER UINT32_C(0x3) + #define HWRM_PORT_LED_CFG_INPUT_LED1_COLOR_LAST \ + HWRM_PORT_LED_CFG_INPUT_LED1_COLOR_GREENAMBER + uint8_t unused_1; + /* + * If the LED #1 state is "blink" or "blinkalt", then + * this field represents the requested time in milliseconds + * to keep LED on between cycles. + */ + uint16_t led1_blink_on; + /* + * If the LED #1 state is "blink" or "blinkalt", then + * this field represents the requested time in milliseconds + * to keep LED off between cycles. + */ + uint16_t led1_blink_off; + /* + * An identifier for the group of LEDs that LED #1 belongs + * to. + * If set to 0, then the LED #1 shall not be grouped and + * shall be treated as an individual resource. + * For all other non-zero values of this field, LED #1 shall + * be grouped together with the LEDs with the same group ID + * value. + */ + uint8_t led1_group_id; + /* Reserved field. */ + uint8_t rsvd1; + /* An identifier for the LED #2. */ + uint8_t led2_id; + /* The requested state of the LED #2. */ + uint8_t led2_state; + /* Default state of the LED */ + #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_DEFAULT UINT32_C(0x0) + /* Off */ + #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_OFF UINT32_C(0x1) + /* On */ + #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_ON UINT32_C(0x2) + /* Blink */ + #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_BLINK UINT32_C(0x3) + /* Blink Alternately */ + #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_BLINKALT UINT32_C(0x4) + #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_LAST \ + HWRM_PORT_LED_CFG_INPUT_LED2_STATE_BLINKALT + /* The requested color of LED #2. */ + uint8_t led2_color; + /* Default */ + #define HWRM_PORT_LED_CFG_INPUT_LED2_COLOR_DEFAULT UINT32_C(0x0) + /* Amber */ + #define HWRM_PORT_LED_CFG_INPUT_LED2_COLOR_AMBER UINT32_C(0x1) + /* Green */ + #define HWRM_PORT_LED_CFG_INPUT_LED2_COLOR_GREEN UINT32_C(0x2) + /* Green or Amber */ + #define HWRM_PORT_LED_CFG_INPUT_LED2_COLOR_GREENAMBER UINT32_C(0x3) + #define HWRM_PORT_LED_CFG_INPUT_LED2_COLOR_LAST \ + HWRM_PORT_LED_CFG_INPUT_LED2_COLOR_GREENAMBER + uint8_t unused_2; + /* + * If the LED #2 state is "blink" or "blinkalt", then + * this field represents the requested time in milliseconds + * to keep LED on between cycles. + */ + uint16_t led2_blink_on; + /* + * If the LED #2 state is "blink" or "blinkalt", then + * this field represents the requested time in milliseconds + * to keep LED off between cycles. + */ + uint16_t led2_blink_off; + /* + * An identifier for the group of LEDs that LED #2 belongs + * to. + * If set to 0, then the LED #2 shall not be grouped and + * shall be treated as an individual resource. 
+ * For all other non-zero values of this field, LED #2 shall + * be grouped together with the LEDs with the same group ID + * value. + */ + uint8_t led2_group_id; + /* Reserved field. */ + uint8_t rsvd2; + /* An identifier for the LED #3. */ + uint8_t led3_id; + /* The requested state of the LED #3. */ + uint8_t led3_state; + /* Default state of the LED */ + #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_DEFAULT UINT32_C(0x0) + /* Off */ + #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_OFF UINT32_C(0x1) + /* On */ + #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_ON UINT32_C(0x2) + /* Blink */ + #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_BLINK UINT32_C(0x3) + /* Blink Alternately */ + #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_BLINKALT UINT32_C(0x4) + #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_LAST \ + HWRM_PORT_LED_CFG_INPUT_LED3_STATE_BLINKALT + /* The requested color of LED #3. */ + uint8_t led3_color; + /* Default */ + #define HWRM_PORT_LED_CFG_INPUT_LED3_COLOR_DEFAULT UINT32_C(0x0) + /* Amber */ + #define HWRM_PORT_LED_CFG_INPUT_LED3_COLOR_AMBER UINT32_C(0x1) + /* Green */ + #define HWRM_PORT_LED_CFG_INPUT_LED3_COLOR_GREEN UINT32_C(0x2) + /* Green or Amber */ + #define HWRM_PORT_LED_CFG_INPUT_LED3_COLOR_GREENAMBER UINT32_C(0x3) + #define HWRM_PORT_LED_CFG_INPUT_LED3_COLOR_LAST \ + HWRM_PORT_LED_CFG_INPUT_LED3_COLOR_GREENAMBER + uint8_t unused_3; + /* + * If the LED #3 state is "blink" or "blinkalt", then + * this field represents the requested time in milliseconds + * to keep LED on between cycles. + */ + uint16_t led3_blink_on; + /* + * If the LED #3 state is "blink" or "blinkalt", then + * this field represents the requested time in milliseconds + * to keep LED off between cycles. + */ + uint16_t led3_blink_off; + /* + * An identifier for the group of LEDs that LED #3 belongs + * to. + * If set to 0, then the LED #3 shall not be grouped and + * shall be treated as an individual resource. + * For all other non-zero values of this field, LED #3 shall + * be grouped together with the LEDs with the same group ID + * value. + */ + uint8_t led3_group_id; + /* Reserved field. */ + uint8_t rsvd3; +} __attribute__((packed)); + +/* hwrm_port_led_cfg_output (size:128b/16B) */ +struct hwrm_port_led_cfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/********************** + * hwrm_port_led_qcfg * + **********************/ + + +/* hwrm_port_led_qcfg_input (size:192b/24B) */ +struct hwrm_port_led_qcfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. 
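As an editorial illustration of the enables-bitmask convention used throughout these request structures (the firmware only honours fields whose enables bit is set), here is a minimal sketch of filling hwrm_port_led_cfg_input so LED #0 blinks 500 ms on / 500 ms off. The helper name is hypothetical, and the transport (resp_addr setup, doorbell, completion handling) plus little-endian conversion of multi-byte fields (e.g. rte_cpu_to_le_16()) are deliberately omitted.

#include <string.h>

/* Sketch only: prepare a request that blinks LED #0 on 'port_id'. */
static void
prep_led0_blink(struct hwrm_port_led_cfg_input *req, uint16_t port_id)
{
	memset(req, 0, sizeof(*req));
	req->port_id = port_id;
	req->num_leds = 1;
	req->led0_id = 0;
	req->led0_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINK;
	req->led0_blink_on = 500;	/* ms on per blink cycle */
	req->led0_blink_off = 500;	/* ms off per blink cycle */
	/* Flag exactly the led0_* fields filled in above. */
	req->enables = HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_ID |
		       HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_STATE |
		       HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_BLINK_ON |
		       HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_BLINK_OFF;
}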
+ */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Port ID of port whose LED configuration is being queried. */ + uint16_t port_id; + uint8_t unused_0[6]; +} __attribute__((packed)); + +/* hwrm_port_led_qcfg_output (size:448b/56B) */ +struct hwrm_port_led_qcfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* + * The number of LEDs that are configured on this port. + * Up to 4 LEDs can be returned in the response. + */ + uint8_t num_leds; + /* An identifier for the LED #0. */ + uint8_t led0_id; + /* The type of LED #0. */ + uint8_t led0_type; + /* Speed LED */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_TYPE_SPEED UINT32_C(0x0) + /* Activity LED */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_TYPE_ACTIVITY UINT32_C(0x1) + /* Invalid */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_TYPE_INVALID UINT32_C(0xff) + #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_TYPE_LAST \ + HWRM_PORT_LED_QCFG_OUTPUT_LED0_TYPE_INVALID + /* The current state of the LED #0. */ + uint8_t led0_state; + /* Default state of the LED */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT UINT32_C(0x0) + /* Off */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_OFF UINT32_C(0x1) + /* On */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_ON UINT32_C(0x2) + /* Blink */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_BLINK UINT32_C(0x3) + /* Blink Alternately */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_BLINKALT UINT32_C(0x4) + #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_LAST \ + HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_BLINKALT + /* The color of LED #0. */ + uint8_t led0_color; + /* Default */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_COLOR_DEFAULT UINT32_C(0x0) + /* Amber */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_COLOR_AMBER UINT32_C(0x1) + /* Green */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_COLOR_GREEN UINT32_C(0x2) + /* Green or Amber */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_COLOR_GREENAMBER UINT32_C(0x3) + #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_COLOR_LAST \ + HWRM_PORT_LED_QCFG_OUTPUT_LED0_COLOR_GREENAMBER + uint8_t unused_0; + /* + * If the LED #0 state is "blink" or "blinkalt", then + * this field represents the requested time in milliseconds + * to keep LED on between cycles. + */ + uint16_t led0_blink_on; + /* + * If the LED #0 state is "blink" or "blinkalt", then + * this field represents the requested time in milliseconds + * to keep LED off between cycles. + */ + uint16_t led0_blink_off; + /* + * An identifier for the group of LEDs that LED #0 belongs + * to. + * If set to 0, then the LED #0 is not grouped. + * For all other non-zero values of this field, LED #0 is + * grouped together with the LEDs with the same group ID + * value. + */ + uint8_t led0_group_id; + /* An identifier for the LED #1. */ + uint8_t led1_id; + /* The type of LED #1. 
*/ + uint8_t led1_type; + /* Speed LED */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_TYPE_SPEED UINT32_C(0x0) + /* Activity LED */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_TYPE_ACTIVITY UINT32_C(0x1) + /* Invalid */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_TYPE_INVALID UINT32_C(0xff) + #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_TYPE_LAST \ + HWRM_PORT_LED_QCFG_OUTPUT_LED1_TYPE_INVALID + /* The current state of the LED #1. */ + uint8_t led1_state; + /* Default state of the LED */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_DEFAULT UINT32_C(0x0) + /* Off */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_OFF UINT32_C(0x1) + /* On */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_ON UINT32_C(0x2) + /* Blink */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_BLINK UINT32_C(0x3) + /* Blink Alternately */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_BLINKALT UINT32_C(0x4) + #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_LAST \ + HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_BLINKALT + /* The color of LED #1. */ + uint8_t led1_color; + /* Default */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_COLOR_DEFAULT UINT32_C(0x0) + /* Amber */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_COLOR_AMBER UINT32_C(0x1) + /* Green */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_COLOR_GREEN UINT32_C(0x2) + /* Green or Amber */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_COLOR_GREENAMBER UINT32_C(0x3) + #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_COLOR_LAST \ + HWRM_PORT_LED_QCFG_OUTPUT_LED1_COLOR_GREENAMBER + uint8_t unused_1; + /* + * If the LED #1 state is "blink" or "blinkalt", then + * this field represents the requested time in milliseconds + * to keep LED on between cycles. + */ + uint16_t led1_blink_on; + /* + * If the LED #1 state is "blink" or "blinkalt", then + * this field represents the requested time in milliseconds + * to keep LED off between cycles. + */ + uint16_t led1_blink_off; + /* + * An identifier for the group of LEDs that LED #1 belongs + * to. + * If set to 0, then the LED #1 is not grouped. + * For all other non-zero values of this field, LED #1 is + * grouped together with the LEDs with the same group ID + * value. + */ + uint8_t led1_group_id; + /* An identifier for the LED #2. */ + uint8_t led2_id; + /* The type of LED #2. */ + uint8_t led2_type; + /* Speed LED */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_TYPE_SPEED UINT32_C(0x0) + /* Activity LED */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_TYPE_ACTIVITY UINT32_C(0x1) + /* Invalid */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_TYPE_INVALID UINT32_C(0xff) + #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_TYPE_LAST \ + HWRM_PORT_LED_QCFG_OUTPUT_LED2_TYPE_INVALID + /* The current state of the LED #2. */ + uint8_t led2_state; + /* Default state of the LED */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_DEFAULT UINT32_C(0x0) + /* Off */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_OFF UINT32_C(0x1) + /* On */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_ON UINT32_C(0x2) + /* Blink */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_BLINK UINT32_C(0x3) + /* Blink Alternately */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_BLINKALT UINT32_C(0x4) + #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_LAST \ + HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_BLINKALT + /* The color of LED #2. 
*/ + uint8_t led2_color; + /* Default */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_COLOR_DEFAULT UINT32_C(0x0) + /* Amber */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_COLOR_AMBER UINT32_C(0x1) + /* Green */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_COLOR_GREEN UINT32_C(0x2) + /* Green or Amber */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_COLOR_GREENAMBER UINT32_C(0x3) + #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_COLOR_LAST \ + HWRM_PORT_LED_QCFG_OUTPUT_LED2_COLOR_GREENAMBER + uint8_t unused_2; + /* + * If the LED #2 state is "blink" or "blinkalt", then + * this field represents the requested time in milliseconds + * to keep LED on between cycles. + */ + uint16_t led2_blink_on; + /* + * If the LED #2 state is "blink" or "blinkalt", then + * this field represents the requested time in milliseconds + * to keep LED off between cycles. + */ + uint16_t led2_blink_off; + /* + * An identifier for the group of LEDs that LED #2 belongs + * to. + * If set to 0, then the LED #2 is not grouped. + * For all other non-zero values of this field, LED #2 is + * grouped together with the LEDs with the same group ID + * value. + */ + uint8_t led2_group_id; + /* An identifier for the LED #3. */ + uint8_t led3_id; + /* The type of LED #3. */ + uint8_t led3_type; + /* Speed LED */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_TYPE_SPEED UINT32_C(0x0) + /* Activity LED */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_TYPE_ACTIVITY UINT32_C(0x1) + /* Invalid */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_TYPE_INVALID UINT32_C(0xff) + #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_TYPE_LAST \ + HWRM_PORT_LED_QCFG_OUTPUT_LED3_TYPE_INVALID + /* The current state of the LED #3. */ + uint8_t led3_state; + /* Default state of the LED */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_DEFAULT UINT32_C(0x0) + /* Off */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_OFF UINT32_C(0x1) + /* On */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_ON UINT32_C(0x2) + /* Blink */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_BLINK UINT32_C(0x3) + /* Blink Alternately */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_BLINKALT UINT32_C(0x4) + #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_LAST \ + HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_BLINKALT + /* The color of LED #3. */ + uint8_t led3_color; + /* Default */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_COLOR_DEFAULT UINT32_C(0x0) + /* Amber */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_COLOR_AMBER UINT32_C(0x1) + /* Green */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_COLOR_GREEN UINT32_C(0x2) + /* Green or Amber */ + #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_COLOR_GREENAMBER UINT32_C(0x3) + #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_COLOR_LAST \ + HWRM_PORT_LED_QCFG_OUTPUT_LED3_COLOR_GREENAMBER + uint8_t unused_3; + /* + * If the LED #3 state is "blink" or "blinkalt", then + * this field represents the requested time in milliseconds + * to keep LED on between cycles. + */ + uint16_t led3_blink_on; + /* + * If the LED #3 state is "blink" or "blinkalt", then + * this field represents the requested time in milliseconds + * to keep LED off between cycles. + */ + uint16_t led3_blink_off; + /* + * An identifier for the group of LEDs that LED #3 belongs + * to. + * If set to 0, then the LED #3 is not grouped. + * For all other non-zero values of this field, LED #3 is + * grouped together with the LEDs with the same group ID + * value. + */ + uint8_t led3_group_id; + uint8_t unused_4[6]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. 
This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/*********************** + * hwrm_port_led_qcaps * + ***********************/ + + +/* hwrm_port_led_qcaps_input (size:192b/24B) */ +struct hwrm_port_led_qcaps_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Port ID of port whose LED configuration is being queried. */ + uint16_t port_id; + uint8_t unused_0[6]; +} __attribute__((packed)); + +/* hwrm_port_led_qcaps_output (size:384b/48B) */ +struct hwrm_port_led_qcaps_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* + * The number of LEDs that are configured on this port. + * Up to 4 LEDs can be returned in the response. + */ + uint8_t num_leds; + /* Reserved for future use. */ + uint8_t unused[3]; + /* An identifier for the LED #0. */ + uint8_t led0_id; + /* The type of LED #0. */ + uint8_t led0_type; + /* Speed LED */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_TYPE_SPEED UINT32_C(0x0) + /* Activity LED */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_TYPE_ACTIVITY UINT32_C(0x1) + /* Invalid */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_TYPE_INVALID UINT32_C(0xff) + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_TYPE_LAST \ + HWRM_PORT_LED_QCAPS_OUTPUT_LED0_TYPE_INVALID + /* + * An identifier for the group of LEDs that LED #0 belongs + * to. + * If set to 0, then the LED #0 cannot be grouped. + * For all other non-zero values of this field, LED #0 is + * grouped together with the LEDs with the same group ID + * value. + */ + uint8_t led0_group_id; + uint8_t unused_0; + /* The states supported by LED #0. */ + uint16_t led0_state_caps; + /* + * If set to 1, this LED is enabled. + * If set to 0, this LED is disabled. + */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_ENABLED \ + UINT32_C(0x1) + /* + * If set to 1, off state is supported on this LED. + * If set to 0, off state is not supported on this LED. + */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_OFF_SUPPORTED \ + UINT32_C(0x2) + /* + * If set to 1, on state is supported on this LED. + * If set to 0, on state is not supported on this LED. + */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_ON_SUPPORTED \ + UINT32_C(0x4) + /* + * If set to 1, blink state is supported on this LED. 
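The valid byte described above is the completion handshake for every HWRM response: the firmware writes it last, so software polling a DMA'd response buffer must not trust any other field until valid reads back non-zero. A minimal sketch of that check, assuming a coherent response buffer (the helper is hypothetical; a real driver would typically pair the poll with a read barrier such as rte_io_rmb() and a timeout):

/* Sketch only: has the firmware finished writing this response? */
static int
hwrm_resp_ready(const struct hwrm_port_led_qcfg_output *resp)
{
	/* 'valid' is written last, so the rest of the buffer is
	 * defined only once this reads back non-zero. */
	return *(volatile const uint8_t *)&resp->valid != 0;
}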
+ * If set to 0, blink state is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_BLINK_SUPPORTED \
+ UINT32_C(0x8)
+ /*
+ * If set to 1, blink_alt state is supported on this LED.
+ * If set to 0, blink_alt state is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_BLINK_ALT_SUPPORTED \
+ UINT32_C(0x10)
+ /* The colors supported by LED #0. */
+ uint16_t led0_color_caps;
+ /* reserved. */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_COLOR_CAPS_RSVD \
+ UINT32_C(0x1)
+ /*
+ * If set to 1, Amber color is supported on this LED.
+ * If set to 0, Amber color is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_COLOR_CAPS_AMBER_SUPPORTED \
+ UINT32_C(0x2)
+ /*
+ * If set to 1, Green color is supported on this LED.
+ * If set to 0, Green color is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_COLOR_CAPS_GREEN_SUPPORTED \
+ UINT32_C(0x4)
+ /* An identifier for the LED #1. */
+ uint8_t led1_id;
+ /* The type of LED #1. */
+ uint8_t led1_type;
+ /* Speed LED */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_TYPE_SPEED UINT32_C(0x0)
+ /* Activity LED */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_TYPE_ACTIVITY UINT32_C(0x1)
+ /* Invalid */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_TYPE_INVALID UINT32_C(0xff)
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_TYPE_LAST \
+ HWRM_PORT_LED_QCAPS_OUTPUT_LED1_TYPE_INVALID
+ /*
+ * An identifier for the group of LEDs that LED #1 belongs
+ * to.
+ * If set to 0, then the LED #1 cannot be grouped.
+ * For all other non-zero values of this field, LED #1 is
+ * grouped together with the LEDs with the same group ID
+ * value.
+ */
+ uint8_t led1_group_id;
+ uint8_t unused_1;
+ /* The states supported by LED #1. */
+ uint16_t led1_state_caps;
+ /*
+ * If set to 1, this LED is enabled.
+ * If set to 0, this LED is disabled.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_STATE_CAPS_ENABLED \
+ UINT32_C(0x1)
+ /*
+ * If set to 1, off state is supported on this LED.
+ * If set to 0, off state is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_STATE_CAPS_OFF_SUPPORTED \
+ UINT32_C(0x2)
+ /*
+ * If set to 1, on state is supported on this LED.
+ * If set to 0, on state is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_STATE_CAPS_ON_SUPPORTED \
+ UINT32_C(0x4)
+ /*
+ * If set to 1, blink state is supported on this LED.
+ * If set to 0, blink state is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_STATE_CAPS_BLINK_SUPPORTED \
+ UINT32_C(0x8)
+ /*
+ * If set to 1, blink_alt state is supported on this LED.
+ * If set to 0, blink_alt state is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_STATE_CAPS_BLINK_ALT_SUPPORTED \
+ UINT32_C(0x10)
+ /* The colors supported by LED #1. */
+ uint16_t led1_color_caps;
+ /* reserved. */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_COLOR_CAPS_RSVD \
+ UINT32_C(0x1)
+ /*
+ * If set to 1, Amber color is supported on this LED.
+ * If set to 0, Amber color is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_COLOR_CAPS_AMBER_SUPPORTED \
+ UINT32_C(0x2)
+ /*
+ * If set to 1, Green color is supported on this LED.
+ * If set to 0, Green color is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_COLOR_CAPS_GREEN_SUPPORTED \
+ UINT32_C(0x4)
+ /* An identifier for the LED #2. */
+ uint8_t led2_id;
+ /* The type of LED #2. */
+ uint8_t led2_type;
+ /* Speed LED */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_TYPE_SPEED UINT32_C(0x0)
+ /* Activity LED */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_TYPE_ACTIVITY UINT32_C(0x1)
+ /* Invalid */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_TYPE_INVALID UINT32_C(0xff)
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_TYPE_LAST \
+ HWRM_PORT_LED_QCAPS_OUTPUT_LED2_TYPE_INVALID
+ /*
+ * An identifier for the group of LEDs that LED #2 belongs
+ * to.
+ * If set to 0, then the LED #2 cannot be grouped.
+ * For all other non-zero values of this field, LED #2 is
+ * grouped together with the LEDs with the same group ID
+ * value.
+ */
+ uint8_t led2_group_id;
+ uint8_t unused_2;
+ /* The states supported by LED #2. */
+ uint16_t led2_state_caps;
+ /*
+ * If set to 1, this LED is enabled.
+ * If set to 0, this LED is disabled.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_STATE_CAPS_ENABLED \
+ UINT32_C(0x1)
+ /*
+ * If set to 1, off state is supported on this LED.
+ * If set to 0, off state is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_STATE_CAPS_OFF_SUPPORTED \
+ UINT32_C(0x2)
+ /*
+ * If set to 1, on state is supported on this LED.
+ * If set to 0, on state is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_STATE_CAPS_ON_SUPPORTED \
+ UINT32_C(0x4)
+ /*
+ * If set to 1, blink state is supported on this LED.
+ * If set to 0, blink state is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_STATE_CAPS_BLINK_SUPPORTED \
+ UINT32_C(0x8)
+ /*
+ * If set to 1, blink_alt state is supported on this LED.
+ * If set to 0, blink_alt state is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_STATE_CAPS_BLINK_ALT_SUPPORTED \
+ UINT32_C(0x10)
+ /* The colors supported by LED #2. */
+ uint16_t led2_color_caps;
+ /* reserved. */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_COLOR_CAPS_RSVD \
+ UINT32_C(0x1)
+ /*
+ * If set to 1, Amber color is supported on this LED.
+ * If set to 0, Amber color is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_COLOR_CAPS_AMBER_SUPPORTED \
+ UINT32_C(0x2)
+ /*
+ * If set to 1, Green color is supported on this LED.
+ * If set to 0, Green color is not supported on this LED.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_COLOR_CAPS_GREEN_SUPPORTED \
+ UINT32_C(0x4)
+ /* An identifier for the LED #3. */
+ uint8_t led3_id;
+ /* The type of LED #3. */
+ uint8_t led3_type;
+ /* Speed LED */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_TYPE_SPEED UINT32_C(0x0)
+ /* Activity LED */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_TYPE_ACTIVITY UINT32_C(0x1)
+ /* Invalid */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_TYPE_INVALID UINT32_C(0xff)
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_TYPE_LAST \
+ HWRM_PORT_LED_QCAPS_OUTPUT_LED3_TYPE_INVALID
+ /*
+ * An identifier for the group of LEDs that LED #3 belongs
+ * to.
+ * If set to 0, then the LED #3 cannot be grouped.
+ * For all other non-zero values of this field, LED #3 is
+ * grouped together with the LEDs with the same group ID
+ * value.
+ */
+ uint8_t led3_group_id;
+ uint8_t unused_3;
+ /* The states supported by LED #3. */
+ uint16_t led3_state_caps;
+ /*
+ * If set to 1, this LED is enabled.
+ * If set to 0, this LED is disabled.
+ */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_STATE_CAPS_ENABLED \
+ UINT32_C(0x1)
+ /*
+ * If set to 1, off state is supported on this LED.
+ * If set to 0, off state is not supported on this LED.
+ */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_STATE_CAPS_OFF_SUPPORTED \ + UINT32_C(0x2) + /* + * If set to 1, on state is supported on this LED. + * If set to 0, on state is not supported on this LED. + */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_STATE_CAPS_ON_SUPPORTED \ + UINT32_C(0x4) + /* + * If set to 1, blink state is supported on this LED. + * If set to 0, blink state is not supported on this LED. + */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_STATE_CAPS_BLINK_SUPPORTED \ + UINT32_C(0x8) + /* + * If set to 1, blink_alt state is supported on this LED. + * If set to 0, blink_alt state is not supported on this LED. + */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_STATE_CAPS_BLINK_ALT_SUPPORTED \ + UINT32_C(0x10) + /* The colors supported by LED #3. */ + uint16_t led3_color_caps; + /* reserved. */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_COLOR_CAPS_RSVD \ + UINT32_C(0x1) + /* + * If set to 1, Amber color is supported on this LED. + * If set to 0, Amber color is not supported on this LED. + */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_COLOR_CAPS_AMBER_SUPPORTED \ + UINT32_C(0x2) + /* + * If set to 1, Green color is supported on this LED. + * If set to 0, Green color is not supported on this LED. + */ + #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_COLOR_CAPS_GREEN_SUPPORTED \ + UINT32_C(0x4) + uint8_t unused_4[3]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/*********************** + * hwrm_queue_qportcfg * + ***********************/ + + +/* hwrm_queue_qportcfg_input (size:192b/24B) */ +struct hwrm_queue_qportcfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t flags; + /* + * Enumeration denoting the RX, TX type of the resource. + * This enumeration is used for resources that are similar for both + * TX and RX paths of the chip. + */ + #define HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH UINT32_C(0x1) + /* tx path */ + #define HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX UINT32_C(0x0) + /* rx path */ + #define HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX UINT32_C(0x1) + #define HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_LAST \ + HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX + /* + * Port ID of port for which the queue configuration is being + * queried. This field is only required when sent by IPC. 
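Before issuing hwrm_port_led_cfg against a LED, a driver can consult the per-LED capability words reported by hwrm_port_led_qcaps. A hedged sketch (hypothetical helper; the 16-bit *_caps fields are little-endian on the wire and would normally be converted first, e.g. with rte_le_to_cpu_16()):

/* Sketch only: may LED #0 be blinked in green on this port? */
static int
led0_green_blink_ok(const struct hwrm_port_led_qcaps_output *qcaps)
{
	return (qcaps->led0_state_caps &
		HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_ENABLED) &&
	       (qcaps->led0_state_caps &
		HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_BLINK_SUPPORTED) &&
	       (qcaps->led0_color_caps &
		HWRM_PORT_LED_QCAPS_OUTPUT_LED0_COLOR_CAPS_GREEN_SUPPORTED);
}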
+ */
+ uint16_t port_id;
+ /*
+ * Drivers will set this capability when they can use
+ * queue_idx_service_profile to map the queues to the application.
+ */
+ uint8_t drv_qmap_cap;
+ /* disabled */
+ #define HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_DISABLED UINT32_C(0x0)
+ /* enabled */
+ #define HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED UINT32_C(0x1)
+ #define HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_LAST \
+ HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED
+ uint8_t unused_0;
+} __attribute__((packed));
+
+/* hwrm_queue_qportcfg_output (size:256b/32B) */
+struct hwrm_queue_qportcfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /*
+ * The maximum number of queues that can be configured on this
+ * port.
+ * Valid values range from 1 through 8.
+ */
+ uint8_t max_configurable_queues;
+ /*
+ * The maximum number of lossless queues that can be configured
+ * on this port.
+ * Valid values range from 0 through 8.
+ */
+ uint8_t max_configurable_lossless_queues;
+ /*
+ * Bitmask indicating which queues can be configured by the
+ * hwrm_queue_cfg command.
+ *
+ * Each bit represents a specific queue where bit 0 represents
+ * queue 0 and bit 7 represents queue 7.
+ * # A value of 0 indicates that the queue is not configurable
+ * by the hwrm_queue_cfg command.
+ * # A value of 1 indicates that the queue is configurable.
+ * # A hwrm_queue_cfg command shall return an error when trying
+ * to configure a queue that is not configurable.
+ */
+ uint8_t queue_cfg_allowed;
+ /* Information about queue configuration. */
+ uint8_t queue_cfg_info;
+ /*
+ * If this flag is set to '1', then the queues are
+ * configured asymmetrically on TX and RX sides.
+ * If this flag is set to '0', then the queues are
+ * configured symmetrically on TX and RX sides. For
+ * symmetric configuration, the queue configuration
+ * including queue ids and service profiles on the
+ * TX side is the same as the corresponding queue
+ * configuration on the RX side.
+ */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_CFG_INFO_ASYM_CFG \
+ UINT32_C(0x1)
+ /*
+ * Bitmask indicating which priorities can be configured by the
+ * hwrm_queue_pfcenable_cfg command.
+ *
+ * Each bit represents a specific priority where bit 0 represents
+ * priority 0 and bit 7 represents priority 7.
+ * # A value of 0 indicates that the priority is not configurable by
+ * the hwrm_queue_pfcenable_cfg command.
+ * # A value of 1 indicates that the priority is configurable.
+ * # A hwrm_queue_pfcenable_cfg command shall return an error when
+ * trying to configure a priority that is not configurable.
+ */
+ uint8_t queue_pfcenable_cfg_allowed;
+ /*
+ * Bitmask indicating which queues can be configured by the
+ * hwrm_queue_pri2cos_cfg command.
+ *
+ * Each bit represents a specific queue where bit 0 represents
+ * queue 0 and bit 7 represents queue 7.
+ * # A value of 0 indicates that the queue is not configurable
+ * by the hwrm_queue_pri2cos_cfg command.
+ * # A value of 1 indicates that the queue is configurable.
+ * # A hwrm_queue_pri2cos_cfg command shall return an error when
+ * trying to configure a queue that is not configurable.
+ */
+ uint8_t queue_pri2cos_cfg_allowed;
+ /*
+ * Bitmask indicating which queues can be configured by the
+ * hwrm_queue_cos2bw_cfg command.
+ *
+ * Each bit represents a specific queue where bit 0 represents
+ * queue 0 and bit 7 represents queue 7.
+ * # A value of 0 indicates that the queue is not configurable
+ * by the hwrm_queue_cos2bw_cfg command.
+ * # A value of 1 indicates that the queue is configurable.
+ * # A hwrm_queue_cos2bw_cfg command shall return an error when
+ * trying to configure a queue that is not configurable.
+ */
+ uint8_t queue_cos2bw_cfg_allowed;
+ /*
+ * ID of CoS Queue 0.
+ * FF - Invalid id
+ *
+ * # This ID can be used on any subsequent call to an hwrm command
+ * that takes a queue id.
+ * # IDs must always be queried by this command before any use
+ * by the driver or software.
+ * # Any driver or software should not make any assumptions about
+ * queue IDs.
+ * # A value of 0xff indicates that the queue is not available.
+ * # Available queues may not be in sequential order.
+ */
+ uint8_t queue_id0;
+ /* This value is applicable to CoS queues only. */
+ uint8_t queue_id0_service_profile;
+ /* Lossy (best-effort) */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSY \
+ UINT32_C(0x0)
+ /* Lossless (legacy) */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS \
+ UINT32_C(0x1)
+ /* Lossless RoCE */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_ROCE \
+ UINT32_C(0x1)
+ /* Lossy RoCE CNP */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSY_ROCE_CNP \
+ UINT32_C(0x2)
+ /* Lossless NIC */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_NIC \
+ UINT32_C(0x3)
+ /* Set to 0xFF... (All Fs) if there is no service profile specified */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN \
+ UINT32_C(0xff)
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LAST \
+ HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN
+ /*
+ * ID of CoS Queue 1.
+ * FF - Invalid id
+ *
+ * # This ID can be used on any subsequent call to an hwrm command
+ * that takes a queue id.
+ * # IDs must always be queried by this command before any use
+ * by the driver or software.
+ * # Any driver or software should not make any assumptions about
+ * queue IDs.
+ * # A value of 0xff indicates that the queue is not available.
+ * # Available queues may not be in sequential order.
+ */
+ uint8_t queue_id1;
+ /* This value is applicable to CoS queues only. */
+ uint8_t queue_id1_service_profile;
+ /* Lossy (best-effort) */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_LOSSY \
+ UINT32_C(0x0)
+ /* Lossless (legacy) */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS \
+ UINT32_C(0x1)
+ /* Lossless RoCE */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS_ROCE \
+ UINT32_C(0x1)
+ /* Lossy RoCE CNP */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_LOSSY_ROCE_CNP \
+ UINT32_C(0x2)
+ /* Lossless NIC */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS_NIC \
+ UINT32_C(0x3)
+ /* Set to 0xFF... (All Fs) if there is no service profile specified */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN \
+ UINT32_C(0xff)
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_LAST \
+ HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN
+ /*
+ * ID of CoS Queue 2.
+ * FF - Invalid id
+ *
+ * # This ID can be used on any subsequent call to an hwrm command
+ * that takes a queue id.
+ * # IDs must always be queried by this command before any use
+ * by the driver or software.
+ * # Any driver or software should not make any assumptions about + * queue IDs. + * # A value of 0xff indicates that the queue is not available. + * # Available queues may not be in sequential order. + */ + uint8_t queue_id2; + /* This value is applicable to CoS queues only. */ + uint8_t queue_id2_service_profile; + /* Lossy (best-effort) */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_LOSSY \ + UINT32_C(0x0) + /* Lossless (legacy) */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS \ + UINT32_C(0x1) + /* Lossless RoCE */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS_ROCE \ + UINT32_C(0x1) + /* Lossy RoCE CNP */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_LOSSY_ROCE_CNP \ + UINT32_C(0x2) + /* Lossless NIC */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS_NIC \ + UINT32_C(0x3) + /* Set to 0xFF... (All Fs) if there is no service profile specified */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN \ + UINT32_C(0xff) + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_LAST \ + HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN + /* + * ID of CoS Queue 3. + * FF - Invalid id + * + * # This ID can be used on any subsequent call to an hwrm command + * that takes a queue id. + * # IDs must always be queried by this command before any use + * by the driver or software. + * # Any driver or software should not make any assumptions about + * queue IDs. + * # A value of 0xff indicates that the queue is not available. + * # Available queues may not be in sequential order. + */ + uint8_t queue_id3; + /* This value is applicable to CoS queues only. */ + uint8_t queue_id3_service_profile; + /* Lossy (best-effort) */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_LOSSY \ + UINT32_C(0x0) + /* Lossless (legacy) */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS \ + UINT32_C(0x1) + /* Lossless RoCE */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS_ROCE \ + UINT32_C(0x1) + /* Lossy RoCE CNP */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_LOSSY_ROCE_CNP \ + UINT32_C(0x2) + /* Lossless NIC */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS_NIC \ + UINT32_C(0x3) + /* Set to 0xFF... (All Fs) if there is no service profile specified */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN \ + UINT32_C(0xff) + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_LAST \ + HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN + /* + * ID of CoS Queue 4. + * FF - Invalid id + * + * # This ID can be used on any subsequent call to an hwrm command + * that takes a queue id. + * # IDs must always be queried by this command before any use + * by the driver or software. + * # Any driver or software should not make any assumptions about + * queue IDs. + * # A value of 0xff indicates that the queue is not available. + * # Available queues may not be in sequential order. + */ + uint8_t queue_id4; + /* This value is applicable to CoS queues only. 
*/ + uint8_t queue_id4_service_profile; + /* Lossy (best-effort) */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_LOSSY \ + UINT32_C(0x0) + /* Lossless (legacy) */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS \ + UINT32_C(0x1) + /* Lossless RoCE */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS_ROCE \ + UINT32_C(0x1) + /* Lossy RoCE CNP */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_LOSSY_ROCE_CNP \ + UINT32_C(0x2) + /* Lossless NIC */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS_NIC \ + UINT32_C(0x3) + /* Set to 0xFF... (All Fs) if there is no service profile specified */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN \ + UINT32_C(0xff) + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_LAST \ + HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN + /* + * ID of CoS Queue 5. + * FF - Invalid id + * + * # This ID can be used on any subsequent call to an hwrm command + * that takes a queue id. + * # IDs must always be queried by this command before any use + * by the driver or software. + * # Any driver or software should not make any assumptions about + * queue IDs. + * # A value of 0xff indicates that the queue is not available. + * # Available queues may not be in sequential order. + */ + uint8_t queue_id5; + /* This value is applicable to CoS queues only. */ + uint8_t queue_id5_service_profile; + /* Lossy (best-effort) */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_LOSSY \ + UINT32_C(0x0) + /* Lossless (legacy) */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS \ + UINT32_C(0x1) + /* Lossless RoCE */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS_ROCE \ + UINT32_C(0x1) + /* Lossy RoCE CNP */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_LOSSY_ROCE_CNP \ + UINT32_C(0x2) + /* Lossless NIC */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS_NIC \ + UINT32_C(0x3) + /* Set to 0xFF... (All Fs) if there is no service profile specified */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN \ + UINT32_C(0xff) + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_LAST \ + HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN + /* + * ID of CoS Queue 6. + * FF - Invalid id + * + * # This ID can be used on any subsequent call to an hwrm command + * that takes a queue id. + * # IDs must always be queried by this command before any use + * by the driver or software. + * # Any driver or software should not make any assumptions about + * queue IDs. + * # A value of 0xff indicates that the queue is not available. + * # Available queues may not be in sequential order. + */ + uint8_t queue_id6; + /* This value is applicable to CoS queues only. */ + uint8_t queue_id6_service_profile; + /* Lossy (best-effort) */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_LOSSY \ + UINT32_C(0x0) + /* Lossless (legacy) */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS \ + UINT32_C(0x1) + /* Lossless RoCE */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS_ROCE \ + UINT32_C(0x1) + /* Lossy RoCE CNP */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_LOSSY_ROCE_CNP \ + UINT32_C(0x2) + /* Lossless NIC */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS_NIC \ + UINT32_C(0x3) + /* Set to 0xFF... 
(All Fs) if there is no service profile specified */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN \ + UINT32_C(0xff) + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_LAST \ + HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN + /* + * ID of CoS Queue 7. + * FF - Invalid id + * + * # This ID can be used on any subsequent call to an hwrm command + * that takes a queue id. + * # IDs must always be queried by this command before any use + * by the driver or software. + * # Any driver or software should not make any assumptions about + * queue IDs. + * # A value of 0xff indicates that the queue is not available. + * # Available queues may not be in sequential order. + */ + uint8_t queue_id7; + /* This value is applicable to CoS queues only. */ + uint8_t queue_id7_service_profile; + /* Lossy (best-effort) */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_LOSSY \ + UINT32_C(0x0) + /* Lossless (legacy) */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS \ + UINT32_C(0x1) + /* Lossless RoCE */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_ROCE \ + UINT32_C(0x1) + /* Lossy RoCE CNP */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_LOSSY_ROCE_CNP \ + UINT32_C(0x2) + /* Lossless NIC */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_NIC \ + UINT32_C(0x3) + /* Set to 0xFF... (All Fs) if there is no service profile specified */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN \ + UINT32_C(0xff) + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_LAST \ + HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/******************* + * hwrm_queue_qcfg * + *******************/ + + +/* hwrm_queue_qcfg_input (size:192b/24B) */ +struct hwrm_queue_qcfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t flags; + /* + * Enumeration denoting the RX, TX type of the resource. + * This enumeration is used for resources that are similar for both + * TX and RX paths of the chip. 
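Because hwrm_queue_qportcfg_output is packed, its queue_id0 through queue_id7_service_profile fields form eight consecutive (id, service_profile) byte pairs, which is why a response can be walked with a plain byte pointer instead of eight copies of the same code. A sketch under that layout assumption (struct cosq_info and the helper are illustrative, not part of this patch):

/* Illustrative holder for one CoS queue reported by hwrm_queue_qportcfg. */
struct cosq_info {
	uint8_t id;		/* 0xff means the queue is not available */
	uint8_t profile;	/* one of the *_SERVICE_PROFILE_* values */
};

/* Sketch only: copy out up to eight (queue id, profile) pairs. */
static unsigned int
parse_qportcfg(const struct hwrm_queue_qportcfg_output *resp,
	       struct cosq_info info[8])
{
	const uint8_t *pair = &resp->queue_id0;
	unsigned int i, n = resp->max_configurable_queues;

	if (n > 8)
		n = 8;	/* the response carries at most 8 pairs */
	for (i = 0; i < n; i++) {
		info[i].id = pair[2 * i];
		info[i].profile = pair[2 * i + 1];
	}
	return n;
}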
+ */
+ #define HWRM_QUEUE_QCFG_INPUT_FLAGS_PATH UINT32_C(0x1)
+ /* tx path */
+ #define HWRM_QUEUE_QCFG_INPUT_FLAGS_PATH_TX UINT32_C(0x0)
+ /* rx path */
+ #define HWRM_QUEUE_QCFG_INPUT_FLAGS_PATH_RX UINT32_C(0x1)
+ #define HWRM_QUEUE_QCFG_INPUT_FLAGS_PATH_LAST \
+ HWRM_QUEUE_QCFG_INPUT_FLAGS_PATH_RX
+ /* Queue ID of the queue. */
+ uint32_t queue_id;
+} __attribute__((packed));
+
+/* hwrm_queue_qcfg_output (size:128b/16B) */
+struct hwrm_queue_qcfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /*
+ * This value is the estimated packet length used in the
+ * TX arbiter.
+ */
+ uint32_t queue_len;
+ /* This value is applicable to CoS queues only. */
+ uint8_t service_profile;
+ /* Lossy (best-effort) */
+ #define HWRM_QUEUE_QCFG_OUTPUT_SERVICE_PROFILE_LOSSY UINT32_C(0x0)
+ /* Lossless */
+ #define HWRM_QUEUE_QCFG_OUTPUT_SERVICE_PROFILE_LOSSLESS UINT32_C(0x1)
+ /* Set to 0xFF... (All Fs) if there is no service profile specified */
+ #define HWRM_QUEUE_QCFG_OUTPUT_SERVICE_PROFILE_UNKNOWN UINT32_C(0xff)
+ #define HWRM_QUEUE_QCFG_OUTPUT_SERVICE_PROFILE_LAST \
+ HWRM_QUEUE_QCFG_OUTPUT_SERVICE_PROFILE_UNKNOWN
+ /* Information about queue configuration. */
+ uint8_t queue_cfg_info;
+ /*
+ * If this flag is set to '1', then the queue is
+ * configured asymmetrically on TX and RX sides.
+ * If this flag is set to '0', then this queue is
+ * configured symmetrically on TX and RX sides.
+ */
+ #define HWRM_QUEUE_QCFG_OUTPUT_QUEUE_CFG_INFO_ASYM_CFG \
+ UINT32_C(0x1)
+ uint8_t unused_0;
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/******************
+ * hwrm_queue_cfg *
+ ******************/
+
+
+/* hwrm_queue_cfg_input (size:320b/40B) */
+struct hwrm_queue_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t flags;
+ /*
+ * Enumeration denoting the RX, TX, or both directions applicable to the resource.
+ * This enumeration is used for resources that are similar for both
+ * TX and RX paths of the chip.
+ */
+ #define HWRM_QUEUE_CFG_INPUT_FLAGS_PATH_MASK UINT32_C(0x3)
+ #define HWRM_QUEUE_CFG_INPUT_FLAGS_PATH_SFT 0
+ /* tx path */
+ #define HWRM_QUEUE_CFG_INPUT_FLAGS_PATH_TX UINT32_C(0x0)
+ /* rx path */
+ #define HWRM_QUEUE_CFG_INPUT_FLAGS_PATH_RX UINT32_C(0x1)
+ /* Bi-directional (Symmetrically applicable to TX and RX paths) */
+ #define HWRM_QUEUE_CFG_INPUT_FLAGS_PATH_BIDIR UINT32_C(0x2)
+ #define HWRM_QUEUE_CFG_INPUT_FLAGS_PATH_LAST \
+ HWRM_QUEUE_CFG_INPUT_FLAGS_PATH_BIDIR
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the dflt_len field to be
+ * configured.
+ */
+ #define HWRM_QUEUE_CFG_INPUT_ENABLES_DFLT_LEN UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the service_profile field to be
+ * configured.
+ */
+ #define HWRM_QUEUE_CFG_INPUT_ENABLES_SERVICE_PROFILE UINT32_C(0x2)
+ /* Queue ID of queue that is to be configured by this function. */
+ uint32_t queue_id;
+ /*
+ * This value is the estimated packet length used in the
+ * TX arbiter.
+ * Set to 0xFF... (All Fs) to not adjust this value.
+ */
+ uint32_t dflt_len;
+ /* This value is applicable to CoS queues only. */
+ uint8_t service_profile;
+ /* Lossy (best-effort) */
+ #define HWRM_QUEUE_CFG_INPUT_SERVICE_PROFILE_LOSSY UINT32_C(0x0)
+ /* Lossless */
+ #define HWRM_QUEUE_CFG_INPUT_SERVICE_PROFILE_LOSSLESS UINT32_C(0x1)
+ /* Set to 0xFF... (All Fs) if there is no service profile specified */
+ #define HWRM_QUEUE_CFG_INPUT_SERVICE_PROFILE_UNKNOWN UINT32_C(0xff)
+ #define HWRM_QUEUE_CFG_INPUT_SERVICE_PROFILE_LAST \
+ HWRM_QUEUE_CFG_INPUT_SERVICE_PROFILE_UNKNOWN
+ uint8_t unused_0[7];
+} __attribute__((packed));
+
+/* hwrm_queue_cfg_output (size:128b/16B) */
+struct hwrm_queue_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/*****************************
+ * hwrm_queue_pfcenable_qcfg *
+ *****************************/
+
+
+/* hwrm_queue_pfcenable_qcfg_input (size:192b/24B) */
+struct hwrm_queue_pfcenable_qcfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
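Tying the flags/enables pattern of hwrm_queue_cfg together, here is a hedged sketch that would ask the firmware to make one CoS queue lossless symmetrically on TX and RX, leaving dflt_len untouched by not setting its enables bit (hypothetical helper; sending the message and byte-order conversion are out of scope):

#include <string.h>

/* Sketch only: request the lossless service profile for 'queue_id'. */
static void
prep_queue_lossless(struct hwrm_queue_cfg_input *req, uint32_t queue_id)
{
	memset(req, 0, sizeof(*req));
	req->flags = HWRM_QUEUE_CFG_INPUT_FLAGS_PATH_BIDIR;
	req->queue_id = queue_id;
	req->service_profile = HWRM_QUEUE_CFG_INPUT_SERVICE_PROFILE_LOSSLESS;
	/* Only service_profile is flagged, so dflt_len is left alone. */
	req->enables = HWRM_QUEUE_CFG_INPUT_ENABLES_SERVICE_PROFILE;
}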
+ */ + uint64_t resp_addr; + /* + * Port ID of port for which the table is being configured. + * The HWRM needs to check whether this function is allowed + * to configure pri2cos mapping on this port. + */ + uint16_t port_id; + uint8_t unused_0[6]; +} __attribute__((packed)); + +/* hwrm_queue_pfcenable_qcfg_output (size:128b/16B) */ +struct hwrm_queue_pfcenable_qcfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint32_t flags; + /* If set to 1, then PFC is enabled on PRI 0. */ + #define HWRM_QUEUE_PFCENABLE_QCFG_OUTPUT_FLAGS_PRI0_PFC_ENABLED \ + UINT32_C(0x1) + /* If set to 1, then PFC is enabled on PRI 1. */ + #define HWRM_QUEUE_PFCENABLE_QCFG_OUTPUT_FLAGS_PRI1_PFC_ENABLED \ + UINT32_C(0x2) + /* If set to 1, then PFC is enabled on PRI 2. */ + #define HWRM_QUEUE_PFCENABLE_QCFG_OUTPUT_FLAGS_PRI2_PFC_ENABLED \ + UINT32_C(0x4) + /* If set to 1, then PFC is enabled on PRI 3. */ + #define HWRM_QUEUE_PFCENABLE_QCFG_OUTPUT_FLAGS_PRI3_PFC_ENABLED \ + UINT32_C(0x8) + /* If set to 1, then PFC is enabled on PRI 4. */ + #define HWRM_QUEUE_PFCENABLE_QCFG_OUTPUT_FLAGS_PRI4_PFC_ENABLED \ + UINT32_C(0x10) + /* If set to 1, then PFC is enabled on PRI 5. */ + #define HWRM_QUEUE_PFCENABLE_QCFG_OUTPUT_FLAGS_PRI5_PFC_ENABLED \ + UINT32_C(0x20) + /* If set to 1, then PFC is enabled on PRI 6. */ + #define HWRM_QUEUE_PFCENABLE_QCFG_OUTPUT_FLAGS_PRI6_PFC_ENABLED \ + UINT32_C(0x40) + /* If set to 1, then PFC is enabled on PRI 7. */ + #define HWRM_QUEUE_PFCENABLE_QCFG_OUTPUT_FLAGS_PRI7_PFC_ENABLED \ + UINT32_C(0x80) + uint8_t unused_0[3]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/**************************** + * hwrm_queue_pfcenable_cfg * + ****************************/ + + +/* hwrm_queue_pfcenable_cfg_input (size:192b/24B) */ +struct hwrm_queue_pfcenable_cfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t flags; + /* If set to 1, then PFC is requested to be enabled on PRI 0. */ + #define HWRM_QUEUE_PFCENABLE_CFG_INPUT_FLAGS_PRI0_PFC_ENABLED \ + UINT32_C(0x1) + /* If set to 1, then PFC is requested to be enabled on PRI 1. 
*/ + #define HWRM_QUEUE_PFCENABLE_CFG_INPUT_FLAGS_PRI1_PFC_ENABLED \ + UINT32_C(0x2) + /* If set to 1, then PFC is requested to be enabled on PRI 2. */ + #define HWRM_QUEUE_PFCENABLE_CFG_INPUT_FLAGS_PRI2_PFC_ENABLED \ + UINT32_C(0x4) + /* If set to 1, then PFC is requested to be enabled on PRI 3. */ + #define HWRM_QUEUE_PFCENABLE_CFG_INPUT_FLAGS_PRI3_PFC_ENABLED \ + UINT32_C(0x8) + /* If set to 1, then PFC is requested to be enabled on PRI 4. */ + #define HWRM_QUEUE_PFCENABLE_CFG_INPUT_FLAGS_PRI4_PFC_ENABLED \ + UINT32_C(0x10) + /* If set to 1, then PFC is requested to be enabled on PRI 5. */ + #define HWRM_QUEUE_PFCENABLE_CFG_INPUT_FLAGS_PRI5_PFC_ENABLED \ + UINT32_C(0x20) + /* If set to 1, then PFC is requested to be enabled on PRI 6. */ + #define HWRM_QUEUE_PFCENABLE_CFG_INPUT_FLAGS_PRI6_PFC_ENABLED \ + UINT32_C(0x40) + /* If set to 1, then PFC is requested to be enabled on PRI 7. */ + #define HWRM_QUEUE_PFCENABLE_CFG_INPUT_FLAGS_PRI7_PFC_ENABLED \ + UINT32_C(0x80) + /* + * Port ID of port for which the table is being configured. + * The HWRM needs to check whether this function is allowed + * to configure pri2cos mapping on this port. + */ + uint16_t port_id; + uint8_t unused_0[2]; +} __attribute__((packed)); + +/* hwrm_queue_pfcenable_cfg_output (size:128b/16B) */ +struct hwrm_queue_pfcenable_cfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/*************************** + * hwrm_queue_pri2cos_qcfg * + ***************************/ + + +/* hwrm_queue_pri2cos_qcfg_input (size:192b/24B) */ +struct hwrm_queue_pri2cos_qcfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t flags; + /* + * Enumeration denoting the RX, TX type of the resource. + * This enumeration is used for resources that are similar for both + * TX and RX paths of the chip. 
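The `valid` byte documented in every output record above carries an ordering contract: firmware writes it last, so the driver must observe it first and only then trust the rest of the response. A sketch of that polling pattern; the barrier choice is an assumption (DPDK's rte_io_rmb() is one suitable fence) and the helper name is illustrative:

    #include <stdbool.h>
    #include <stdint.h>
    #include <rte_atomic.h>

    /* Illustrative poll: returns true once the response is safe to read. */
    static bool
    hwrm_resp_complete(const volatile uint8_t *valid)
    {
    	if (*valid != 1)
    		return false;
    	/* Keep subsequent reads of the response body after the valid read. */
    	rte_io_rmb();
    	return true;
    }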
+	 */
+	#define HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_PATH UINT32_C(0x1)
+	/* tx path */
+	#define HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_PATH_TX UINT32_C(0x0)
+	/* rx path */
+	#define HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_PATH_RX UINT32_C(0x1)
+	#define HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_PATH_LAST \
+		HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_PATH_RX
+	/*
+	 * When this bit is set to '0', the query is
+	 * for VLAN PRI field in tunnel headers.
+	 * When this bit is set to '1', the query is
+	 * for VLAN PRI field in inner packet headers.
+	 */
+	#define HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_IVLAN UINT32_C(0x2)
+	/*
+	 * Port ID of port for which the table is being configured.
+	 * The HWRM needs to check whether this function is allowed
+	 * to configure pri2cos mapping on this port.
+	 */
+	uint8_t port_id;
+	uint8_t unused_0[3];
+} __attribute__((packed));
+
+/* hwrm_queue_pri2cos_qcfg_output (size:192b/24B) */
+struct hwrm_queue_pri2cos_qcfg_output {
+	/* The specific error status for the command. */
+	uint16_t error_code;
+	/* The HWRM command request type. */
+	uint16_t req_type;
+	/* The sequence ID from the original command. */
+	uint16_t seq_id;
+	/* The length of the response data in number of bytes. */
+	uint16_t resp_len;
+	/*
+	 * CoS Queue assigned to priority 0. This value can only
+	 * be changed before traffic has started.
+	 * A value of 0xff indicates that no CoS queue is assigned to the
+	 * specified priority.
+	 */
+	uint8_t pri0_cos_queue_id;
+	/*
+	 * CoS Queue assigned to priority 1. This value can only
+	 * be changed before traffic has started.
+	 * A value of 0xff indicates that no CoS queue is assigned to the
+	 * specified priority.
+	 */
+	uint8_t pri1_cos_queue_id;
+	/*
+	 * CoS Queue assigned to priority 2. This value can only
+	 * be changed before traffic has started.
+	 * A value of 0xff indicates that no CoS queue is assigned to the
+	 * specified priority.
+	 */
+	uint8_t pri2_cos_queue_id;
+	/*
+	 * CoS Queue assigned to priority 3. This value can only
+	 * be changed before traffic has started.
+	 * A value of 0xff indicates that no CoS queue is assigned to the
+	 * specified priority.
+	 */
+	uint8_t pri3_cos_queue_id;
+	/*
+	 * CoS Queue assigned to priority 4. This value can only
+	 * be changed before traffic has started.
+	 * A value of 0xff indicates that no CoS queue is assigned to the
+	 * specified priority.
+	 */
+	uint8_t pri4_cos_queue_id;
+	/*
+	 * CoS Queue assigned to priority 5. This value can only
+	 * be changed before traffic has started.
+	 * A value of 0xff indicates that no CoS queue is assigned to the
+	 * specified priority.
+	 */
+	uint8_t pri5_cos_queue_id;
+	/*
+	 * CoS Queue assigned to priority 6. This value can only
+	 * be changed before traffic has started.
+	 * A value of 0xff indicates that no CoS queue is assigned to the
+	 * specified priority.
+	 */
+	uint8_t pri6_cos_queue_id;
+	/*
+	 * CoS Queue assigned to priority 7. This value can only
+	 * be changed before traffic has started.
+	 * A value of 0xff indicates that no CoS queue is assigned to the
+	 * specified priority.
+	 */
+	uint8_t pri7_cos_queue_id;
+	/* Information about queue configuration. */
+	uint8_t queue_cfg_info;
+	/*
+	 * If this flag is set to '1', then the PRI to CoS
+	 * configuration is asymmetric on TX and RX sides.
+	 * If this flag is set to '0', then PRI to CoS configuration
+	 * is symmetric on TX and RX sides.
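Reading the mapping back is a matter of collecting the eight per-priority bytes and honoring the 0xff "unassigned" sentinel described above. A sketch, with the array layout and helper name illustrative:

    #include <stdint.h>

    /* Illustrative helper: copy the PRI -> CoS queue table from a response.
     * An entry of 0xff means no CoS queue is assigned to that priority.
     */
    static void
    pri2cos_table_read(const struct hwrm_queue_pri2cos_qcfg_output *resp,
    		   uint8_t cos_by_pri[8])
    {
    	cos_by_pri[0] = resp->pri0_cos_queue_id;
    	cos_by_pri[1] = resp->pri1_cos_queue_id;
    	cos_by_pri[2] = resp->pri2_cos_queue_id;
    	cos_by_pri[3] = resp->pri3_cos_queue_id;
    	cos_by_pri[4] = resp->pri4_cos_queue_id;
    	cos_by_pri[5] = resp->pri5_cos_queue_id;
    	cos_by_pri[6] = resp->pri6_cos_queue_id;
    	cos_by_pri[7] = resp->pri7_cos_queue_id;
    }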
+ */ + #define HWRM_QUEUE_PRI2COS_QCFG_OUTPUT_QUEUE_CFG_INFO_ASYM_CFG \ + UINT32_C(0x1) + uint8_t unused_0[6]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/************************** + * hwrm_queue_pri2cos_cfg * + **************************/ + + +/* hwrm_queue_pri2cos_cfg_input (size:320b/40B) */ +struct hwrm_queue_pri2cos_cfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t flags; + /* + * Enumeration denoting the RX, TX, or both directions applicable to the resource. + * This enumeration is used for resources that are similar for both + * TX and RX paths of the chip. + */ + #define HWRM_QUEUE_PRI2COS_CFG_INPUT_FLAGS_PATH_MASK UINT32_C(0x3) + #define HWRM_QUEUE_PRI2COS_CFG_INPUT_FLAGS_PATH_SFT 0 + /* tx path */ + #define HWRM_QUEUE_PRI2COS_CFG_INPUT_FLAGS_PATH_TX UINT32_C(0x0) + /* rx path */ + #define HWRM_QUEUE_PRI2COS_CFG_INPUT_FLAGS_PATH_RX UINT32_C(0x1) + /* Bi-directional (Symmetrically applicable to TX and RX paths) */ + #define HWRM_QUEUE_PRI2COS_CFG_INPUT_FLAGS_PATH_BIDIR UINT32_C(0x2) + #define HWRM_QUEUE_PRI2COS_CFG_INPUT_FLAGS_PATH_LAST \ + HWRM_QUEUE_PRI2COS_CFG_INPUT_FLAGS_PATH_BIDIR + /* + * When this bit is set to '0', the mapping is requested + * for VLAN PRI field in tunnel headers. + * When this bit is set to '1', the mapping is requested + * for VLAN PRI field in inner packet headers. + */ + #define HWRM_QUEUE_PRI2COS_CFG_INPUT_FLAGS_IVLAN UINT32_C(0x4) + uint32_t enables; + /* + * This bit must be '1' for the pri0_cos_queue_id field to be + * configured. + */ + #define HWRM_QUEUE_PRI2COS_CFG_INPUT_ENABLES_PRI0_COS_QUEUE_ID \ + UINT32_C(0x1) + /* + * This bit must be '1' for the pri1_cos_queue_id field to be + * configured. + */ + #define HWRM_QUEUE_PRI2COS_CFG_INPUT_ENABLES_PRI1_COS_QUEUE_ID \ + UINT32_C(0x2) + /* + * This bit must be '1' for the pri2_cos_queue_id field to be + * configured. + */ + #define HWRM_QUEUE_PRI2COS_CFG_INPUT_ENABLES_PRI2_COS_QUEUE_ID \ + UINT32_C(0x4) + /* + * This bit must be '1' for the pri3_cos_queue_id field to be + * configured. + */ + #define HWRM_QUEUE_PRI2COS_CFG_INPUT_ENABLES_PRI3_COS_QUEUE_ID \ + UINT32_C(0x8) + /* + * This bit must be '1' for the pri4_cos_queue_id field to be + * configured. 
+	 */
+	#define HWRM_QUEUE_PRI2COS_CFG_INPUT_ENABLES_PRI4_COS_QUEUE_ID \
+		UINT32_C(0x10)
+	/*
+	 * This bit must be '1' for the pri5_cos_queue_id field to be
+	 * configured.
+	 */
+	#define HWRM_QUEUE_PRI2COS_CFG_INPUT_ENABLES_PRI5_COS_QUEUE_ID \
+		UINT32_C(0x20)
+	/*
+	 * This bit must be '1' for the pri6_cos_queue_id field to be
+	 * configured.
+	 */
+	#define HWRM_QUEUE_PRI2COS_CFG_INPUT_ENABLES_PRI6_COS_QUEUE_ID \
+		UINT32_C(0x40)
+	/*
+	 * This bit must be '1' for the pri7_cos_queue_id field to be
+	 * configured.
+	 */
+	#define HWRM_QUEUE_PRI2COS_CFG_INPUT_ENABLES_PRI7_COS_QUEUE_ID \
+		UINT32_C(0x80)
+	/*
+	 * Port ID of port for which the table is being configured.
+	 * The HWRM needs to check whether this function is allowed
+	 * to configure pri2cos mapping on this port.
+	 */
+	uint8_t port_id;
+	/*
+	 * CoS Queue assigned to priority 0. This value can only
+	 * be changed before traffic has started.
+	 */
+	uint8_t pri0_cos_queue_id;
+	/*
+	 * CoS Queue assigned to priority 1. This value can only
+	 * be changed before traffic has started.
+	 */
+	uint8_t pri1_cos_queue_id;
+	/*
+	 * CoS Queue assigned to priority 2. This value can only
+	 * be changed before traffic has started.
+	 */
+	uint8_t pri2_cos_queue_id;
+	/*
+	 * CoS Queue assigned to priority 3. This value can only
+	 * be changed before traffic has started.
+	 */
+	uint8_t pri3_cos_queue_id;
+	/*
+	 * CoS Queue assigned to priority 4. This value can only
+	 * be changed before traffic has started.
+	 */
+	uint8_t pri4_cos_queue_id;
+	/*
+	 * CoS Queue assigned to priority 5. This value can only
+	 * be changed before traffic has started.
+	 */
+	uint8_t pri5_cos_queue_id;
+	/*
+	 * CoS Queue assigned to priority 6. This value can only
+	 * be changed before traffic has started.
+	 */
+	uint8_t pri6_cos_queue_id;
+	/*
+	 * CoS Queue assigned to priority 7. This value can only
+	 * be changed before traffic has started.
+	 */
+	uint8_t pri7_cos_queue_id;
+	uint8_t unused_0[7];
+} __attribute__((packed));
+
+/* hwrm_queue_pri2cos_cfg_output (size:128b/16B) */
+struct hwrm_queue_pri2cos_cfg_output {
+	/* The specific error status for the command. */
+	uint16_t error_code;
+	/* The HWRM command request type. */
+	uint16_t req_type;
+	/* The sequence ID from the original command. */
+	uint16_t seq_id;
+	/* The length of the response data in number of bytes. */
+	uint16_t resp_len;
+	uint8_t unused_0[7];
+	/*
+	 * This field is used in Output records to indicate that the output
+	 * is completely written to RAM. This field should be read as '1'
+	 * to indicate that the output has been completely written.
+	 * When writing a command completion or response to an internal processor,
+	 * the order of writes has to be such that this field is written last.
+	 */
+	uint8_t valid;
+} __attribute__((packed));
+
+/**************************
+ * hwrm_queue_cos2bw_qcfg *
+ **************************/
+
+
+/* hwrm_queue_cos2bw_qcfg_input (size:192b/24B) */
+struct hwrm_queue_cos2bw_qcfg_input {
+	/* The HWRM command request type. */
+	uint16_t req_type;
+	/*
+	 * The completion ring to send the completion event on. This should
+	 * be the NQ ID returned from the `nq_alloc` HWRM command.
+	 */
+	uint16_t cmpl_ring;
+	/*
+	 * The sequence ID is used by the driver for tracking multiple
+	 * commands. This ID is treated as opaque data by the firmware and
+	 * the value is returned in the `hwrm_resp_hdr` upon completion.
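The enables bits pair one-for-one with the per-priority fields of the request, so configuring a single mapping touches exactly one bit and one byte. A sketch, with the helper name illustrative and the common request header again left to the transport code:

    #include <stdint.h>
    #include <string.h>
    #include <rte_byteorder.h>

    /* Illustrative helper: map VLAN priority 0 to one CoS queue on both
     * TX and RX paths of the given port.
     */
    static void
    pri2cos_map_pri0(struct hwrm_queue_pri2cos_cfg_input *req,
    		 uint8_t port_id, uint8_t cos_queue_id)
    {
    	memset(req, 0, sizeof(*req));
    	req->flags =
    		rte_cpu_to_le_32(HWRM_QUEUE_PRI2COS_CFG_INPUT_FLAGS_PATH_BIDIR);
    	req->enables = rte_cpu_to_le_32(
    		HWRM_QUEUE_PRI2COS_CFG_INPUT_ENABLES_PRI0_COS_QUEUE_ID);
    	req->port_id = port_id;
    	req->pri0_cos_queue_id = cos_queue_id;
    }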
+ */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* + * Port ID of port for which the table is being configured. + * The HWRM needs to check whether this function is allowed + * to configure TC BW assignment on this port. + */ + uint16_t port_id; + uint8_t unused_0[6]; +} __attribute__((packed)); + +/* hwrm_queue_cos2bw_qcfg_output (size:896b/112B) */ +struct hwrm_queue_cos2bw_qcfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* ID of CoS Queue 0. */ + uint8_t queue_id0; + uint8_t unused_0; + uint16_t unused_1; + /* + * Minimum BW allocated to CoS Queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id0_min_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID + /* + * Maximum BW allocated to CoS Queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. 
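Every min_bw/max_bw word in this file uses the same 28+1+3 split: a 28-bit magnitude, a scale bit (bits vs. bytes), and a 3-bit unit. A decode sketch for queue 0's minimum, using only the macros just defined (the helper name is illustrative, and the same pattern applies to every other min/max word by swapping the macro prefix):

    #include <stdint.h>
    #include <rte_byteorder.h>

    /* Illustrative decode of a cos2bw bandwidth word into its three parts. */
    static void
    min_bw_decode(uint32_t le_bw, uint32_t *value, int *in_bytes, uint32_t *unit)
    {
    	uint32_t bw = rte_le_to_cpu_32(le_bw);

    	/* Bits 0..27: the magnitude itself. */
    	*value = bw & HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_MASK;
    	/* Bit 28: clear = bits, set = bytes. */
    	*in_bytes = !!(bw & HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_SCALE);
    	/* Bits 29..31: kilo/mega/giga, raw base, 1/100 percent, or invalid. */
    	*unit = (bw & HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK)
    		>> HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT;
    }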
+ */ + uint32_t queue_id0_max_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID + /* Transmission Selection Algorithm (TSA) for CoS Queue. */ + uint8_t queue_id0_tsa_assign; + /* Strict Priority */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_TSA_ASSIGN_SP \ + UINT32_C(0x0) + /* Enhanced Transmission Selection */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_TSA_ASSIGN_ETS \ + UINT32_C(0x1) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST \ + UINT32_C(0x2) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST \ + UINT32_C(0xff) + /* + * Priority level for strict priority. Valid only when the + * tsa_assign is 0 - Strict Priority (SP) + * 0..7 - Valid values. + * 8..255 - Reserved. + */ + uint8_t queue_id0_pri_lvl; + /* + * Weight used to allocate remaining BW for this COS after + * servicing guaranteed bandwidths for all COS. + */ + uint8_t queue_id0_bw_weight; + /* ID of CoS Queue 1. */ + uint8_t queue_id1; + /* + * Minimum BW allocated to CoS Queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id1_min_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. 
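How the per-queue scheduler fields above interact is worth spelling out: pri_lvl only matters under strict priority, and bw_weight only under ETS. A sketch for queue 0, meant to be compiled in a unit that includes this header; the helper name is illustrative:

    /* Illustrative interpretation of queue 0's transmission selection fields. */
    static const char *
    tsa_describe(const struct hwrm_queue_cos2bw_qcfg_output *resp)
    {
    	switch (resp->queue_id0_tsa_assign) {
    	case HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_TSA_ASSIGN_SP:
    		/* Strict priority: queue_id0_pri_lvl (0..7) decides order. */
    		return "strict priority";
    	case HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_TSA_ASSIGN_ETS:
    		/* ETS: queue_id0_bw_weight shares out the leftover BW. */
    		return "ETS";
    	default:
    		/* Values 2..255 are reserved. */
    		return "reserved";
    	}
    }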
*/ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID + /* + * Maximum BW allocated to CoS queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id1_max_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. 
*/ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID + /* Transmission Selection Algorithm (TSA) for CoS Queue. */ + uint8_t queue_id1_tsa_assign; + /* Strict Priority */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_TSA_ASSIGN_SP \ + UINT32_C(0x0) + /* Enhanced Transmission Selection */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_TSA_ASSIGN_ETS \ + UINT32_C(0x1) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST \ + UINT32_C(0x2) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST \ + UINT32_C(0xff) + /* + * Priority level for strict priority. Valid only when the + * tsa_assign is 0 - Strict Priority (SP) + * 0..7 - Valid values. + * 8..255 - Reserved. + */ + uint8_t queue_id1_pri_lvl; + /* + * Weight used to allocate remaining BW for this COS after + * servicing guaranteed bandwidths for all COS. + */ + uint8_t queue_id1_bw_weight; + /* ID of CoS Queue 2. */ + uint8_t queue_id2; + /* + * Minimum BW allocated to CoS Queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id2_min_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID + /* + * Maximum BW allocated to CoS queue. 
+ * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id2_max_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID + /* Transmission Selection Algorithm (TSA) for CoS Queue. */ + uint8_t queue_id2_tsa_assign; + /* Strict Priority */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_TSA_ASSIGN_SP \ + UINT32_C(0x0) + /* Enhanced Transmission Selection */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_TSA_ASSIGN_ETS \ + UINT32_C(0x1) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST \ + UINT32_C(0x2) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST \ + UINT32_C(0xff) + /* + * Priority level for strict priority. Valid only when the + * tsa_assign is 0 - Strict Priority (SP) + * 0..7 - Valid values. + * 8..255 - Reserved. + */ + uint8_t queue_id2_pri_lvl; + /* + * Weight used to allocate remaining BW for this COS after + * servicing guaranteed bandwidths for all COS. + */ + uint8_t queue_id2_bw_weight; + /* ID of CoS Queue 3. */ + uint8_t queue_id3; + /* + * Minimum BW allocated to CoS Queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id3_min_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). 
*/ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID + /* + * Maximum BW allocated to CoS queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id3_max_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. 
*/ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID + /* Transmission Selection Algorithm (TSA) for CoS Queue. */ + uint8_t queue_id3_tsa_assign; + /* Strict Priority */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_TSA_ASSIGN_SP \ + UINT32_C(0x0) + /* Enhanced Transmission Selection */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_TSA_ASSIGN_ETS \ + UINT32_C(0x1) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST \ + UINT32_C(0x2) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST \ + UINT32_C(0xff) + /* + * Priority level for strict priority. Valid only when the + * tsa_assign is 0 - Strict Priority (SP) + * 0..7 - Valid values. + * 8..255 - Reserved. + */ + uint8_t queue_id3_pri_lvl; + /* + * Weight used to allocate remaining BW for this COS after + * servicing guaranteed bandwidths for all COS. + */ + uint8_t queue_id3_bw_weight; + /* ID of CoS Queue 4. */ + uint8_t queue_id4; + /* + * Minimum BW allocated to CoS Queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id4_min_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID + /* + * Maximum BW allocated to CoS queue. 
+ * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id4_max_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID + /* Transmission Selection Algorithm (TSA) for CoS Queue. */ + uint8_t queue_id4_tsa_assign; + /* Strict Priority */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_TSA_ASSIGN_SP \ + UINT32_C(0x0) + /* Enhanced Transmission Selection */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_TSA_ASSIGN_ETS \ + UINT32_C(0x1) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST \ + UINT32_C(0x2) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST \ + UINT32_C(0xff) + /* + * Priority level for strict priority. Valid only when the + * tsa_assign is 0 - Strict Priority (SP) + * 0..7 - Valid values. + * 8..255 - Reserved. + */ + uint8_t queue_id4_pri_lvl; + /* + * Weight used to allocate remaining BW for this COS after + * servicing guaranteed bandwidths for all COS. + */ + uint8_t queue_id4_bw_weight; + /* ID of CoS Queue 5. */ + uint8_t queue_id5; + /* + * Minimum BW allocated to CoS Queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id5_min_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). 
*/ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID + /* + * Maximum BW allocated to CoS queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id5_max_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. 
*/ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID + /* Transmission Selection Algorithm (TSA) for CoS Queue. */ + uint8_t queue_id5_tsa_assign; + /* Strict Priority */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_TSA_ASSIGN_SP \ + UINT32_C(0x0) + /* Enhanced Transmission Selection */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_TSA_ASSIGN_ETS \ + UINT32_C(0x1) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST \ + UINT32_C(0x2) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST \ + UINT32_C(0xff) + /* + * Priority level for strict priority. Valid only when the + * tsa_assign is 0 - Strict Priority (SP) + * 0..7 - Valid values. + * 8..255 - Reserved. + */ + uint8_t queue_id5_pri_lvl; + /* + * Weight used to allocate remaining BW for this COS after + * servicing guaranteed bandwidths for all COS. + */ + uint8_t queue_id5_bw_weight; + /* ID of CoS Queue 6. */ + uint8_t queue_id6; + /* + * Minimum BW allocated to CoS Queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id6_min_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID + /* + * Maximum BW allocated to CoS queue. 
+ * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id6_max_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID + /* Transmission Selection Algorithm (TSA) for CoS Queue. */ + uint8_t queue_id6_tsa_assign; + /* Strict Priority */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_TSA_ASSIGN_SP \ + UINT32_C(0x0) + /* Enhanced Transmission Selection */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_TSA_ASSIGN_ETS \ + UINT32_C(0x1) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST \ + UINT32_C(0x2) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST \ + UINT32_C(0xff) + /* + * Priority level for strict priority. Valid only when the + * tsa_assign is 0 - Strict Priority (SP) + * 0..7 - Valid values. + * 8..255 - Reserved. + */ + uint8_t queue_id6_pri_lvl; + /* + * Weight used to allocate remaining BW for this COS after + * servicing guaranteed bandwidths for all COS. + */ + uint8_t queue_id6_bw_weight; + /* ID of CoS Queue 7. */ + uint8_t queue_id7; + /* + * Minimum BW allocated to CoS Queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id7_min_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). 
*/ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID + /* + * Maximum BW allocated to CoS queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id7_max_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. 
*/ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID + /* Transmission Selection Algorithm (TSA) for CoS Queue. */ + uint8_t queue_id7_tsa_assign; + /* Strict Priority */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_TSA_ASSIGN_SP \ + UINT32_C(0x0) + /* Enhanced Transmission Selection */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_TSA_ASSIGN_ETS \ + UINT32_C(0x1) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST \ + UINT32_C(0x2) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST \ + UINT32_C(0xff) + /* + * Priority level for strict priority. Valid only when the + * tsa_assign is 0 - Strict Priority (SP) + * 0..7 - Valid values. + * 8..255 - Reserved. + */ + uint8_t queue_id7_pri_lvl; + /* + * Weight used to allocate remaining BW for this COS after + * servicing guaranteed bandwidths for all COS. + */ + uint8_t queue_id7_bw_weight; + uint8_t unused_2[4]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/************************* + * hwrm_queue_cos2bw_cfg * + *************************/ + + +/* hwrm_queue_cos2bw_cfg_input (size:1024b/128B) */ +struct hwrm_queue_cos2bw_cfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t flags; + uint32_t enables; + /* + * If this bit is set to 1, then all queue_id0 related + * parameters in this command are valid. + */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_ENABLES_COS_QUEUE_ID0_VALID \ + UINT32_C(0x1) + /* + * If this bit is set to 1, then all queue_id1 related + * parameters in this command are valid. + */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_ENABLES_COS_QUEUE_ID1_VALID \ + UINT32_C(0x2) + /* + * If this bit is set to 1, then all queue_id2 related + * parameters in this command are valid. + */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_ENABLES_COS_QUEUE_ID2_VALID \ + UINT32_C(0x4) + /* + * If this bit is set to 1, then all queue_id3 related + * parameters in this command are valid. 
+ */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_ENABLES_COS_QUEUE_ID3_VALID \ + UINT32_C(0x8) + /* + * If this bit is set to 1, then all queue_id4 related + * parameters in this command are valid. + */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_ENABLES_COS_QUEUE_ID4_VALID \ + UINT32_C(0x10) + /* + * If this bit is set to 1, then all queue_id5 related + * parameters in this command are valid. + */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_ENABLES_COS_QUEUE_ID5_VALID \ + UINT32_C(0x20) + /* + * If this bit is set to 1, then all queue_id6 related + * parameters in this command are valid. + */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_ENABLES_COS_QUEUE_ID6_VALID \ + UINT32_C(0x40) + /* + * If this bit is set to 1, then all queue_id7 related + * parameters in this command are valid. + */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_ENABLES_COS_QUEUE_ID7_VALID \ + UINT32_C(0x80) + /* + * Port ID of port for which the table is being configured. + * The HWRM needs to check whether this function is allowed + * to configure TC BW assignment on this port. + */ + uint16_t port_id; + /* ID of CoS Queue 0. */ + uint8_t queue_id0; + uint8_t unused_0; + /* + * Minimum BW allocated to CoS Queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id0_min_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID + /* + * Maximum BW allocated to CoS Queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id0_max_bw; + /* The bandwidth value. 
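Firmware honors only the per-queue parameter groups whose enables bits are set; unflagged groups in the request are ignored. A hedged sketch of a caller flagging just the queue 0 group (rte_cpu_to_le_16/32 from rte_byteorder.h, since HWRM fields are little-endian on the wire; the helper name is illustrative):

    /* Mark only the queue_id0 parameter group as valid; the other
     * seven groups stay disabled and are therefore left untouched. */
    static void
    cos2bw_enable_q0(struct hwrm_queue_cos2bw_cfg_input *req, uint16_t port)
    {
    	req->enables = rte_cpu_to_le_32(
    		HWRM_QUEUE_COS2BW_CFG_INPUT_ENABLES_COS_QUEUE_ID0_VALID);
    	req->port_id = rte_cpu_to_le_16(port);
    }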
*/ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID + /* Transmission Selection Algorithm (TSA) for CoS Queue. */ + uint8_t queue_id0_tsa_assign; + /* Strict Priority */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_TSA_ASSIGN_SP \ + UINT32_C(0x0) + /* Enhanced Transmission Selection */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_TSA_ASSIGN_ETS \ + UINT32_C(0x1) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST \ + UINT32_C(0x2) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST \ + UINT32_C(0xff) + /* + * Priority level for strict priority. Valid only when the + * tsa_assign is 0 - Strict Priority (SP) + * 0..7 - Valid values. + * 8..255 - Reserved. + */ + uint8_t queue_id0_pri_lvl; + /* + * Weight used to allocate remaining BW for this COS after + * servicing guaranteed bandwidths for all COS. + */ + uint8_t queue_id0_bw_weight; + /* ID of CoS Queue 1. */ + uint8_t queue_id1; + /* + * Minimum BW allocated to CoS Queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id1_min_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. 
*/ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID + /* + * Maximum BW allocated to CoS queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id1_max_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. 
*/ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID + /* Transmission Selection Algorithm (TSA) for CoS Queue. */ + uint8_t queue_id1_tsa_assign; + /* Strict Priority */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_TSA_ASSIGN_SP \ + UINT32_C(0x0) + /* Enhanced Transmission Selection */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_TSA_ASSIGN_ETS \ + UINT32_C(0x1) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST \ + UINT32_C(0x2) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST \ + UINT32_C(0xff) + /* + * Priority level for strict priority. Valid only when the + * tsa_assign is 0 - Strict Priority (SP) + * 0..7 - Valid values. + * 8..255 - Reserved. + */ + uint8_t queue_id1_pri_lvl; + /* + * Weight used to allocate remaining BW for this COS after + * servicing guaranteed bandwidths for all COS. + */ + uint8_t queue_id1_bw_weight; + /* ID of CoS Queue 2. */ + uint8_t queue_id2; + /* + * Minimum BW allocated to CoS Queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id2_min_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID + /* + * Maximum BW allocated to CoS queue. 
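Per the comments above, an ETS queue first receives its guaranteed minimum, is capped at its maximum, and then competes for leftover bandwidth according to bw_weight; pri_lvl is not consulted for ETS. A sketch under that reading, with an arbitrary weight:

    /* Sketch: place CoS queue 1 under Enhanced Transmission Selection
     * with a relative weight of 50; min/max rates are set elsewhere. */
    static void
    cos2bw_q1_ets(struct hwrm_queue_cos2bw_cfg_input *req)
    {
    	req->queue_id1_tsa_assign =
    		HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_TSA_ASSIGN_ETS;
    	req->queue_id1_bw_weight = 50;	/* share of the remaining BW */
    	req->queue_id1_pri_lvl = 0;	/* meaningful only for SP queues */
    }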
+ * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id2_max_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID + /* Transmission Selection Algorithm (TSA) for CoS Queue. */ + uint8_t queue_id2_tsa_assign; + /* Strict Priority */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_TSA_ASSIGN_SP \ + UINT32_C(0x0) + /* Enhanced Transmission Selection */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_TSA_ASSIGN_ETS \ + UINT32_C(0x1) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST \ + UINT32_C(0x2) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST \ + UINT32_C(0xff) /* - * If set to 1 then FW based LLDP agent is enabled and running - * on the port associated with this function. If set to 0 then - * the LLDP agent is not running in the firmware. + * Priority level for strict priority. Valid only when the + * tsa_assign is 0 - Strict Priority (SP) + * 0..7 - Valid values. + * 8..255 - Reserved. */ - #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_FW_LLDP_AGENT_ENABLED UINT32_C(0x10) + uint8_t queue_id2_pri_lvl; /* - * If set to 1, then multi-host mode is active for this - * function. If set to 0, then multi-host mode is inactive for - * this function or not applicable for this device. + * Weight used to allocate remaining BW for this COS after + * servicing guaranteed bandwidths for all COS. */ - #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST UINT32_C(0x20) - uint8_t mac_address[6]; + uint8_t queue_id2_bw_weight; + /* ID of CoS Queue 3. 
*/ + uint8_t queue_id3; /* - * This value is current MAC address configured for this - * function. A value of 00-00-00-00-00-00 indicates no MAC - * address is currently configured. + * Minimum BW allocated to CoS Queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. */ - uint16_t pci_id; + uint32_t queue_id3_min_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID /* - * This value is current PCI ID of this function. If ARI is - * enabled, then it is Bus Number (8b):Function Number(8b). - * Otherwise, it is Bus Number (8b):Device Number (4b):Function - * Number(4b). + * Maximum BW allocated to CoS queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. */ - uint16_t alloc_rsscos_ctx; + uint32_t queue_id3_max_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. 
*/ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID + /* Transmission Selection Algorithm (TSA) for CoS Queue. */ + uint8_t queue_id3_tsa_assign; + /* Strict Priority */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_TSA_ASSIGN_SP \ + UINT32_C(0x0) + /* Enhanced Transmission Selection */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_TSA_ASSIGN_ETS \ + UINT32_C(0x1) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST \ + UINT32_C(0x2) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST \ + UINT32_C(0xff) /* - * The number of RSS/COS contexts currently allocated to the - * function. + * Priority level for strict priority. Valid only when the + * tsa_assign is 0 - Strict Priority (SP) + * 0..7 - Valid values. + * 8..255 - Reserved. */ - uint16_t alloc_cmpl_rings; + uint8_t queue_id3_pri_lvl; /* - * The number of completion rings currently allocated to the - * function. This does not include the rings allocated to any - * children functions if any. + * Weight used to allocate remaining BW for this COS after + * servicing guaranteed bandwidths for all COS. */ - uint16_t alloc_tx_rings; + uint8_t queue_id3_bw_weight; + /* ID of CoS Queue 4. */ + uint8_t queue_id4; /* - * The number of transmit rings currently allocated to the - * function. This does not include the rings allocated to any - * children functions if any. + * Minimum BW allocated to CoS Queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. */ - uint16_t alloc_rx_rings; + uint32_t queue_id4_min_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. 
*/ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID /* - * The number of receive rings currently allocated to the - * function. This does not include the rings allocated to any - * children functions if any. + * Maximum BW allocated to CoS queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. */ - uint16_t alloc_l2_ctx; - /* The allocated number of L2 contexts to the function. */ - uint16_t alloc_vnics; - /* The allocated number of vnics to the function. */ - uint16_t mtu; + uint32_t queue_id4_max_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. 
*/ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID + /* Transmission Selection Algorithm (TSA) for CoS Queue. */ + uint8_t queue_id4_tsa_assign; + /* Strict Priority */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_TSA_ASSIGN_SP \ + UINT32_C(0x0) + /* Enhanced Transmission Selection */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_TSA_ASSIGN_ETS \ + UINT32_C(0x1) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST \ + UINT32_C(0x2) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST \ + UINT32_C(0xff) /* - * The maximum transmission unit of the function. For rings - * allocated on this function, this default value is used if - * ring MTU is not specified. + * Priority level for strict priority. Valid only when the + * tsa_assign is 0 - Strict Priority (SP) + * 0..7 - Valid values. + * 8..255 - Reserved. */ - uint16_t mru; + uint8_t queue_id4_pri_lvl; /* - * The maximum receive unit of the function. For vnics allocated - * on this function, this default value is used if vnic MRU is - * not specified. + * Weight used to allocate remaining BW for this COS after + * servicing guaranteed bandwidths for all COS. */ - uint16_t stat_ctx_id; - /* The statistics context assigned to a function. */ - uint8_t port_partition_type; + uint8_t queue_id4_bw_weight; + /* ID of CoS Queue 5. */ + uint8_t queue_id5; /* - * The HWRM shall return Unknown value for this field when this - * command is used to query VF's configuration. + * Minimum BW allocated to CoS Queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. */ - /* Single physical function */ - #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_SPF UINT32_C(0x0) - /* Multiple physical functions */ - #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_MPFS UINT32_C(0x1) - /* Network Partitioning 1.0 */ - #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0 UINT32_C(0x2) - /* Network Partitioning 1.5 */ - #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5 UINT32_C(0x3) - /* Network Partitioning 2.0 */ - #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0 UINT32_C(0x4) - /* Unknown */ - #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_UNKNOWN UINT32_C(0xff) - uint8_t port_pf_cnt; + uint32_t queue_id5_min_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. 
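The strict-priority counterpart: tsa_assign selects SP, pri_lvl (valid range 0..7) places the queue in the priority hierarchy, and bw_weight goes unused. A sketch, again with illustrative helper naming:

    /* Sketch: CoS queue 4 as strict priority at level 0. */
    static void
    cos2bw_q4_sp(struct hwrm_queue_cos2bw_cfg_input *req)
    {
    	req->queue_id4_tsa_assign =
    		HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_TSA_ASSIGN_SP;
    	req->queue_id4_pri_lvl = 0;	/* 0..7 valid when tsa_assign is SP */
    	req->queue_id4_bw_weight = 0;	/* ETS-only field, ignored here */
    }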
*/ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID /* - * This field will indicate number of physical functions on this - * port_partition. HWRM shall return unavail (i.e. value of 0) - * for this field when this command is used to query VF's - * configuration or from older firmware that doesn't support - * this field. + * Maximum BW allocated to CoS queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. */ - /* number of PFs is not available */ - #define HWRM_FUNC_QCFG_OUTPUT_PORT_PF_CNT_UNAVAIL UINT32_C(0x0) - uint16_t dflt_vnic_id; - /* The default VNIC ID assigned to a function that is being queried. */ - uint16_t max_mtu_configured; - /* - * This value specifies the MAX MTU that can be configured by - * host drivers. This 'max_mtu_configure' can be HW max MTU or - * OEM applications specified value. Host drivers can't - * configure the MTU greater than this value. Host drivers - * should read this value prior to configuring the MTU. FW will - * fail the host request with MTU greater than - * 'max_mtu_configured'. - */ - uint32_t min_bw; - /* - * Minimum BW allocated for this function. The HWRM will - * translate this value into byte counter and time interval used - * for the scheduler inside the device. A value of 0 indicates - * the minimum bandwidth is not configured. + uint32_t queue_id5_max_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. 
*/ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID + /* Transmission Selection Algorithm (TSA) for CoS Queue. */ + uint8_t queue_id5_tsa_assign; + /* Strict Priority */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_TSA_ASSIGN_SP \ + UINT32_C(0x0) + /* Enhanced Transmission Selection */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_TSA_ASSIGN_ETS \ + UINT32_C(0x1) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST \ + UINT32_C(0x2) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST \ + UINT32_C(0xff) + /* + * Priority level for strict priority. Valid only when the + * tsa_assign is 0 - Strict Priority (SP) + * 0..7 - Valid values. + * 8..255 - Reserved. + */ + uint8_t queue_id5_pri_lvl; + /* + * Weight used to allocate remaining BW for this COS after + * servicing guaranteed bandwidths for all COS. + */ + uint8_t queue_id5_bw_weight; + /* ID of CoS Queue 6. */ + uint8_t queue_id6; + /* + * Minimum BW allocated to CoS Queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. */ + uint32_t queue_id6_min_bw; /* The bandwidth value. */ - #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_MASK UINT32_C(0xfffffff) - #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_SFT 0 - /* The granularity of the value (bits or bytes). */ - #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_SCALE UINT32_C(0x10000000) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_SCALE \ + UINT32_C(0x10000000) /* Value is in bits. */ - #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_SCALE_BITS (UINT32_C(0x0) << 28) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) /* Value is in bytes. 
*/ - #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_SCALE_BYTES \ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_SCALE_BYTES \ (UINT32_C(0x1) << 28) - #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_SCALE_LAST \ - FUNC_QCFG_OUTPUT_MIN_BW_SCALE_BYTES + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_SCALE_BYTES /* bw_value_unit is 3 b */ - #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_MASK \ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK \ UINT32_C(0xe0000000) - #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_SFT 29 - /* Value is in Mb or MB (base 10). */ - #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_MEGA \ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA \ (UINT32_C(0x0) << 29) - /* Value is in Kb or KB (base 10). */ - #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_KILO \ + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO \ (UINT32_C(0x2) << 29) /* Value is in bits or bytes. */ - #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_BASE \ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE \ (UINT32_C(0x4) << 29) - /* Value is in Gb or GB (base 10). */ - #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_GIGA \ + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA \ (UINT32_C(0x6) << 29) /* Value is in 1/100th of a percentage of total bandwidth. */ - #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \ (UINT32_C(0x1) << 29) /* Invalid unit */ - #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_INVALID \ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID \ (UINT32_C(0x7) << 29) - #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_LAST \ - FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_INVALID - uint32_t max_bw; + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID /* - * Maximum BW allocated for this function. The HWRM will - * translate this value into byte counter and time interval used - * for the scheduler inside the device. A value of 0 indicates - * that the maximum bandwidth is not configured. + * Maximum BW allocated to CoS queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. */ + uint32_t queue_id6_max_bw; /* The bandwidth value. */ - #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_MASK UINT32_C(0xfffffff) - #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_SFT 0 - /* The granularity of the value (bits or bytes). */ - #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_SCALE UINT32_C(0x10000000) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_SCALE \ + UINT32_C(0x10000000) /* Value is in bits. */ - #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_SCALE_BITS (UINT32_C(0x0) << 28) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) /* Value is in bytes. 
*/ - #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_SCALE_BYTES \ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_SCALE_BYTES \ (UINT32_C(0x1) << 28) - #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_SCALE_LAST \ - FUNC_QCFG_OUTPUT_MAX_BW_SCALE_BYTES + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_SCALE_BYTES /* bw_value_unit is 3 b */ - #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_MASK \ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK \ UINT32_C(0xe0000000) - #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_SFT 29 - /* Value is in Mb or MB (base 10). */ - #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_MEGA \ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA \ (UINT32_C(0x0) << 29) - /* Value is in Kb or KB (base 10). */ - #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_KILO \ + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO \ (UINT32_C(0x2) << 29) /* Value is in bits or bytes. */ - #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_BASE \ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE \ (UINT32_C(0x4) << 29) - /* Value is in Gb or GB (base 10). */ - #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_GIGA \ + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA \ (UINT32_C(0x6) << 29) /* Value is in 1/100th of a percentage of total bandwidth. */ - #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \ (UINT32_C(0x1) << 29) /* Invalid unit */ - #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_INVALID \ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID \ (UINT32_C(0x7) << 29) - #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_LAST \ - FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_INVALID - uint8_t evb_mode; + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID + /* Transmission Selection Algorithm (TSA) for CoS Queue. */ + uint8_t queue_id6_tsa_assign; + /* Strict Priority */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_TSA_ASSIGN_SP \ + UINT32_C(0x0) + /* Enhanced Transmission Selection */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_TSA_ASSIGN_ETS \ + UINT32_C(0x1) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST \ + UINT32_C(0x2) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST \ + UINT32_C(0xff) /* - * This value indicates the Edge virtual bridge mode for the - * domain that this function belongs to. + * Priority level for strict priority. Valid only when the + * tsa_assign is 0 - Strict Priority (SP) + * 0..7 - Valid values. + * 8..255 - Reserved. + */ + uint8_t queue_id6_pri_lvl; + /* + * Weight used to allocate remaining BW for this COS after + * servicing guaranteed bandwidths for all COS. + */ + uint8_t queue_id6_bw_weight; + /* ID of CoS Queue 7. */ + uint8_t queue_id7; + /* + * Minimum BW allocated to CoS Queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id7_min_bw; + /* The bandwidth value. 
*/ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID + /* + * Maximum BW allocated to CoS queue. + * The HWRM will translate this value into byte counter and + * time interval used for this COS inside the device. + */ + uint32_t queue_id7_max_bw; + /* The bandwidth value. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_SCALE_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). 
*/ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST \ + HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID + /* Transmission Selection Algorithm (TSA) for CoS Queue. */ + uint8_t queue_id7_tsa_assign; + /* Strict Priority */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_TSA_ASSIGN_SP \ + UINT32_C(0x0) + /* Enhanced Transmission Selection */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_TSA_ASSIGN_ETS \ + UINT32_C(0x1) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST \ + UINT32_C(0x2) + /* reserved. */ + #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST \ + UINT32_C(0xff) + /* + * Priority level for strict priority. Valid only when the + * tsa_assign is 0 - Strict Priority (SP) + * 0..7 - Valid values. + * 8..255 - Reserved. + */ + uint8_t queue_id7_pri_lvl; + /* + * Weight used to allocate remaining BW for this COS after + * servicing guaranteed bandwidths for all COS. + */ + uint8_t queue_id7_bw_weight; + uint8_t unused_1[5]; +} __attribute__((packed)); + +/* hwrm_queue_cos2bw_cfg_output (size:128b/16B) */ +struct hwrm_queue_cos2bw_cfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/************************* + * hwrm_queue_dscp_qcaps * + *************************/ + + +/* hwrm_queue_dscp_qcaps_input (size:192b/24B) */ +struct hwrm_queue_dscp_qcaps_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* + * Port ID of port for which the table is being configured. + * The HWRM needs to check whether this function is allowed + * to configure pri2cos mapping on this port. 
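Because firmware writes the valid byte last, a driver must not consume any other response field until that byte reads '1', and it needs a read barrier so later loads are not reordered ahead of the check. A minimal polling sketch, assuming rte_rmb() and rte_delay_us() from DPDK and an illustrative retry budget:

    /* Poll the response 'valid' byte before trusting the payload. */
    static int
    hwrm_wait_valid(volatile struct hwrm_queue_cos2bw_cfg_output *resp)
    {
    	int i;

    	for (i = 0; i < 10000; i++) {	/* illustrative timeout */
    		if (resp->valid == 1) {
    			rte_rmb();	/* order payload reads after 'valid' */
    			return 0;
    		}
    		rte_delay_us(1);
    	}
    	return -1;	/* firmware never marked the response complete */
    }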
+ */ + uint8_t port_id; + uint8_t unused_0[7]; +} __attribute__((packed)); + +/* hwrm_queue_dscp_qcaps_output (size:128b/16B) */ +struct hwrm_queue_dscp_qcaps_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* The number of bits provided by the hardware for the DSCP value. */ + uint8_t num_dscp_bits; + uint8_t unused_0; + /* Max number of DSCP-MASK-PRI entries supported. */ + uint16_t max_entries; + uint8_t unused_1[3]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/**************************** + * hwrm_queue_dscp2pri_qcfg * + ****************************/ + + +/* hwrm_queue_dscp2pri_qcfg_input (size:256b/32B) */ +struct hwrm_queue_dscp2pri_qcfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* + * This is the host address where the 24-bits DSCP-MASK-PRI + * tuple(s) will be copied to. */ - /* No Edge Virtual Bridging (EVB) */ - #define HWRM_FUNC_QCFG_OUTPUT_EVB_MODE_NO_EVB UINT32_C(0x0) - /* Virtual Ethernet Bridge (VEB) */ - #define HWRM_FUNC_QCFG_OUTPUT_EVB_MODE_VEB UINT32_C(0x1) - /* Virtual Ethernet Port Aggregator (VEPA) */ - #define HWRM_FUNC_QCFG_OUTPUT_EVB_MODE_VEPA UINT32_C(0x2) - uint8_t unused_0; - uint16_t alloc_vfs; + uint64_t dest_data_addr; /* - * The number of VFs that are allocated to the function. This is - * valid only on the PF with SR-IOV enabled. 0xFF... (All Fs) if - * this command is called on a PF with SR-IOV disabled or on a - * VF. + * Port ID of port for which the table is being configured. + * The HWRM needs to check whether this function is allowed + * to configure pri2cos mapping on this port. */ - uint32_t alloc_mcast_filters; + uint8_t port_id; + uint8_t unused_0; + /* Size of the buffer pointed to by dest_data_addr. */ + uint16_t dest_data_buffer_size; + uint8_t unused_1[4]; +} __attribute__((packed)); + +/* hwrm_queue_dscp2pri_qcfg_output (size:128b/16B) */ +struct hwrm_queue_dscp2pri_qcfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. 
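The query is buffer-based: the host sizes a DMA-able buffer, passes its address and size in dest_data_addr/dest_data_buffer_size, and firmware reports how many tuples it actually wrote in entry_cnt. Reading the 24-bit tuple as 3 bytes, the buffer can be sized from the max_entries that hwrm_queue_dscp_qcaps returns; a sketch under that assumption:

    /* Bytes needed to hold every DSCP-MASK-PRI tuple the port supports,
     * assuming 3 bytes per 24-bit tuple as described above. */
    static uint32_t
    dscp2pri_buf_size(const struct hwrm_queue_dscp_qcaps_output *qcaps)
    {
    	return (uint32_t)rte_le_to_cpu_16(qcaps->max_entries) * 3;
    }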
*/ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; /* - * The number of allocated multicast filters for this function - * on the RX side. + * A count of the number of DSCP-MASK-PRI tuple(s) pointed to + * by the dest_data_addr. */ - uint32_t alloc_hw_ring_grps; - /* The number of allocated HW ring groups for this function. */ - uint16_t alloc_sp_tx_rings; + uint16_t entry_cnt; /* - * The number of strict priority transmit rings out of currently - * allocated TX rings to the function (alloc_tx_rings). + * This is the default PRI which un-initialized DSCP values are + * mapped to. */ - uint8_t unused_1; - uint8_t valid; + uint8_t default_pri; + uint8_t unused_0[4]; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ + uint8_t valid; } __attribute__((packed)); -/* hwrm_func_vlan_qcfg */ -/* - * Description: This command should be called by PF driver to get the current - * C-TAG, S-TAG and correcponsing PCP and TPID values configured for the - * function. - */ -/* Input (24 bytes) */ -struct hwrm_func_vlan_qcfg_input { - uint16_t req_type; +/*************************** + * hwrm_queue_dscp2pri_cfg * + ***************************/ + + +/* hwrm_queue_dscp2pri_cfg_input (size:320b/40B) */ +struct hwrm_queue_dscp2pri_cfg_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint16_t cmpl_ring; + uint16_t cmpl_ring; /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + uint16_t seq_id; /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint64_t resp_addr; + uint16_t target_id; /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. 
This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint16_t fid; + uint64_t resp_addr; /* - * Function ID of the function that is being configured. If set - * to 0xFF... (All Fs), then the configuration is for the - * requesting function. + * This is the host address where the 24-bits DSCP-MASK-PRI tuple + * will be copied from. */ - uint16_t unused_0[3]; -}; - -/* Output (40 bytes) */ -struct hwrm_func_vlan_qcfg_output { - uint16_t error_code; + uint64_t src_data_addr; + uint32_t flags; + /* use_hw_default_pri is 1 b */ + #define HWRM_QUEUE_DSCP2PRI_CFG_INPUT_FLAGS_USE_HW_DEFAULT_PRI \ + UINT32_C(0x1) + uint32_t enables; /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * This bit must be '1' for the default_pri field to be + * configured. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; - /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. - */ - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; - /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. - */ - uint16_t stag_vid; - /* S-TAG VLAN identifier configured for the function. */ - uint8_t stag_pcp; - /* S-TAG PCP value configured for the function. */ - uint8_t unused_4; - uint16_t stag_tpid; + #define HWRM_QUEUE_DSCP2PRI_CFG_INPUT_ENABLES_DEFAULT_PRI \ + UINT32_C(0x1) /* - * S-TAG TPID value configured for the function. This field is - * specified in network byte order. + * Port ID of port for which the table is being configured. + * The HWRM needs to check whether this function is allowed + * to configure pri2cos mapping on this port. */ - uint16_t ctag_vid; - /* C-TAG VLAN identifier configured for the function. */ - uint8_t ctag_pcp; - /* C-TAG PCP value configured for the function. */ - uint8_t unused_5; - uint16_t ctag_tpid; + uint8_t port_id; /* - * C-TAG TPID value configured for the function. This field is - * specified in network byte order. + * This is the default PRI which un-initialized DSCP values will be + * mapped to. */ - uint32_t rsvd2; - /* Future use. */ - uint32_t rsvd3; - /* Future use. */ - uint32_t unused_6; -}; + uint8_t default_pri; + /* + * A count of the number of DSCP-MASK-PRI tuple(s) in the data pointed + * to by src_data_addr. + */ + uint16_t entry_cnt; + uint8_t unused_0[4]; +} __attribute__((packed)); -/* hwrm_func_vlan_cfg */ -/* - * Description: This command allows PF driver to configure C-TAG, S-TAG and - * corresponding PCP and TPID values for a function. - */ -/* Input (48 bytes) */ -struct hwrm_func_vlan_cfg_input { - uint16_t req_type; +/* hwrm_queue_dscp2pri_cfg_output (size:128b/16B) */ +struct hwrm_queue_dscp2pri_cfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. 
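
Each table entry referenced by dest_data_addr (and, in the cfg command below, src_data_addr) is a 24-bit DSCP-MASK-PRI tuple. The excerpt above fixes only the size of a tuple, not the byte order of its three fields, so the struct in this sketch is a hypothetical layout, used only to show how a query buffer is sized against max_entries from hwrm_queue_dscp_qcaps_output:

#include <stdint.h>

/* Hypothetical 3-byte tuple layout; the real field order is not
 * specified in this excerpt. */
struct dscp2pri_tuple {
	uint8_t dscp; /* DSCP value */
	uint8_t mask; /* bits of 'dscp' that participate in the match */
	uint8_t pri;  /* priority the match maps to */
} __attribute__((packed));

/* dest_data_buffer_size large enough for every entry the device can
 * report (max_entries from hwrm_queue_dscp_qcaps_output). */
static uint16_t
dscp2pri_qcfg_buf_size(uint16_t max_entries)
{
	return max_entries * (uint16_t)sizeof(struct dscp2pri_tuple);
}
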
*/ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - uint16_t cmpl_ring; + uint8_t valid; +} __attribute__((packed)); + +/******************* + * hwrm_vnic_alloc * + *******************/ + + +/* hwrm_vnic_alloc_input (size:192b/24B) */ +struct hwrm_vnic_alloc_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + uint16_t cmpl_ring; /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint64_t resp_addr; + uint16_t seq_id; /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint16_t fid; - /* - * Function ID of the function that is being configured. If set - * to 0xFF... (All Fs), then the configuration is for the - * requesting function. - */ - uint8_t unused_0; - uint8_t unused_1; - uint32_t enables; - /* This bit must be '1' for the stag_vid field to be configured. */ - #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_STAG_VID UINT32_C(0x1) - /* This bit must be '1' for the ctag_vid field to be configured. */ - #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_CTAG_VID UINT32_C(0x2) - /* This bit must be '1' for the stag_pcp field to be configured. */ - #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_STAG_PCP UINT32_C(0x4) - /* This bit must be '1' for the ctag_pcp field to be configured. */ - #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_CTAG_PCP UINT32_C(0x8) - /* This bit must be '1' for the stag_tpid field to be configured. */ - #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_STAG_TPID UINT32_C(0x10) - /* This bit must be '1' for the ctag_tpid field to be configured. */ - #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_CTAG_TPID UINT32_C(0x20) - uint16_t stag_vid; - /* S-TAG VLAN identifier configured for the function. */ - uint8_t stag_pcp; - /* S-TAG PCP value configured for the function. */ - uint8_t unused_2; - uint16_t stag_tpid; + uint16_t target_id; /* - * S-TAG TPID value configured for the function. This field is - * specified in network byte order. 
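
Putting the hwrm_queue_dscp2pri_cfg_input fields above together, a sketch that programs a one-entry table plus a default PRI on port 0. dma_addr stands in for the IOVA of a host buffer already holding the tuple, and the common header fields (req_type, seq_id, resp_addr, ...) are assumed to be filled by whatever command transport the driver uses:

#include <string.h>
#include <rte_byteorder.h>

static void
dscp2pri_cfg_fill(struct hwrm_queue_dscp2pri_cfg_input *req,
		  uint64_t dma_addr, uint8_t default_pri)
{
	memset(req, 0, sizeof(*req));
	req->src_data_addr = rte_cpu_to_le_64(dma_addr); /* tuples read from host */
	req->port_id = 0;
	req->entry_cnt = rte_cpu_to_le_16(1);            /* one tuple at dma_addr */
	req->default_pri = default_pri;
	/* Per the enables convention, default_pri is only consumed when its
	 * enable bit is set. */
	req->enables = rte_cpu_to_le_32(
		HWRM_QUEUE_DSCP2PRI_CFG_INPUT_ENABLES_DEFAULT_PRI);
}
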
+ * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint16_t ctag_vid; - /* C-TAG VLAN identifier configured for the function. */ - uint8_t ctag_pcp; - /* C-TAG PCP value configured for the function. */ - uint8_t unused_3; - uint16_t ctag_tpid; + uint64_t resp_addr; + uint32_t flags; /* - * C-TAG TPID value configured for the function. This field is - * specified in network byte order. + * When this bit is '1', this VNIC is requested to + * be the default VNIC for this function. */ - uint32_t rsvd1; - /* Future use. */ - uint32_t rsvd2; - /* Future use. */ - uint32_t unused_4; -}; + #define HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT UINT32_C(0x1) + uint8_t unused_0[4]; +} __attribute__((packed)); -/* Output (16 bytes) */ -struct hwrm_func_vlan_cfg_output { - uint16_t error_code; +/* hwrm_vnic_alloc_output (size:128b/16B) */ +struct hwrm_vnic_alloc_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* Logical vnic ID */ + uint32_t vnic_id; + uint8_t unused_0[3]; /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + uint8_t valid; +} __attribute__((packed)); + +/****************** + * hwrm_vnic_free * + ******************/ + + +/* hwrm_vnic_free_input (size:192b/24B) */ +struct hwrm_vnic_free_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. 
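
hwrm_vnic_alloc is typically the first step of VNIC setup: the only input beyond the common header is the DEFAULT flag, and the output returns the logical vnic_id that every later VNIC command takes. In this sketch, hwrm_send_message() is a hypothetical transport helper (it fills the remaining header fields, issues the command, and waits on the valid byte), and HWRM_VNIC_ALLOC is the command identifier defined elsewhere in this file:

#include <stddef.h>
#include <string.h>
#include <rte_byteorder.h>

/* Hypothetical transport helper, not part of this header. */
extern int hwrm_send_message(void *req, size_t req_len,
			     void *resp, size_t resp_len);

static int
vnic_alloc_default(uint32_t *vnic_id)
{
	struct hwrm_vnic_alloc_input req;
	struct hwrm_vnic_alloc_output resp;
	int rc;

	memset(&req, 0, sizeof(req));
	req.req_type = rte_cpu_to_le_16(HWRM_VNIC_ALLOC);
	req.flags = rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
	rc = hwrm_send_message(&req, sizeof(req), &resp, sizeof(resp));
	if (rc == 0)
		*vnic_id = rte_le_to_cpu_32(resp.vnic_id);
	return rc;
}
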
+ * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ -}; + uint64_t resp_addr; + /* Logical vnic ID */ + uint32_t vnic_id; + uint8_t unused_0[4]; +} __attribute__((packed)); -/* hwrm_func_cfg */ -/* - * Description: This command allows configuration of a PF by the corresponding - * PF driver. This command also allows configuration of a child VF by its parent - * PF driver. The input FID value is used to indicate what function is being - * configured. This allows a PF driver to configure the PF owned by itself or a - * virtual function that is a child of the PF. This command allows to reserve - * resources for a VF by its parent PF. To reverse the process, the command - * should be called with all enables flags cleared for resources. This will free - * allocated resources for the VF and return them to the resource pool. If this - * command is requested by a VF driver to configure or reserve resources, then - * the HWRM shall fail this command. If default MAC address and/or VLAN are - * provided in this command, then the HWRM shall set up appropriate MAC/VLAN - * filters for the function that is being configured. If source properties - * checks are enabled and default MAC address and/or IP address are provided in - * this command, then the HWRM shall set appropriate source property checks - * based on provided MAC and/or IP addresses. The parent PF driver should not - * set MTU/MRU for a VF using this command. This is to allow MTU/MRU setting by - * the VF driver. If the MTU or MRU for a VF is set by the PF driver, then the - * HWRM should ignore it. A function's MTU/MRU should be set prior to allocating - * RX VNICs or TX rings. A PF driver calls hwrm_func_cfg to allocate resources - * for itself or its children VFs. All function drivers shall call hwrm_func_cfg - * to reserve resources. A request to hwrm_func_cfg may not be fully granted; - * that is, a request for resources may be larger than what can be supported by - * the device and the HWRM will allocate the best set of resources available, - * but that may be less than requested. If all the amounts requested could not - * be fulfilled, the HWRM shall allocate what it could and return a status code - * of success. A function driver should call hwrm_func_qcfg immediately after - * hwrm_func_cfg to determine what resources were assigned to the configured - * function. A call by a PF driver to hwrm_func_cfg to allocate resources for - * itself shall only allocate resources for the PF driver to use, not for its - * children VFs. Likewise, a call to hwrm_func_qcfg shall return the resources - * available for the PF driver to use, not what is available to its children - * VFs. - */ -/* Input (88 bytes) */ -struct hwrm_func_cfg_input { - uint16_t req_type; +/* hwrm_vnic_free_output (size:128b/16B) */ +struct hwrm_vnic_free_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. 
+ * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - uint16_t cmpl_ring; + uint8_t valid; +} __attribute__((packed)); + +/***************** + * hwrm_vnic_cfg * + *****************/ + + +/* hwrm_vnic_cfg_input (size:320b/40B) */ +struct hwrm_vnic_cfg_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + uint16_t cmpl_ring; /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint64_t resp_addr; + uint16_t seq_id; /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint16_t fid; + uint16_t target_id; /* - * Function ID of the function that is being configured. If set - * to 0xFF... (All Fs), then the configuration is for the - * requesting function. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint8_t unused_0; - uint8_t unused_1; - uint32_t flags; + uint64_t resp_addr; + uint32_t flags; /* - * When this bit is '1', the function is disabled with source - * MAC address check. This is an anti-spoofing check. If this - * flag is set, then the function shall be configured to - * disallow transmission of frames with the source MAC address - * that is configured for this function. + * When this bit is '1', the VNIC is requested to + * be the default VNIC for the function. */ - #define HWRM_FUNC_CFG_INPUT_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE \ + #define HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT \ UINT32_C(0x1) /* - * When this bit is '1', the function is enabled with source MAC - * address check. This is an anti-spoofing check. If this flag - * is set, then the function shall be configured to allow - * transmission of frames with the source MAC address that is - * configured for this function. + * When this bit is '1', the VNIC is being configured to + * strip VLAN in the RX path. + * If set to '0', then VLAN stripping is disabled on + * this VNIC. 
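
hwrm_vnic_free above is the inverse of hwrm_vnic_alloc and takes nothing but the vnic_id being released; the same hypothetical hwrm_send_message() helper from the earlier sketch applies, and HWRM_VNIC_FREE is again the command identifier defined elsewhere in this file:

static int
vnic_free(uint32_t vnic_id)
{
	struct hwrm_vnic_free_input req;
	struct hwrm_vnic_free_output resp;

	memset(&req, 0, sizeof(req));
	req.req_type = rte_cpu_to_le_16(HWRM_VNIC_FREE);
	req.vnic_id = rte_cpu_to_le_32(vnic_id);
	return hwrm_send_message(&req, sizeof(req), &resp, sizeof(resp));
}
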
*/ - #define HWRM_FUNC_CFG_INPUT_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE \ + #define HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE \ UINT32_C(0x2) - /* reserved */ - #define HWRM_FUNC_CFG_INPUT_FLAGS_RSVD_MASK UINT32_C(0x1fc) - #define HWRM_FUNC_CFG_INPUT_FLAGS_RSVD_SFT 2 /* - * Standard TX Ring mode is used for the allocation of TX ring - * and underlying scheduling resources that allow bandwidth - * reservation and limit settings on the queried function. If - * set to 1, then standard TX ring mode is requested to be - * enabled on the function being configured. + * When this bit is '1', the VNIC is being configured to + * buffer receive packets in the hardware until the host + * posts new receive buffers. + * If set to '0', then bd_stall is being configured to be + * disabled on this VNIC. */ - #define HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE \ - UINT32_C(0x200) - /* - * Standard TX Ring mode is used for the allocation of TX ring - * and underlying scheduling resources that allow bandwidth - * reservation and limit settings on the queried function. If - * set to 1, then the standard TX ring mode is requested to be - * disabled on the function being configured. In this extended - * TX ring resource mode, the minimum and maximum bandwidth - * settings are not supported to allow the allocation of TX - * rings to span multiple scheduler nodes. - */ - #define HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE \ - UINT32_C(0x400) + #define HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE \ + UINT32_C(0x4) /* - * If this bit is set, virtual mac address configured in this - * command will be persistent over warm boot. + * When this bit is '1', the VNIC is being configured to + * receive both RoCE and non-RoCE traffic. + * If set to '0', then this VNIC is not configured to be + * operating in dual VNIC mode. */ - #define HWRM_FUNC_CFG_INPUT_FLAGS_VIRT_MAC_PERSIST UINT32_C(0x800) + #define HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_DUAL_VNIC_MODE \ + UINT32_C(0x8) /* - * This bit only applies to the VF. If this bit is set, the - * statistic context counters will not be cleared when the - * statistic context is freed or a function reset is called on - * VF. This bit will be cleared when the PF is unloaded or a - * function reset is called on the PF. + * When this flag is set to '1', the VNIC is requested to + * be configured to receive only RoCE traffic. + * If this flag is set to '0', then this flag shall be + * ignored by the HWRM. + * If roce_dual_vnic_mode flag is set to '1' + * or roce_mirroring_capable_vnic_mode flag to 1, + * then the HWRM client shall not set this flag to '1'. */ - #define HWRM_FUNC_CFG_INPUT_FLAGS_NO_AUTOCLEAR_STATISTIC \ - UINT32_C(0x1000) + #define HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_ONLY_VNIC_MODE \ + UINT32_C(0x10) + /* + * When a VNIC uses one destination ring group for certain + * application (e.g. Receive Flow Steering) where + * exact match is used to direct packets to a VNIC with one + * destination ring group only, there is no need to configure + * RSS indirection table for that VNIC as only one destination + * ring group is used. + * + * This flag is used to enable a mode where + * RSS is enabled in the VNIC using a RSS context + * for computing RSS hash but the RSS indirection table is + * not configured using hwrm_vnic_rss_cfg. + * + * If this mode is enabled, then the driver should not program + * RSS indirection table for the RSS context that is used for + * computing RSS hash only. 
+ */ + #define HWRM_VNIC_CFG_INPUT_FLAGS_RSS_DFLT_CR_MODE \ + UINT32_C(0x20) /* - * This bit requests that the firmware test to see if all the - * assets requested in this command (i.e. number of TX rings) - * are available. The firmware will return an error if the - * requested assets are not available. The firwmare will NOT - * reserve the assets if they are available. + * When this bit is '1', the VNIC is being configured to + * receive both RoCE and non-RoCE traffic, but forward only the + * RoCE traffic further. Also, RoCE traffic can be mirrored to + * L2 driver. */ - #define HWRM_FUNC_CFG_INPUT_FLAGS_TX_ASSETS_TEST UINT32_C(0x2000) - uint32_t enables; - /* This bit must be '1' for the mtu field to be configured. */ - #define HWRM_FUNC_CFG_INPUT_ENABLES_MTU UINT32_C(0x1) - /* This bit must be '1' for the mru field to be configured. */ - #define HWRM_FUNC_CFG_INPUT_ENABLES_MRU UINT32_C(0x2) + #define HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE \ + UINT32_C(0x40) + uint32_t enables; /* - * This bit must be '1' for the num_rsscos_ctxs field to be + * This bit must be '1' for the dflt_ring_grp field to be * configured. */ - #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS UINT32_C(0x4) + #define HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP \ + UINT32_C(0x1) /* - * This bit must be '1' for the num_cmpl_rings field to be + * This bit must be '1' for the rss_rule field to be * configured. */ - #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS UINT32_C(0x8) - /* This bit must be '1' for the num_tx_rings field to be configured. */ - #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS UINT32_C(0x10) - /* This bit must be '1' for the num_rx_rings field to be configured. */ - #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS UINT32_C(0x20) - /* This bit must be '1' for the num_l2_ctxs field to be configured. */ - #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS UINT32_C(0x40) - /* This bit must be '1' for the num_vnics field to be configured. */ - #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS UINT32_C(0x80) + #define HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE \ + UINT32_C(0x2) /* - * This bit must be '1' for the num_stat_ctxs field to be + * This bit must be '1' for the cos_rule field to be * configured. */ - #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS UINT32_C(0x100) + #define HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE \ + UINT32_C(0x4) /* - * This bit must be '1' for the dflt_mac_addr field to be + * This bit must be '1' for the lb_rule field to be * configured. */ - #define HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR UINT32_C(0x200) - /* This bit must be '1' for the dflt_vlan field to be configured. */ - #define HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN UINT32_C(0x400) - /* This bit must be '1' for the dflt_ip_addr field to be configured. */ - #define HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_IP_ADDR UINT32_C(0x800) - /* This bit must be '1' for the min_bw field to be configured. */ - #define HWRM_FUNC_CFG_INPUT_ENABLES_MIN_BW UINT32_C(0x1000) - /* This bit must be '1' for the max_bw field to be configured. */ - #define HWRM_FUNC_CFG_INPUT_ENABLES_MAX_BW UINT32_C(0x2000) + #define HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE \ + UINT32_C(0x8) /* - * This bit must be '1' for the async_event_cr field to be + * This bit must be '1' for the mru field to be * configured. 
*/ - #define HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR UINT32_C(0x4000) + #define HWRM_VNIC_CFG_INPUT_ENABLES_MRU \ + UINT32_C(0x10) /* - * This bit must be '1' for the vlan_antispoof_mode field to be + * This bit must be '1' for the default_rx_ring_id field to be * configured. */ - #define HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE UINT32_C(0x8000) + #define HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_RX_RING_ID \ + UINT32_C(0x20) /* - * This bit must be '1' for the allowed_vlan_pris field to be + * This bit must be '1' for the default_cmpl_ring_id field to be * configured. */ - #define HWRM_FUNC_CFG_INPUT_ENABLES_ALLOWED_VLAN_PRIS UINT32_C(0x10000) - /* This bit must be '1' for the evb_mode field to be configured. */ - #define HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE UINT32_C(0x20000) + #define HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_CMPL_RING_ID \ + UINT32_C(0x40) + /* Logical vnic ID */ + uint16_t vnic_id; /* - * This bit must be '1' for the num_mcast_filters field to be - * configured. + * Default Completion ring for the VNIC. This ring will + * be chosen if packet does not match any RSS rules and if + * there is no COS rule. */ - #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_MCAST_FILTERS UINT32_C(0x40000) + uint16_t dflt_ring_grp; /* - * This bit must be '1' for the num_hw_ring_grps field to be - * configured. + * RSS ID for RSS rule/table structure. 0xFF... (All Fs) if + * there is no RSS rule. */ - #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS UINT32_C(0x80000) - uint16_t mtu; + uint16_t rss_rule; /* - * The maximum transmission unit of the function. The HWRM - * should make sure that the mtu of the function does not exceed - * the mtu of the physical port that this function is associated - * with. In addition to configuring mtu per function, it is - * possible to configure mtu per transmit ring. By default, the - * mtu of each transmit ring associated with a function is equal - * to the mtu of the function. The HWRM should make sure that - * the mtu of each transmit ring that is assigned to a function - * has a valid mtu. + * RSS ID for COS rule/table structure. 0xFF... (All Fs) if + * there is no COS rule. */ - uint16_t mru; + uint16_t cos_rule; /* - * The maximum receive unit of the function. The HWRM should - * make sure that the mru of the function does not exceed the - * mru of the physical port that this function is associated - * with. In addition to configuring mru per function, it is - * possible to configure mru per vnic. By default, the mru of - * each vnic associated with a function is equal to the mru of - * the function. The HWRM should make sure that the mru of each - * vnic that is assigned to a function has a valid mru. + * RSS ID for load balancing rule/table structure. + * 0xFF... (All Fs) if there is no LB rule. */ - uint16_t num_rsscos_ctxs; - /* The number of RSS/COS contexts requested for the function. */ - uint16_t num_cmpl_rings; + uint16_t lb_rule; /* - * The number of completion rings requested for the function. - * This does not include the rings allocated to any children - * functions if any. + * The maximum receive unit of the vnic. + * Each vnic is associated with a function. + * The vnic mru value overwrites the mru setting of the + * associated function. + * The HWRM shall make sure that vnic mru does not exceed + * the mru of the port the function is associated with. */ - uint16_t num_tx_rings; + uint16_t mru; /* - * The number of transmit rings requested for the function. This - * does not include the rings allocated to any children - * functions if any. 
+ * Default Rx ring for the VNIC. This ring will + * be chosen if packet does not match any RSS rules. + * The aggregation ring associated with the Rx ring is + * implied based on the Rx ring specified when the + * aggregation ring was allocated. */ - uint16_t num_rx_rings; + uint16_t default_rx_ring_id; /* - * The number of receive rings requested for the function. This - * does not include the rings allocated to any children - * functions if any. + * Default completion ring for the VNIC. This ring will + * be chosen if packet does not match any RSS rules. */ - uint16_t num_l2_ctxs; - /* The requested number of L2 contexts for the function. */ - uint16_t num_vnics; - /* The requested number of vnics for the function. */ - uint16_t num_stat_ctxs; - /* The requested number of statistic contexts for the function. */ - uint16_t num_hw_ring_grps; + uint16_t default_cmpl_ring_id; +} __attribute__((packed)); + +/* hwrm_vnic_cfg_output (size:128b/16B) */ +struct hwrm_vnic_cfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * The number of HW ring groups that should be reserved for this - * function. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - uint8_t dflt_mac_addr[6]; - /* The default MAC address for the function being configured. */ - uint16_t dflt_vlan; + uint8_t valid; +} __attribute__((packed)); + +/****************** + * hwrm_vnic_qcfg * + ******************/ + + +/* hwrm_vnic_qcfg_input (size:256b/32B) */ +struct hwrm_vnic_qcfg_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * The default VLAN for the function being configured. This - * field's format is same as 802.1Q Tag's Tag Control - * Information (TCI) format that includes both Priority Code - * Point (PCP) and VLAN Identifier (VID). + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint32_t dflt_ip_addr[4]; + uint16_t cmpl_ring; /* - * The default IP address for the function being configured. - * This address is only used in enabling source property check. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint32_t min_bw; + uint16_t seq_id; /* - * Minimum BW allocated for this function. The HWRM will - * translate this value into byte counter and time interval used - * for the scheduler inside the device. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - /* The bandwidth value. */ - #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_MASK UINT32_C(0xfffffff) - #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_SFT 0 - /* The granularity of the value (bits or bytes). */ - #define HWRM_FUNC_CFG_INPUT_MIN_BW_SCALE UINT32_C(0x10000000) - /* Value is in bits. 
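
hwrm_vnic_cfg follows the enables convention used throughout this interface: firmware consumes an optional field only when its bit in 'enables' is set, and the rule IDs use 0xFF... (all Fs) as the "no rule" sentinel. A sketch that configures only the default ring group and the MRU, leaving the RSS/COS/LB rules unset:

static void
vnic_cfg_fill(struct hwrm_vnic_cfg_input *req, uint16_t vnic_id,
	      uint16_t dflt_ring_grp, uint16_t mru)
{
	memset(req, 0, sizeof(*req));
	req->vnic_id = rte_cpu_to_le_16(vnic_id);
	req->dflt_ring_grp = rte_cpu_to_le_16(dflt_ring_grp);
	req->mru = rte_cpu_to_le_16(mru);
	/* All Fs: no RSS, COS or LB rule attached to this VNIC. */
	req->rss_rule = rte_cpu_to_le_16(0xFFFF);
	req->cos_rule = rte_cpu_to_le_16(0xFFFF);
	req->lb_rule = rte_cpu_to_le_16(0xFFFF);
	req->enables = rte_cpu_to_le_32(
		HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP |
		HWRM_VNIC_CFG_INPUT_ENABLES_MRU);
}
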
*/ - #define HWRM_FUNC_CFG_INPUT_MIN_BW_SCALE_BITS (UINT32_C(0x0) << 28) - /* Value is in bytes. */ - #define HWRM_FUNC_CFG_INPUT_MIN_BW_SCALE_BYTES (UINT32_C(0x1) << 28) - #define HWRM_FUNC_CFG_INPUT_MIN_BW_SCALE_LAST \ - FUNC_CFG_INPUT_MIN_BW_SCALE_BYTES - /* bw_value_unit is 3 b */ - #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_MASK \ - UINT32_C(0xe0000000) - #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_SFT 29 - /* Value is in Mb or MB (base 10). */ - #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_MEGA \ - (UINT32_C(0x0) << 29) - /* Value is in Kb or KB (base 10). */ - #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_KILO \ - (UINT32_C(0x2) << 29) - /* Value is in bits or bytes. */ - #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_BASE \ - (UINT32_C(0x4) << 29) - /* Value is in Gb or GB (base 10). */ - #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_GIGA \ - (UINT32_C(0x6) << 29) - /* Value is in 1/100th of a percentage of total bandwidth. */ - #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \ - (UINT32_C(0x1) << 29) - /* Invalid unit */ - #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_INVALID \ - (UINT32_C(0x7) << 29) - #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_LAST \ - FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_INVALID - uint32_t max_bw; + uint16_t target_id; /* - * Maximum BW allocated for this function. The HWRM will - * translate this value into byte counter and time interval used - * for the scheduler inside the device. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - /* The bandwidth value. */ - #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_MASK \ - UINT32_C(0xfffffff) - #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_SFT 0 - /* The granularity of the value (bits or bytes). */ - #define HWRM_FUNC_CFG_INPUT_MAX_BW_SCALE UINT32_C(0x10000000) - /* Value is in bits. */ - #define HWRM_FUNC_CFG_INPUT_MAX_BW_SCALE_BITS (UINT32_C(0x0) << 28) - /* Value is in bytes. */ - #define HWRM_FUNC_CFG_INPUT_MAX_BW_SCALE_BYTES (UINT32_C(0x1) << 28) - #define HWRM_FUNC_CFG_INPUT_MAX_BW_SCALE_LAST \ - FUNC_CFG_INPUT_MAX_BW_SCALE_BYTES - /* bw_value_unit is 3 b */ - #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_MASK \ - UINT32_C(0xe0000000) - #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_SFT 29 - /* Value is in Mb or MB (base 10). */ - #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_MEGA \ - (UINT32_C(0x0) << 29) - /* Value is in Kb or KB (base 10). */ - #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_KILO \ - (UINT32_C(0x2) << 29) - /* Value is in bits or bytes. */ - #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_BASE \ - (UINT32_C(0x4) << 29) - /* Value is in Gb or GB (base 10). */ - #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_GIGA \ - (UINT32_C(0x6) << 29) - /* Value is in 1/100th of a percentage of total bandwidth. */ - #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \ - (UINT32_C(0x1) << 29) - /* Invalid unit */ - #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_INVALID \ - (UINT32_C(0x7) << 29) - #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_LAST \ - FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_INVALID - uint16_t async_event_cr; + uint64_t resp_addr; + uint32_t enables; + /* + * This bit must be '1' for the vf_id_valid field to be + * configured. 
+ */ + #define HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID UINT32_C(0x1) + /* Logical vnic ID */ + uint32_t vnic_id; + /* ID of Virtual Function whose VNIC resource is being queried. */ + uint16_t vf_id; + uint8_t unused_0[6]; +} __attribute__((packed)); + +/* hwrm_vnic_qcfg_output (size:256b/32B) */ +struct hwrm_vnic_qcfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* Default Completion ring for the VNIC. */ + uint16_t dflt_ring_grp; + /* + * RSS ID for RSS rule/table structure. 0xFF... (All Fs) if + * there is no RSS rule. + */ + uint16_t rss_rule; + /* + * RSS ID for COS rule/table structure. 0xFF... (All Fs) if + * there is no COS rule. + */ + uint16_t cos_rule; + /* + * RSS ID for load balancing rule/table structure. + * 0xFF... (All Fs) if there is no LB rule. + */ + uint16_t lb_rule; + /* The maximum receive unit of the vnic. */ + uint16_t mru; + uint8_t unused_0[2]; + uint32_t flags; /* - * ID of the target completion ring for receiving asynchronous - * event completions. If this field is not valid, then the HWRM - * shall use the default completion ring of the function that is - * being configured as the target completion ring for providing - * any asynchronous event completions for that function. If this - * field is valid, then the HWRM shall use the completion ring - * identified by this ID as the target completion ring for - * providing any asynchronous event completions for the function - * that is being configured. - */ - uint8_t vlan_antispoof_mode; - /* VLAN Anti-spoofing mode. */ - /* No VLAN anti-spoofing checks are enabled */ - #define HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK UINT32_C(0x0) - /* Validate VLAN against the configured VLAN(s) */ - #define HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN \ + * When this bit is '1', the VNIC is the default VNIC for + * the function. + */ + #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT \ UINT32_C(0x1) - /* Insert VLAN if it does not exist, otherwise discard */ - #define HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_INSERT_IF_VLANDNE \ + /* + * When this bit is '1', the VNIC is configured to + * strip VLAN in the RX path. + * If set to '0', then VLAN stripping is disabled on + * this VNIC. + */ + #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE \ UINT32_C(0x2) /* - * Insert VLAN if it does not exist, override - * VLAN if it exists + * When this bit is '1', the VNIC is configured to + * buffer receive packets in the hardware until the host + * posts new receive buffers. + * If set to '0', then bd_stall is disabled on + * this VNIC. */ - #define \ - HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN \ - UINT32_C(0x3) - uint8_t allowed_vlan_pris; + #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE \ + UINT32_C(0x4) /* - * This bit field defines VLAN PRIs that are allowed on this - * function. If nth bit is set, then VLAN PRI n is allowed on - * this function. + * When this bit is '1', the VNIC is configured to + * receive both RoCE and non-RoCE traffic. + * If set to '0', then this VNIC is not configured to + * operate in dual VNIC mode. */ - uint8_t evb_mode; + #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE \ + UINT32_C(0x8) /* - * The HWRM shall allow a PF driver to change EVB mode for the - * partition it belongs to. 
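
The vf_id_valid enable above lets a PF query a child VF's VNIC rather than its own. A sketch, again using the hypothetical hwrm_send_message() helper, with HWRM_VNIC_QCFG as the command identifier defined elsewhere in this file:

static int
vnic_qcfg_vf(uint32_t vnic_id, uint16_t vf_id,
	     struct hwrm_vnic_qcfg_output *resp)
{
	struct hwrm_vnic_qcfg_input req;

	memset(&req, 0, sizeof(req));
	req.req_type = rte_cpu_to_le_16(HWRM_VNIC_QCFG);
	req.vnic_id = rte_cpu_to_le_32(vnic_id);
	req.vf_id = rte_cpu_to_le_16(vf_id);
	req.enables =
		rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
	return hwrm_send_message(&req, sizeof(req), resp, sizeof(*resp));
}
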
The HWRM shall not allow a VF driver - * to change the EVB mode. The HWRM shall take into account the - * switching of EVB mode from one to another and reconfigure - * hardware resources as appropriately. The switching from VEB - * to VEPA mode requires the disabling of the loopback traffic. - * Additionally, source knock outs are handled differently in - * VEB and VEPA modes. + * When this flag is set to '1', the VNIC is configured to + * receive only RoCE traffic. + * When this flag is set to '0', the VNIC is not configured + * to receive only RoCE traffic. + * If roce_dual_vnic_mode flag and this flag both are set + * to '1', then it is an invalid configuration of the + * VNIC. The HWRM should not allow that type of + * mis-configuration by HWRM clients. + */ + #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE \ + UINT32_C(0x10) + /* + * When a VNIC uses one destination ring group for certain + * application (e.g. Receive Flow Steering) where + * exact match is used to direct packets to a VNIC with one + * destination ring group only, there is no need to configure + * RSS indirection table for that VNIC as only one destination + * ring group is used. + * + * When this bit is set to '1', then the VNIC is enabled in a + * mode where RSS is enabled in the VNIC using a RSS context + * for computing RSS hash but the RSS indirection table is + * not configured. + */ + #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE \ + UINT32_C(0x20) + /* + * When this bit is '1', the VNIC is configured to + * receive both RoCE and non-RoCE traffic, but forward only + * RoCE traffic further. Also RoCE traffic can be mirrored to + * L2 driver. */ - /* No Edge Virtual Bridging (EVB) */ - #define HWRM_FUNC_CFG_INPUT_EVB_MODE_NO_EVB UINT32_C(0x0) - /* Virtual Ethernet Bridge (VEB) */ - #define HWRM_FUNC_CFG_INPUT_EVB_MODE_VEB UINT32_C(0x1) - /* Virtual Ethernet Port Aggregator (VEPA) */ - #define HWRM_FUNC_CFG_INPUT_EVB_MODE_VEPA UINT32_C(0x2) - uint8_t unused_2; - uint16_t num_mcast_filters; + #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE \ + UINT32_C(0x40) + uint8_t unused_1[7]; /* - * The number of multicast filters that should be reserved for - * this function on the RX side. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ + uint8_t valid; } __attribute__((packed)); -/* Output (16 bytes) */ -struct hwrm_func_cfg_output { - uint16_t error_code; +/******************* + * hwrm_vnic_qcaps * + *******************/ + + +/* hwrm_vnic_qcaps_input (size:192b/24B) */ +struct hwrm_vnic_qcaps_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. 
This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; + uint16_t target_id; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ + uint64_t resp_addr; + uint32_t enables; + uint8_t unused_0[4]; } __attribute__((packed)); -/* hwrm_func_qstats */ -/* - * Description: This command returns statistics of a function. The input FID - * value is used to indicate what function is being queried. This allows a - * physical function driver to query virtual functions that are children of the - * physical function. The HWRM shall return any unsupported counter with a value - * of 0xFFFFFFFF for 32-bit counters and 0xFFFFFFFFFFFFFFFF for 64-bit counters. - */ -/* Input (24 bytes) */ -struct hwrm_func_qstats_input { - uint16_t req_type; +/* hwrm_vnic_qcaps_output (size:192b/24B) */ +struct hwrm_vnic_qcaps_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* The maximum receive unit that is settable on a vnic. */ + uint16_t mru; + uint8_t unused_0[2]; + uint32_t flags; + /* Unused. */ + #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_UNUSED \ + UINT32_C(0x1) /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * When this bit is '1', the capability of stripping VLAN in + * the RX path is supported on VNIC(s). + * If set to '0', then VLAN stripping capability is + * not supported on VNIC(s). */ - uint16_t cmpl_ring; + #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_VLAN_STRIP_CAP \ + UINT32_C(0x2) /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * When this bit is '1', the capability to buffer receive + * packets in the hardware until the host posts new receive buffers + * is supported on VNIC(s). + * If set to '0', then bd_stall capability is not supported + * on VNIC(s). */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_BD_STALL_CAP \ + UINT32_C(0x4) /* - * Target ID of this command. 
0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * When this bit is '1', the capability to + * receive both RoCE and non-RoCE traffic on VNIC(s) is + * supported. + * If set to '0', then the capability to receive + * both RoCE and non-RoCE traffic on VNIC(s) is + * not supported. */ - uint64_t resp_addr; + #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_ROCE_DUAL_VNIC_CAP \ + UINT32_C(0x8) /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * When this bit is set to '1', the capability to configure + * a VNIC to receive only RoCE traffic is supported. + * When this flag is set to '0', the VNIC capability to + * configure to receive only RoCE traffic is not supported. */ - uint16_t fid; + #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_ROCE_ONLY_VNIC_CAP \ + UINT32_C(0x10) /* - * Function ID of the function that is being queried. 0xFF... - * (All Fs) if the query is for the requesting function. + * When this bit is set to '1', then the capability to enable + * a VNIC in a mode where RSS context without configuring + * RSS indirection table is supported (for RSS hash computation). + * When this bit is set to '0', then a VNIC can not be configured + * with a mode to enable RSS context without configuring RSS + * indirection table. */ - uint16_t unused_0[3]; -} __attribute__((packed)); - -/* Output (176 bytes) */ -struct hwrm_func_qstats_output { - uint16_t error_code; + #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_RSS_DFLT_CR_CAP \ + UINT32_C(0x20) /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * When this bit is '1', the capability to + * mirror the the RoCE traffic is supported. + * If set to '0', then the capability to mirror the + * RoCE traffic is not supported. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP \ + UINT32_C(0x40) /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * When this bit is '1', the outermost RSS hashing capability + * is supported. If set to '0', then the outermost RSS hashing + * capability is not supported. */ - uint64_t tx_ucast_pkts; - /* Number of transmitted unicast packets on the function. */ - uint64_t tx_mcast_pkts; - /* Number of transmitted multicast packets on the function. */ - uint64_t tx_bcast_pkts; - /* Number of transmitted broadcast packets on the function. */ - uint64_t tx_err_pkts; + #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_OUTERMOST_RSS_CAP \ + UINT32_C(0x80) + uint8_t unused_1[7]; /* - * Number of transmitted packets that were discarded due to - * internal NIC resource problems. For transmit, this can only - * happen if TMP is configured to allow dropping in HOL blocking - * conditions, which is not a normal configuration. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. 
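
The hwrm_vnic_qcaps flags are read-only capability bits that a driver typically latches once at initialization; testing one is a plain mask after byte-order conversion:

static int
vnic_has_vlan_strip(const struct hwrm_vnic_qcaps_output *resp)
{
	uint32_t flags = rte_le_to_cpu_32(resp->flags);

	return !!(flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_VLAN_STRIP_CAP);
}
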
+ * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - uint64_t tx_drop_pkts; + uint8_t valid; +} __attribute__((packed)); + +/********************* + * hwrm_vnic_tpa_cfg * + *********************/ + + +/* hwrm_vnic_tpa_cfg_input (size:320b/40B) */ +struct hwrm_vnic_tpa_cfg_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * Number of dropped packets on transmit path on the function. - * These are packets that have been marked for drop by the TE - * CFA block or are packets that exceeded the transmit MTU limit - * for the function. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint64_t tx_ucast_bytes; - /* Number of transmitted bytes for unicast traffic on the function. */ - uint64_t tx_mcast_bytes; + uint16_t cmpl_ring; /* - * Number of transmitted bytes for multicast traffic on the - * function. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint64_t tx_bcast_bytes; + uint16_t seq_id; /* - * Number of transmitted bytes for broadcast traffic on the - * function. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint64_t rx_ucast_pkts; - /* Number of received unicast packets on the function. */ - uint64_t rx_mcast_pkts; - /* Number of received multicast packets on the function. */ - uint64_t rx_bcast_pkts; - /* Number of received broadcast packets on the function. */ - uint64_t rx_err_pkts; + uint16_t target_id; /* - * Number of received packets that were discarded on the - * function due to resource limitations. This can happen for 3 - * reasons. # The BD used for the packet has a bad format. # - * There were no BDs available in the ring for the packet. # - * There were no BDs available on-chip for the packet. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint64_t rx_drop_pkts; + uint64_t resp_addr; + uint32_t flags; /* - * Number of dropped packets on received path on the function. - * These are packets that have been marked for drop by the RE - * CFA. + * When this bit is '1', the VNIC shall be configured to + * perform transparent packet aggregation (TPA) of + * non-tunneled TCP packets. */ - uint64_t rx_ucast_bytes; - /* Number of received bytes for unicast traffic on the function. */ - uint64_t rx_mcast_bytes; - /* Number of received bytes for multicast traffic on the function. */ - uint64_t rx_bcast_bytes; - /* Number of received bytes for broadcast traffic on the function. */ - uint64_t rx_agg_pkts; - /* Number of aggregated unicast packets on the function. */ - uint64_t rx_agg_bytes; - /* Number of aggregated unicast bytes on the function. */ - uint64_t rx_agg_events; - /* Number of aggregation events on the function. */ - uint64_t rx_agg_aborts; - /* Number of aborted aggregations on the function. 
*/ - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; + #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA \ + UINT32_C(0x1) /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * When this bit is '1', the VNIC shall be configured to + * perform transparent packet aggregation (TPA) of + * tunneled TCP packets. */ -} __attribute__((packed)); - -/* hwrm_func_clr_stats */ -/* - * Description: This command clears statistics of a function. The input FID - * value is used to indicate what function's statistics is being cleared. This - * allows a physical function driver to clear statistics of virtual functions - * that are children of the physical function. - */ -/* Input (24 bytes) */ -struct hwrm_func_clr_stats_input { - uint16_t req_type; + #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA \ + UINT32_C(0x2) /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * When this bit is '1', the VNIC shall be configured to + * perform transparent packet aggregation (TPA) according + * to Windows Receive Segment Coalescing (RSC) rules. */ - uint16_t cmpl_ring; + #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE \ + UINT32_C(0x4) /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * When this bit is '1', the VNIC shall be configured to + * perform transparent packet aggregation (TPA) according + * to Linux Generic Receive Offload (GRO) rules. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO \ + UINT32_C(0x8) /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * When this bit is '1', the VNIC shall be configured to + * perform transparent packet aggregation (TPA) for TCP + * packets with IP ECN set to non-zero. */ - uint64_t resp_addr; + #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN \ + UINT32_C(0x10) /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * When this bit is '1', the VNIC shall be configured to + * perform transparent packet aggregation (TPA) for + * GRE tunneled TCP packets only if all packets have the + * same GRE sequence. */ - uint16_t fid; + #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ \ + UINT32_C(0x20) /* - * Function ID of the function. 0xFF... (All Fs) if the query is - * for the requesting function. + * When this bit is '1' and the GRO mode is enabled, + * the VNIC shall be configured to + * perform transparent packet aggregation (TPA) for + * TCP/IPv4 packets with consecutively increasing IPIDs. + * In other words, the last packet that is being + * aggregated to an already existing aggregation context + * shall have IPID 1 more than the IPID of the last packet + * that was aggregated in that aggregation context. 
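
The IPID rule above is strict arithmetic continuity: a packet may join an existing aggregation context only if its IPID is exactly one more than that of the last packet aggregated there, with 16-bit wraparound. As a worked check:

#include <stdint.h>

static int
gro_ipid_continuous(uint16_t last_ipid, uint16_t new_ipid)
{
	/* The cast makes the wraparound explicit: 0xFFFF is followed
	 * by 0x0000. */
	return new_ipid == (uint16_t)(last_ipid + 1);
}
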
*/ - uint16_t unused_0[3]; -} __attribute__((packed)); - -/* Output (16 bytes) */ -struct hwrm_func_clr_stats_output { - uint16_t error_code; + #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO_IPID_CHECK \ + UINT32_C(0x40) /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * When this bit is '1' and the GRO mode is enabled, + * the VNIC shall be configured to + * perform transparent packet aggregation (TPA) for + * TCP packets with the same TTL (IPv4) or Hop limit (IPv6) + * value. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO_TTL_CHECK \ + UINT32_C(0x80) + uint32_t enables; /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * This bit must be '1' for the max_agg_segs field to be + * configured. */ - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; + #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS UINT32_C(0x1) /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * This bit must be '1' for the max_aggs field to be + * configured. */ -} __attribute__((packed)); - -/* hwrm_func_vf_vnic_ids_query */ -/* Description: This command is used to query vf vnic ids. */ -/* Input (32 bytes) */ -struct hwrm_func_vf_vnic_ids_query_input { - uint16_t req_type; + #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS UINT32_C(0x2) /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * This bit must be '1' for the max_agg_timer field to be + * configured. */ - uint16_t cmpl_ring; + #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_TIMER UINT32_C(0x4) /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * This bit must be '1' for the min_agg_len field to be + * configured. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN UINT32_C(0x8) + /* Logical vnic ID */ + uint16_t vnic_id; /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * This is the maximum number of TCP segments that can + * be aggregated (unit is Log2). Max value is 31. 
*/ - uint64_t resp_addr; + uint16_t max_agg_segs; + /* 1 segment */ + #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_1 UINT32_C(0x0) + /* 2 segments */ + #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_2 UINT32_C(0x1) + /* 4 segments */ + #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_4 UINT32_C(0x2) + /* 8 segments */ + #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_8 UINT32_C(0x3) + /* Any segment size larger than this is not valid */ + #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_MAX UINT32_C(0x1f) + #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_LAST \ + HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_MAX /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * This is the maximum number of aggregations this VNIC is + * allowed (unit is Log2). Max value is 7 + */ + uint16_t max_aggs; + /* 1 aggregation */ + #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_1 UINT32_C(0x0) + /* 2 aggregations */ + #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_2 UINT32_C(0x1) + /* 4 aggregations */ + #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_4 UINT32_C(0x2) + /* 8 aggregations */ + #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_8 UINT32_C(0x3) + /* 16 aggregations */ + #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_16 UINT32_C(0x4) + /* Any aggregation size larger than this is not valid */ + #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX UINT32_C(0x7) + #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_LAST \ + HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX + uint8_t unused_0[2]; + /* + * This is the maximum amount of time allowed for + * an aggregation context to complete after it was initiated. */ - uint16_t vf_id; + uint32_t max_agg_timer; /* - * This value is used to identify a Virtual Function (VF). The - * scope of VF ID is local within a PF. + * This is the minimum amount of payload length required to + * start an aggregation context. */ - uint8_t unused_0; - uint8_t unused_1; - uint32_t max_vnic_id_cnt; - /* Max number of vnic ids in vnic id table */ - uint64_t vnic_id_tbl_addr; - /* This is the address for VF VNIC ID table */ + uint32_t min_agg_len; } __attribute__((packed)); -/* Output (16 bytes) */ -struct hwrm_func_vf_vnic_ids_query_output { - uint16_t error_code; +/* hwrm_vnic_tpa_cfg_output (size:128b/16B) */ +struct hwrm_vnic_tpa_cfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. 
*/ - uint16_t resp_len; + uint8_t valid; +} __attribute__((packed)); + +/********************** + * hwrm_vnic_tpa_qcfg * + **********************/ + + +/* hwrm_vnic_tpa_qcfg_input (size:192b/24B) */ +struct hwrm_vnic_tpa_qcfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint32_t vnic_id_cnt; + uint16_t seq_id; /* - * Actual number of vnic ids Each VNIC ID is written as a 32-bit - * number. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint8_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t valid; + uint16_t target_id; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ + uint64_t resp_addr; + /* Logical vnic ID */ + uint16_t vnic_id; + uint8_t unused_0[6]; } __attribute__((packed)); -/* hwrm_func_drv_rgtr */ -/* - * Description: This command is used by the function driver to register its - * information with the HWRM. A function driver shall implement this command. A - * function driver shall use this command during the driver initialization right - * after the HWRM version discovery and default ring resources allocation. - */ -/* Input (80 bytes) */ -struct hwrm_func_drv_rgtr_input { - uint16_t req_type; +/* hwrm_vnic_tpa_qcfg_output (size:256b/32B) */ +struct hwrm_vnic_tpa_qcfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint32_t flags; /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * When this bit is '1', the VNIC is configured to + * perform transparent packet aggregation (TPA) of + * non-tunneled TCP packets. */ - uint16_t cmpl_ring; + #define HWRM_VNIC_TPA_QCFG_OUTPUT_FLAGS_TPA \ + UINT32_C(0x1) /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * When this bit is '1', the VNIC is configured to + * perform transparent packet aggregation (TPA) of + * tunneled TCP packets. 
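
/*
 * Illustrative sketch (not part of the generated HSI header): one
 * plausible way a driver could fill hwrm_vnic_tpa_cfg_input, defined
 * above, to enable TPA with GRO semantics.  example_hwrm_send() is a
 * hypothetical transport helper, assumed to fill the common request
 * header and poll the response's valid byte; only the struct and the
 * defines above come from this file.  Note the log2 encoding:
 * MAX_AGG_SEGS_8 is UINT32_C(0x3), i.e. 2^3 = 8 segments.
 */
extern int example_hwrm_send(void *req, size_t req_len); /* hypothetical */

static int example_vnic_tpa_enable(uint16_t vnic_id)
{
	struct hwrm_vnic_tpa_cfg_input req = { 0 };

	req.flags = rte_cpu_to_le_32(HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
				     HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
				     HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO);
	req.enables = rte_cpu_to_le_32(
			HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
			HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS);
	req.vnic_id = rte_cpu_to_le_16(vnic_id);
	/* Log2 units: 0x3 -> 8 segments, 0x4 -> 16 aggregations. */
	req.max_agg_segs =
		rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_8);
	req.max_aggs = rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_16);

	return example_hwrm_send(&req, sizeof(req));
}
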
*/ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + #define HWRM_VNIC_TPA_QCFG_OUTPUT_FLAGS_ENCAP_TPA \ + UINT32_C(0x2) /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * When this bit is '1', the VNIC is configured to + * perform transparent packet aggregation (TPA) according + * to Windows Receive Segment Coalescing (RSC) rules. */ - uint64_t resp_addr; + #define HWRM_VNIC_TPA_QCFG_OUTPUT_FLAGS_RSC_WND_UPDATE \ + UINT32_C(0x4) /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * When this bit is '1', the VNIC is configured to + * perform transparent packet aggregation (TPA) according + * to Linux Generic Receive Offload (GRO) rules. */ - uint32_t flags; + #define HWRM_VNIC_TPA_QCFG_OUTPUT_FLAGS_GRO \ + UINT32_C(0x8) /* - * When this bit is '1', the function driver is requesting all - * requests from its children VF drivers to be forwarded to - * itself. This flag can only be set by the PF driver. If a VF - * driver sets this flag, it should be ignored by the HWRM. + * When this bit is '1', the VNIC is configured to + * perform transparent packet aggregation (TPA) for TCP + * packets with IP ECN set to non-zero. */ - #define HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_ALL_MODE UINT32_C(0x1) + #define HWRM_VNIC_TPA_QCFG_OUTPUT_FLAGS_AGG_WITH_ECN \ + UINT32_C(0x10) /* - * When this bit is '1', the function is requesting none of the - * requests from its children VF drivers to be forwarded to - * itself. This flag can only be set by the PF driver. If a VF - * driver sets this flag, it should be ignored by the HWRM. + * When this bit is '1', the VNIC is configured to + * perform transparent packet aggregation (TPA) for + * GRE tunneled TCP packets only if all packets have the + * same GRE sequence. */ - #define HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE UINT32_C(0x2) - uint32_t enables; - /* This bit must be '1' for the os_type field to be configured. */ - #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_OS_TYPE UINT32_C(0x1) - /* This bit must be '1' for the ver field to be configured. */ - #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER UINT32_C(0x2) - /* This bit must be '1' for the timestamp field to be configured. */ - #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_TIMESTAMP UINT32_C(0x4) - /* This bit must be '1' for the vf_req_fwd field to be configured. */ - #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_INPUT_FWD UINT32_C(0x8) + #define HWRM_VNIC_TPA_QCFG_OUTPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ \ + UINT32_C(0x20) /* - * This bit must be '1' for the async_event_fwd field to be - * configured. + * When this bit is '1' and the GRO mode is enabled, + * the VNIC is configured to + * perform transparent packet aggregation (TPA) for + * TCP/IPv4 packets with consecutively increasing IPIDs. + * In other words, the last packet that is being + * aggregated to an already existing aggregation context + * shall have IPID 1 more than the IPID of the last packet + * that was aggregated in that aggregation context. */ - #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD UINT32_C(0x10) - uint16_t os_type; + #define HWRM_VNIC_TPA_QCFG_OUTPUT_FLAGS_GRO_IPID_CHECK \ + UINT32_C(0x40) /* - * This value indicates the type of OS. The values are based on - * CIM_OperatingSystem.mof file as published by the DMTF. 
+ * When this bit is '1' and the GRO mode is enabled, + * the VNIC is configured to + * perform transparent packet aggregation (TPA) for + * TCP packets with the same TTL (IPv4) or Hop limit (IPv6) + * value. */ - /* Unknown */ - #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_UNKNOWN UINT32_C(0x0) - /* Other OS not listed below. */ - #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_OTHER UINT32_C(0x1) - /* MSDOS OS. */ - #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_MSDOS UINT32_C(0xe) - /* Windows OS. */ - #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_WINDOWS UINT32_C(0x12) - /* Solaris OS. */ - #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_SOLARIS UINT32_C(0x1d) - /* Linux OS. */ - #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_LINUX UINT32_C(0x24) - /* FreeBSD OS. */ - #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_FREEBSD UINT32_C(0x2a) - /* VMware ESXi OS. */ - #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_ESXI UINT32_C(0x68) - /* Microsoft Windows 8 64-bit OS. */ - #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_WIN864 UINT32_C(0x73) - /* Microsoft Windows Server 2012 R2 OS. */ - #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_WIN2012R2 UINT32_C(0x74) - uint8_t ver_maj; - /* This is the major version of the driver. */ - uint8_t ver_min; - /* This is the minor version of the driver. */ - uint8_t ver_upd; - /* This is the update version of the driver. */ - uint8_t unused_0; - uint16_t unused_1; - uint32_t timestamp; - /* - * This is a 32-bit timestamp provided by the driver for keep - * alive. The timestamp is in multiples of 1ms. - */ - uint32_t unused_2; - uint32_t vf_req_fwd[8]; + #define HWRM_VNIC_TPA_QCFG_OUTPUT_FLAGS_GRO_TTL_CHECK \ + UINT32_C(0x80) /* - * This is a 256-bit bit mask provided by the PF driver for - * letting the HWRM know what commands issued by the VF driver - * to the HWRM should be forwarded to the PF driver. Nth bit - * refers to the Nth req_type. Setting Nth bit to 1 indicates - * that requests from the VF driver with req_type equal to N - * shall be forwarded to the parent PF driver. This field is not - * valid for the VF driver. + * This is the maximum number of TCP segments that can + * be aggregated (unit is Log2). Max value is 31. */ - uint32_t async_event_fwd[8]; + uint16_t max_agg_segs; + /* 1 segment */ + #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGG_SEGS_1 UINT32_C(0x0) + /* 2 segments */ + #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGG_SEGS_2 UINT32_C(0x1) + /* 4 segments */ + #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGG_SEGS_4 UINT32_C(0x2) + /* 8 segments */ + #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGG_SEGS_8 UINT32_C(0x3) + /* Any segment size larger than this is not valid */ + #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGG_SEGS_MAX UINT32_C(0x1f) + #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGG_SEGS_LAST \ + HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGG_SEGS_MAX /* - * This is a 256-bit bit mask provided by the function driver - * (PF or VF driver) to indicate the list of asynchronous event - * completions to be forwarded. Nth bit refers to the Nth - * event_id. Setting Nth bit to 1 by the function driver shall - * result in the HWRM forwarding asynchronous event completion - * with event_id equal to N. If all bits are set to 0 (value of - * 0), then the HWRM shall not forward any asynchronous event - * completion to this function driver. + * This is the maximum number of aggregations this VNIC is + * allowed (unit is Log2). 
Max value is 7 */ -} __attribute__((packed)); - -/* Output (16 bytes) */ -struct hwrm_func_drv_rgtr_output { - uint16_t error_code; + uint16_t max_aggs; + /* 1 aggregation */ + #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGGS_1 UINT32_C(0x0) + /* 2 aggregations */ + #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGGS_2 UINT32_C(0x1) + /* 4 aggregations */ + #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGGS_4 UINT32_C(0x2) + /* 8 aggregations */ + #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGGS_8 UINT32_C(0x3) + /* 16 aggregations */ + #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGGS_16 UINT32_C(0x4) + /* Any aggregation size larger than this is not valid */ + #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGGS_MAX UINT32_C(0x7) + #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGGS_LAST \ + HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGGS_MAX /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * This is the maximum amount of time allowed for + * an aggregation context to complete after it was initiated. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + uint32_t max_agg_timer; /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * This is the minimum amount of payload length required to + * start an aggregation context. */ - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; + uint32_t min_agg_len; + uint8_t unused_0[7]; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ + uint8_t valid; } __attribute__((packed)); -/* hwrm_func_drv_unrgtr */ -/* - * Description: This command is used by the function driver to un register with - * the HWRM. A function driver shall implement this command. A function driver - * shall use this command during the driver unloading. - */ -/* Input (24 bytes) */ -struct hwrm_func_drv_unrgtr_input { - uint16_t req_type; +/********************* + * hwrm_vnic_rss_cfg * + *********************/ + + +/* hwrm_vnic_rss_cfg_input (size:384b/48B) */ +struct hwrm_vnic_rss_cfg_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. 
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t hash_type;
+ /*
+ * When this bit is '1', the RSS hash shall be computed
+ * over source and destination IPv4 addresses of IPv4
+ * packets.
+ */
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4 UINT32_C(0x1)
 /*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * When this bit is '1', the RSS hash shall be computed
+ * over source/destination IPv4 addresses and
+ * source/destination ports of TCP/IPv4 packets.
 */
- uint16_t cmpl_ring;
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4 UINT32_C(0x2)
 /*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * When this bit is '1', the RSS hash shall be computed
+ * over source/destination IPv4 addresses and
+ * source/destination ports of UDP/IPv4 packets.
 */
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4 UINT32_C(0x4)
 /*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * When this bit is '1', the RSS hash shall be computed
+ * over source and destination IPv6 addresses of IPv6
+ * packets.
 */
- uint64_t resp_addr;
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6 UINT32_C(0x8)
 /*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * When this bit is '1', the RSS hash shall be computed
+ * over source/destination IPv6 addresses and
+ * source/destination ports of TCP/IPv6 packets.
 */
- uint32_t flags;
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6 UINT32_C(0x10)
 /*
- * When this bit is '1', the function driver is notifying the
- * HWRM to prepare for the shutdown.
+ * When this bit is '1', the RSS hash shall be computed
+ * over source/destination IPv6 addresses and
+ * source/destination ports of UDP/IPv6 packets.
 */
- #define HWRM_FUNC_DRV_UNRGTR_INPUT_FLAGS_PREPARE_FOR_SHUTDOWN \
- UINT32_C(0x1)
- uint32_t unused_0;
-} __attribute__((packed));
-
-/* Output (16 bytes) */
-struct hwrm_func_drv_unrgtr_output {
- uint16_t error_code;
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6 UINT32_C(0x20)
+ /* VNIC ID of VNIC associated with RSS table being configured. */
+ uint16_t vnic_id;
 /*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * Specifies which VNIC ring table pair to configure.
+ * Valid values range from 0 to 7.
 */
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint8_t ring_table_pair_index;
+ /* Flags to specify different RSS hash modes. */
+ uint8_t hash_mode_flags;
 /*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * When this bit is '1', it indicates using current RSS
+ * hash mode setting configured in the device.
 */
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_DEFAULT \
+ UINT32_C(0x1)
 /*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * When this bit is '1', it indicates requesting support of
+ * RSS hashing over innermost 4 tuples {l3.src, l3.dest,
+ * l4.src, l4.dest} for tunnel packets. For non-tunnel
+ * packets, the RSS hash is computed over the normal
+ * src/dest l3 and src/dest l4 headers.
 */
-} __attribute__((packed));
-
-/* hwrm_func_buf_rgtr */
-/*
- * Description: This command is used by the PF driver to register buffers used
- * in the PF-VF communication with the HWRM. The PF driver uses this command to
- * register buffers for each PF-VF channel. A parent PF may issue this command
- * per child VF. If VF ID is not valid, then this command is used to register
- * buffers for all children VFs of the PF.
- */
-/* Input (128 bytes) */
-struct hwrm_func_buf_rgtr_input {
- uint16_t req_type;
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_INNERMOST_4 \
+ UINT32_C(0x2)
 /*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * When this bit is '1', it indicates requesting support of
+ * RSS hashing over innermost 2 tuples {l3.src, l3.dest} for
+ * tunnel packets. For non-tunnel packets, the RSS hash is
+ * computed over the normal src/dest l3 headers.
 */
- uint16_t cmpl_ring;
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_INNERMOST_2 \
+ UINT32_C(0x4)
 /*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * When this bit is '1', it indicates requesting support of
+ * RSS hashing over outermost 4 tuples {t_l3.src, t_l3.dest,
+ * t_l4.src, t_l4.dest} for tunnel packets. For non-tunnel
+ * packets, the RSS hash is computed over the normal
+ * src/dest l3 and src/dest l4 headers.
 */
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_OUTERMOST_4 \
+ UINT32_C(0x8)
+ /*
+ * When this bit is '1', it indicates requesting support of
+ * RSS hashing over outermost 2 tuples {t_l3.src, t_l3.dest} for
+ * tunnel packets. For non-tunnel packets, the RSS hash is
+ * computed over the normal src/dest l3 headers.
+ */
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_OUTERMOST_2 \
+ UINT32_C(0x10)
+ /* This is the address for rss ring group table */
+ uint64_t ring_grp_tbl_addr;
+ /* This is the address for rss hash key table */
+ uint64_t hash_key_tbl_addr;
+ /* Index to the rss indirection table.
*/ + uint16_t rss_ctx_idx; + uint8_t unused_1[6]; +} __attribute__((packed)); + +/* hwrm_vnic_rss_cfg_output (size:128b/16B) */ +struct hwrm_vnic_rss_cfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - uint64_t resp_addr; + uint8_t valid; +} __attribute__((packed)); + +/********************** + * hwrm_vnic_rss_qcfg * + **********************/ + + +/* hwrm_vnic_rss_qcfg_input (size:192b/24B) */ +struct hwrm_vnic_rss_qcfg_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint32_t enables; - /* This bit must be '1' for the vf_id field to be configured. */ - #define HWRM_FUNC_BUF_RGTR_INPUT_ENABLES_VF_ID UINT32_C(0x1) - /* This bit must be '1' for the err_buf_addr field to be configured. */ - #define HWRM_FUNC_BUF_RGTR_INPUT_ENABLES_ERR_BUF_ADDR UINT32_C(0x2) - uint16_t vf_id; + uint16_t cmpl_ring; /* - * This value is used to identify a Virtual Function (VF). The - * scope of VF ID is local within a PF. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint16_t req_buf_num_pages; + uint16_t seq_id; /* - * This field represents the number of pages used for request - * buffer(s). + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint16_t req_buf_page_size; - /* This field represents the page size used for request buffer(s). */ - /* 16 bytes */ - #define HWRM_FUNC_BUF_RGTR_INPUT_INPUT_BUF_PAGE_SIZE_16B UINT32_C(0x4) - /* 4 Kbytes */ - #define HWRM_FUNC_BUF_RGTR_INPUT_INPUT_BUF_PAGE_SIZE_4K UINT32_C(0xc) - /* 8 Kbytes */ - #define HWRM_FUNC_BUF_RGTR_INPUT_INPUT_BUF_PAGE_SIZE_8K UINT32_C(0xd) - /* 64 Kbytes */ - #define HWRM_FUNC_BUF_RGTR_INPUT_INPUT_BUF_PAGE_SIZE_64K UINT32_C(0x10) - /* 2 Mbytes */ - #define HWRM_FUNC_BUF_RGTR_INPUT_INPUT_BUF_PAGE_SIZE_2M UINT32_C(0x15) - /* 4 Mbytes */ - #define HWRM_FUNC_BUF_RGTR_INPUT_INPUT_BUF_PAGE_SIZE_4M UINT32_C(0x16) - /* 1 Gbytes */ - #define HWRM_FUNC_BUF_RGTR_INPUT_INPUT_BUF_PAGE_SIZE_1G UINT32_C(0x1e) - uint16_t req_buf_len; - /* The length of the request buffer per VF in bytes. */ - uint16_t resp_buf_len; - /* The length of the response buffer in bytes. */ - uint8_t unused_0; - uint8_t unused_1; - uint64_t req_buf_page_addr[10]; - /* This field represents the page address of req buffer. 
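
/*
 * Illustrative sketch (not part of the generated header): programming
 * the RSS hash for a VNIC using hwrm_vnic_rss_cfg_input as defined
 * above.  grp_tbl_dma and key_dma are assumed to be DMA addresses of a
 * previously filled ring-group table and 40-byte hash key (matching
 * hash_key[10] in the qcfg output below); example_hwrm_send() is a
 * hypothetical transport helper.
 */
extern int example_hwrm_send(void *req, size_t req_len); /* hypothetical */

static int example_vnic_rss_enable(uint16_t vnic_id, uint16_t rss_ctx_idx,
				   uint64_t grp_tbl_dma, uint64_t key_dma)
{
	struct hwrm_vnic_rss_cfg_input req = { 0 };

	/* Hash over IPv4/IPv6 addresses plus the TCP 4-tuples. */
	req.hash_type = rte_cpu_to_le_32(
			HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4 |
			HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4 |
			HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6 |
			HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6);
	req.vnic_id = rte_cpu_to_le_16(vnic_id);
	req.ring_grp_tbl_addr = rte_cpu_to_le_64(grp_tbl_dma);
	req.hash_key_tbl_addr = rte_cpu_to_le_64(key_dma);
	req.rss_ctx_idx = rte_cpu_to_le_16(rss_ctx_idx);

	return example_hwrm_send(&req, sizeof(req));
}
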
*/ - uint64_t error_buf_addr; + uint16_t target_id; /* - * This field is used to receive the error reporting from the - * chipset. Only applicable for PFs. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint64_t resp_buf_addr; - /* This field is used to receive the response forwarded by the HWRM. */ + uint64_t resp_addr; + /* Index to the rss indirection table. */ + uint16_t rss_ctx_idx; + uint8_t unused_0[6]; } __attribute__((packed)); -/* Output (16 bytes) */ -struct hwrm_func_buf_rgtr_output { - uint16_t error_code; +/* hwrm_vnic_rss_qcfg_output (size:512b/64B) */ +struct hwrm_vnic_rss_qcfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint32_t hash_type; /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * When this bit is '1', the RSS hash shall be computed + * over source and destination IPv4 addresses of IPv4 + * packets. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_TYPE_IPV4 UINT32_C(0x1) /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * When this bit is '1', the RSS hash shall be computed + * over source/destination IPv4 addresses and + * source/destination ports of TCP/IPv4 packets. */ - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; + #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_TYPE_TCP_IPV4 UINT32_C(0x2) /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * When this bit is '1', the RSS hash shall be computed + * over source/destination IPv4 addresses and + * source/destination ports of UDP/IPv4 packets. */ -} __attribute__((packed)); - -/* hwrm_func_buf_unrgtr */ -/* - * Description: This command is used by the PF driver to unregister buffers used - * in the PF-VF communication with the HWRM. The PF driver uses this command to - * unregister buffers for PF-VF communication. A parent PF may issue this - * command to unregister buffers for communication between the PF and a specific - * VF. If the VF ID is not valid, then this command is used to unregister - * buffers used for communications with all children VFs of the PF. - */ -/* Input (24 bytes) */ -struct hwrm_func_buf_unrgtr_input { - uint16_t req_type; + #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_TYPE_UDP_IPV4 UINT32_C(0x4) /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. 
+ * When this bit is '1', the RSS hash shall be computed
+ * over source and destination IPv6 addresses of IPv6
+ * packets.
 */
- uint16_t cmpl_ring;
+ #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_TYPE_IPV6 UINT32_C(0x8)
 /*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * When this bit is '1', the RSS hash shall be computed
+ * over source/destination IPv6 addresses and
+ * source/destination ports of TCP/IPv6 packets.
 */
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_TYPE_TCP_IPV6 UINT32_C(0x10)
 /*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * When this bit is '1', the RSS hash shall be computed
+ * over source/destination IPv6 addresses and
+ * source/destination ports of UDP/IPv6 packets.
 */
- uint64_t resp_addr;
+ #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_TYPE_UDP_IPV6 UINT32_C(0x20)
+ uint8_t unused_0[4];
+ /* This is the value of rss hash key */
+ uint32_t hash_key[10];
+ /* Flags to specify different RSS hash modes. */
+ uint8_t hash_mode_flags;
 /*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * When this bit is '1', it indicates using current RSS
+ * hash mode setting configured in the device.
 */
- uint32_t enables;
- /* This bit must be '1' for the vf_id field to be configured. */
- #define HWRM_FUNC_BUF_UNRGTR_INPUT_ENABLES_VF_ID UINT32_C(0x1)
- uint16_t vf_id;
+ #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_MODE_FLAGS_DEFAULT \
+ UINT32_C(0x1)
 /*
- * This value is used to identify a Virtual Function (VF). The
- * scope of VF ID is local within a PF.
+ * When this bit is '1', it indicates requesting support of
+ * RSS hashing over innermost 4 tuples {l3.src, l3.dest,
+ * l4.src, l4.dest} for tunnel packets. For non-tunnel
+ * packets, the RSS hash is computed over the normal
+ * src/dest l3 and src/dest l4 headers.
 */
- uint16_t unused_0;
-} __attribute__((packed));
-
-/* Output (16 bytes) */
-struct hwrm_func_buf_unrgtr_output {
- uint16_t error_code;
+ #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_MODE_FLAGS_INNERMOST_4 \
+ UINT32_C(0x2)
 /*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * When this bit is '1', it indicates requesting support of
+ * RSS hashing over innermost 2 tuples {l3.src, l3.dest} for
+ * tunnel packets. For non-tunnel packets, the RSS hash is
+ * computed over the normal src/dest l3 headers.
 */
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_MODE_FLAGS_INNERMOST_2 \
+ UINT32_C(0x4)
+ /*
+ * When this bit is '1', it indicates requesting support of
+ * RSS hashing over outermost 4 tuples {t_l3.src, t_l3.dest,
+ * t_l4.src, t_l4.dest} for tunnel packets. For non-tunnel
+ * packets, the RSS hash is computed over the normal
+ * src/dest l3 and src/dest l4 headers.
+ */
+ #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_MODE_FLAGS_OUTERMOST_4 \
+ UINT32_C(0x8)
 /*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * When this bit is '1', it indicates requesting support of
+ * RSS hashing over outermost 2 tuples {t_l3.src, t_l3.dest} for
+ * tunnel packets. For non-tunnel packets, the RSS hash is
+ * computed over the normal src/dest l3 headers.
 */
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_MODE_FLAGS_OUTERMOST_2 \
+ UINT32_C(0x10)
+ uint8_t unused_1[6];
 /*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
 */
+ uint8_t valid;
 } __attribute__((packed));
 
-/* hwrm_port_phy_cfg */
-/*
- * Description: This command configures the PHY device for the port. It allows
- * setting of the most generic settings for the PHY. The HWRM shall complete
- * this command as soon as PHY settings are configured. They may not be applied
- * when the command response is provided. A VF driver shall not be allowed to
- * configure PHY using this command. In a network partition mode, a PF driver
- * shall not be allowed to configure PHY using this command.
- */
-/* Input (56 bytes) */
-struct hwrm_port_phy_cfg_input {
- uint16_t req_type;
+/**************************
+ * hwrm_vnic_plcmodes_cfg *
+ **************************/
+
+
+/* hwrm_vnic_plcmodes_cfg_input (size:320b/40B) */
+struct hwrm_vnic_plcmodes_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
 /*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
 */
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
 /*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
 */
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
 /*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
 */
- uint64_t resp_addr;
+ uint16_t target_id;
 /*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
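
/*
 * Illustrative sketch (not part of the generated header): extracting
 * the 40-byte RSS hash key (hash_key[10] above) from a completed
 * hwrm_vnic_rss_qcfg_output.  Per the field descriptions, the caller is
 * assumed to have already seen the response's valid byte read as '1';
 * <string.h> is assumed for memcpy().
 */
static void example_read_rss_key(const struct hwrm_vnic_rss_qcfg_output *resp,
				 uint8_t key[40])
{
	/* The key is carried as 10 little-endian 32-bit words (40 bytes). */
	memcpy(key, resp->hash_key, sizeof(resp->hash_key));
}
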
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
 */
- uint32_t flags;
+ uint64_t resp_addr;
+ uint32_t flags;
 /*
- * When this bit is set to '1', the PHY for the port shall be
- * reset. # If this bit is set to 1, then the HWRM shall reset
- * the PHY after applying PHY configuration changes specified in
- * this command. # In order to guarantee that PHY configuration
- * changes specified in this command take effect, the HWRM
- * client should set this flag to 1. # If this bit is not set to
- * 1, then the HWRM may reset the PHY depending on the current
- * PHY configuration and settings specified in this command.
+ * When this bit is '1', the VNIC shall be configured to
+ * use regular placement algorithm.
+ * By default, the regular placement algorithm shall be
+ * enabled on the VNIC.
 */
- #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY UINT32_C(0x1)
- /* deprecated bit. Do not use!!! */
- #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_DEPRECATED UINT32_C(0x2)
+ #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_REGULAR_PLACEMENT \
+ UINT32_C(0x1)
 /*
- * When this bit is set to '1', the link shall be forced to the
- * force_link_speed value. When this bit is set to '1', the HWRM
- * client should not enable any of the auto negotiation related
- * fields represented by auto_XXX fields in this command. When
- * this bit is set to '1' and the HWRM client has enabled a
- * auto_XXX field in this command, then the HWRM shall ignore
- * the enabled auto_XXX field. When this bit is set to zero, the
- * link shall be allowed to autoneg.
+ * When this bit is '1', the VNIC shall be configured to
+ * use the jumbo placement algorithm.
 */
- #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE UINT32_C(0x4)
+ #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT \
+ UINT32_C(0x2)
 /*
- * When this bit is set to '1', the auto-negotiation process
- * shall be restarted on the link.
+ * When this bit is '1', the VNIC shall be configured
+ * to enable Header-Data split for IPv4 packets according
+ * to the following rules:
+ * # If the packet is identified as TCP/IPv4, then the
+ * packet is split at the beginning of the TCP payload.
+ * # If the packet is identified as UDP/IPv4, then the
+ * packet is split at the beginning of UDP payload.
+ * # If the packet is identified as non-TCP and non-UDP
+ * IPv4 packet, then the packet is split at the beginning
+ * of the upper layer protocol header carried in the IPv4
+ * packet.
 */
- #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG UINT32_C(0x8)
+ #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_IPV4 \
+ UINT32_C(0x4)
 /*
- * When this bit is set to '1', Energy Efficient Ethernet (EEE)
- * is requested to be enabled on this link. If EEE is not
- * supported on this port, then this flag shall be ignored by
- * the HWRM.
+ * When this bit is '1', the VNIC shall be configured
+ * to enable Header-Data split for IPv6 packets according
+ * to the following rules:
+ * # If the packet is identified as TCP/IPv6, then the
+ * packet is split at the beginning of the TCP payload.
+ * # If the packet is identified as UDP/IPv6, then the
+ * packet is split at the beginning of UDP payload.
+ * # If the packet is identified as non-TCP and non-UDP
+ * IPv6 packet, then the packet is split at the beginning
+ * of the upper layer protocol header carried in the IPv6
+ * packet.
*/ - #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_ENABLE UINT32_C(0x10) + #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_IPV6 \ + UINT32_C(0x8) /* - * When this bit is set to '1', Energy Efficient Ethernet (EEE) - * is requested to be disabled on this link. If EEE is not - * supported on this port, then this flag shall be ignored by - * the HWRM. + * When this bit is '1', the VNIC shall be configured + * to enable Header-Data split for FCoE packets at the + * beginning of FC payload. */ - #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_DISABLE UINT32_C(0x20) + #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_FCOE \ + UINT32_C(0x10) /* - * When this bit is set to '1' and EEE is enabled on this link, - * then TX LPI is requested to be enabled on the link. If EEE is - * not supported on this port, then this flag shall be ignored - * by the HWRM. If EEE is disabled on this port, then this flag - * shall be ignored by the HWRM. + * When this bit is '1', the VNIC shall be configured + * to enable Header-Data split for RoCE packets at the + * beginning of RoCE payload (after BTH/GRH headers). */ - #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_TX_LPI_ENABLE UINT32_C(0x40) + #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_ROCE \ + UINT32_C(0x20) + uint32_t enables; /* - * When this bit is set to '1' and EEE is enabled on this link, - * then TX LPI is requested to be disabled on the link. If EEE - * is not supported on this port, then this flag shall be - * ignored by the HWRM. If EEE is disabled on this port, then - * this flag shall be ignored by the HWRM. + * This bit must be '1' for the jumbo_thresh_valid field to be + * configured. */ - #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_TX_LPI_DISABLE UINT32_C(0x80) + #define HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID \ + UINT32_C(0x1) /* - * When set to 1, then the HWRM shall enable FEC - * autonegotitation on this port if supported. When set to 0, - * then this flag shall be ignored. If FEC autonegotiation is - * not supported, then the HWRM shall ignore this flag. + * This bit must be '1' for the hds_offset_valid field to be + * configured. */ - #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_AUTONEG_ENABLE UINT32_C(0x100) + #define HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID \ + UINT32_C(0x2) /* - * When set to 1, then the HWRM shall disable FEC - * autonegotiation on this port if supported. When set to 0, - * then this flag shall be ignored. If FEC autonegotiation is - * not supported, then the HWRM shall ignore this flag. + * This bit must be '1' for the hds_threshold_valid field to be + * configured. */ - #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_AUTONEG_DISABLE \ - UINT32_C(0x200) + #define HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID \ + UINT32_C(0x4) + /* Logical vnic ID */ + uint32_t vnic_id; /* - * When set to 1, then the HWRM shall enable FEC CLAUSE 74 (Fire - * Code) on this port if supported. When set to 0, then this - * flag shall be ignored. If FEC CLAUSE 74 is not supported, - * then the HWRM shall ignore this flag. + * When jumbo placement algorithm is enabled, this value + * is used to determine the threshold for jumbo placement. + * Packets with length larger than this value will be + * placed according to the jumbo placement algorithm. */ - #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE74_ENABLE \ - UINT32_C(0x400) + uint16_t jumbo_thresh; /* - * When set to 1, then the HWRM shall disable FEC CLAUSE 74 - * (Fire Code) on this port if supported. When set to 0, then - * this flag shall be ignored. 
If FEC CLAUSE 74 is not
- * supported, then the HWRM shall ignore this flag.
+ * This value is used to determine the offset into
+ * packet buffer where the split data (payload) will be
+ * placed according to one of the HDS placement algorithms.
+ *
+ * The lengths of packet buffers provided for split data
+ * shall be larger than this value.
 */
- #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE74_DISABLE \
- UINT32_C(0x800)
+ uint16_t hds_offset;
 /*
- * When set to 1, then the HWRM shall enable FEC CLAUSE 91 (Reed
- * Solomon) on this port if supported. When set to 0, then this
- * flag shall be ignored. If FEC CLAUSE 91 is not supported,
- * then the HWRM shall ignore this flag.
+ * When one of the HDS placement algorithms is enabled, this
+ * value is used to determine the threshold for HDS
+ * placement.
+ * Packets with length larger than this value will be
+ * placed according to the HDS placement algorithm.
+ * This value shall be in multiples of 4 bytes.
 */
- #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE91_ENABLE \
- UINT32_C(0x1000)
+ uint16_t hds_threshold;
+ uint8_t unused_0[6];
+} __attribute__((packed));
+
+/* hwrm_vnic_plcmodes_cfg_output (size:128b/16B) */
+struct hwrm_vnic_plcmodes_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
 /*
- * When set to 1, then the HWRM shall disable FEC CLAUSE 91
- * (Reed Solomon) on this port if supported. When set to 0, then
- * this flag shall be ignored. If FEC CLAUSE 91 is not
- * supported, then the HWRM shall ignore this flag.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
 */
- #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE91_DISABLE \
- UINT32_C(0x2000)
+ uint8_t valid;
+} __attribute__((packed));
+
+/***************************
+ * hwrm_vnic_plcmodes_qcfg *
+ ***************************/
+
+
+/* hwrm_vnic_plcmodes_qcfg_input (size:192b/24B) */
+struct hwrm_vnic_plcmodes_qcfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
 /*
- * When this bit is set to '1', the link shall be forced to be
- * taken down. # When this bit is set to '1", all other command
- * input settings related to the link speed shall be ignored.
- * Once the link state is forced down, it can be explicitly
- * cleared from that state by setting this flag to '0'. # If
- * this flag is set to '0', then the link shall be cleared from
- * forced down state if the link is in forced down state. There
- * may be conditions (e.g. out-of-band or sideband configuration
- * changes for the link) outside the scope of the HWRM
- * implementation that may clear forced down link state.
- */
- #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN UINT32_C(0x4000)
- uint32_t enables;
- /* This bit must be '1' for the auto_mode field to be configured. */
- #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE UINT32_C(0x1)
- /* This bit must be '1' for the auto_duplex field to be configured.
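
/*
 * Illustrative sketch (not part of the generated header): enabling
 * jumbo placement with a 2 KB threshold via hwrm_vnic_plcmodes_cfg_input
 * as defined above.  hds_offset and hds_threshold would be set the same
 * way when one of the HDS flags is used (hds_threshold in multiples of
 * 4 bytes).  example_hwrm_send() is a hypothetical transport helper.
 */
extern int example_hwrm_send(void *req, size_t req_len); /* hypothetical */

static int example_vnic_jumbo_placement(uint32_t vnic_id)
{
	struct hwrm_vnic_plcmodes_cfg_input req = { 0 };

	req.flags = rte_cpu_to_le_32(
		HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_REGULAR_PLACEMENT |
		HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
	req.enables = rte_cpu_to_le_32(
		HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
	req.vnic_id = rte_cpu_to_le_32(vnic_id);
	/* Packets longer than 2048 bytes use the jumbo algorithm. */
	req.jumbo_thresh = rte_cpu_to_le_16(2048);

	return example_hwrm_send(&req, sizeof(req));
}
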
*/ - #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX UINT32_C(0x2) - /* This bit must be '1' for the auto_pause field to be configured. */ - #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE UINT32_C(0x4) + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; /* - * This bit must be '1' for the auto_link_speed field to be - * configured. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED UINT32_C(0x8) + uint16_t seq_id; /* - * This bit must be '1' for the auto_link_speed_mask field to be - * configured. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK \ - UINT32_C(0x10) - /* This bit must be '1' for the wirespeed field to be configured. */ - #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_WIOUTPUTEED UINT32_C(0x20) - /* This bit must be '1' for the lpbk field to be configured. */ - #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_LPBK UINT32_C(0x40) - /* This bit must be '1' for the preemphasis field to be configured. */ - #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_PREEMPHASIS UINT32_C(0x80) - /* This bit must be '1' for the force_pause field to be configured. */ - #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE UINT32_C(0x100) + uint16_t target_id; /* - * This bit must be '1' for the eee_link_speed_mask field to be - * configured. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_EEE_LINK_SPEED_MASK \ - UINT32_C(0x200) - /* This bit must be '1' for the tx_lpi_timer field to be configured. */ - #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_TX_LPI_TIMER UINT32_C(0x400) - uint16_t port_id; - /* Port ID of port that is to be configured. */ - uint16_t force_link_speed; + uint64_t resp_addr; + /* Logical vnic ID */ + uint32_t vnic_id; + uint8_t unused_0[4]; +} __attribute__((packed)); + +/* hwrm_vnic_plcmodes_qcfg_output (size:192b/24B) */ +struct hwrm_vnic_plcmodes_qcfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint32_t flags; /* - * This is the speed that will be used if the force bit is '1'. - * If unsupported speed is selected, an error will be generated. + * When this bit is '1', the VNIC is configured to + * use regular placement algorithm. 
*/ - /* 100Mb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100MB UINT32_C(0x1) - /* 1Gb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_1GB UINT32_C(0xa) - /* 2Gb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_2GB UINT32_C(0x14) - /* 2.5Gb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_2_5GB UINT32_C(0x19) - /* 10Gb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB UINT32_C(0x64) - /* 20Mb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_20GB UINT32_C(0xc8) - /* 25Gb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_25GB UINT32_C(0xfa) - /* 40Gb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB UINT32_C(0x190) - /* 50Gb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB UINT32_C(0x1f4) - /* 100Gb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB UINT32_C(0x3e8) - /* 10Mb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10MB UINT32_C(0xffff) - uint8_t auto_mode; + #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_REGULAR_PLACEMENT \ + UINT32_C(0x1) /* - * This value is used to identify what autoneg mode is used when - * the link speed is not being forced. + * When this bit is '1', the VNIC is configured to + * use the jumbo placement algorithm. */ + #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_JUMBO_PLACEMENT \ + UINT32_C(0x2) /* - * Disable autoneg or autoneg disabled. No - * speeds are selected. + * When this bit is '1', the VNIC is configured + * to enable Header-Data split for IPv4 packets. */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE UINT32_C(0x0) - /* Select all possible speeds for autoneg mode. */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS UINT32_C(0x1) + #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_HDS_IPV4 \ + UINT32_C(0x4) /* - * Select only the auto_link_speed speed for - * autoneg mode. This mode has been DEPRECATED. - * An HWRM client should not use this mode. + * When this bit is '1', the VNIC is configured + * to enable Header-Data split for IPv6 packets. */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ONE_SPEED UINT32_C(0x2) + #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_HDS_IPV6 \ + UINT32_C(0x8) /* - * Select the auto_link_speed or any speed below - * that speed for autoneg. This mode has been - * DEPRECATED. An HWRM client should not use - * this mode. + * When this bit is '1', the VNIC is configured + * to enable Header-Data split for FCoE packets. */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ONE_OR_BELOW UINT32_C(0x3) + #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_HDS_FCOE \ + UINT32_C(0x10) /* - * Select the speeds based on the corresponding - * link speed mask value that is provided. + * When this bit is '1', the VNIC is configured + * to enable Header-Data split for RoCE packets. */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK UINT32_C(0x4) - uint8_t auto_duplex; + #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_HDS_ROCE \ + UINT32_C(0x20) /* - * This is the duplex setting that will be used if the - * autoneg_mode is "one_speed" or "one_or_below". + * When this bit is '1', the VNIC is configured + * to be the default VNIC of the requesting function. */ - /* Half Duplex will be requested. */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF UINT32_C(0x0) - /* Full duplex will be requested. */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL UINT32_C(0x1) - /* Both Half and Full dupex will be requested. 
 */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH UINT32_C(0x2)
- uint8_t auto_pause;
+ #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC \
+ UINT32_C(0x40)
 /*
- * This value is used to configure the pause that will be used
- * for autonegotiation. Add text on the usage of auto_pause and
- * force_pause.
+ * When jumbo placement algorithm is enabled, this value
+ * is used to determine the threshold for jumbo placement.
+ * Packets with length larger than this value will be
+ * placed according to the jumbo placement algorithm.
 */
+ uint16_t jumbo_thresh;
 /*
- * When this bit is '1', Generation of tx pause messages has
- * been requested. Disabled otherwise.
+ * This value is used to determine the offset into
+ * packet buffer where the split data (payload) will be
+ * placed according to one of the HDS placement algorithms.
+ *
+ * The lengths of packet buffers provided for split data
+ * shall be larger than this value.
 */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX UINT32_C(0x1)
+ uint16_t hds_offset;
 /*
- * When this bit is '1', Reception of rx pause messages has been
- * requested. Disabled otherwise.
+ * When one of the HDS placement algorithms is enabled, this
+ * value is used to determine the threshold for HDS
+ * placement.
+ * Packets with length larger than this value will be
+ * placed according to the HDS placement algorithm.
+ * This value shall be in multiples of 4 bytes.
 */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX UINT32_C(0x2)
+ uint16_t hds_threshold;
+ uint8_t unused_0[5];
 /*
- * When set to 1, the advertisement of pause is enabled. # When
- * the auto_mode is not set to none and this flag is set to 1,
- * then the auto_pause bits on this port are being advertised
- * and autoneg pause results are being interpreted. # When the
- * auto_mode is not set to none and this flag is set to 0, the
- * pause is forced as indicated in force_pause, and also
- * advertised as auto_pause bits, but the autoneg results are
- * not interpreted since the pause configuration is being
- * forced. # When the auto_mode is set to none and this flag is
- * set to 1, auto_pause bits should be ignored and should be set
- * to 0.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
 */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_AUTONEG_PAUSE UINT32_C(0x4)
- uint8_t unused_0;
- uint16_t auto_link_speed;
+ uint8_t valid;
+} __attribute__((packed));
+
+/**********************************
+ * hwrm_vnic_rss_cos_lb_ctx_alloc *
+ **********************************/
+
+
+/* hwrm_vnic_rss_cos_lb_ctx_alloc_input (size:128b/16B) */
+struct hwrm_vnic_rss_cos_lb_ctx_alloc_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
 /*
- * This is the speed that will be used if the autoneg_mode is
- * "one_speed" or "one_or_below". If an unsupported speed is
- * selected, an error will be generated.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/ - /* 100Mb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB UINT32_C(0x1) - /* 1Gb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB UINT32_C(0xa) - /* 2Gb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2GB UINT32_C(0x14) - /* 2.5Gb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB UINT32_C(0x19) - /* 10Gb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_10GB UINT32_C(0x64) - /* 20Mb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB UINT32_C(0xc8) - /* 25Gb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB UINT32_C(0xfa) - /* 40Gb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_40GB UINT32_C(0x190) - /* 50Gb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_50GB UINT32_C(0x1f4) - /* 100Gb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100GB UINT32_C(0x3e8) - /* 10Mb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_10MB UINT32_C(0xffff) - uint16_t auto_link_speed_mask; + uint16_t cmpl_ring; /* - * This is a mask of link speeds that will be used if - * autoneg_mode is "mask". If unsupported speed is enabled an - * error will be generated. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - /* 100Mb link speed (Half-duplex) */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MBHD \ - UINT32_C(0x1) - /* 100Mb link speed (Full-duplex) */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB \ - UINT32_C(0x2) - /* 1Gb link speed (Half-duplex) */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GBHD \ - UINT32_C(0x4) - /* 1Gb link speed (Full-duplex) */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB \ - UINT32_C(0x8) - /* 2Gb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2GB \ - UINT32_C(0x10) - /* 2.5Gb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB \ - UINT32_C(0x20) - /* 10Gb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB UINT32_C(0x40) - /* 20Gb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB UINT32_C(0x80) - /* 25Gb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB \ - UINT32_C(0x100) - /* 40Gb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB \ - UINT32_C(0x200) - /* 50Gb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB \ - UINT32_C(0x400) - /* 100Gb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB \ - UINT32_C(0x800) - /* 10Mb link speed (Half-duplex) */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10MBHD \ - UINT32_C(0x1000) - /* 10Mb link speed (Full-duplex) */ - #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10MB \ - UINT32_C(0x2000) - uint8_t wirespeed; - /* This value controls the wirespeed feature. */ - /* Wirespeed feature is disabled. */ - #define HWRM_PORT_PHY_CFG_INPUT_WIOUTPUTEED_OFF UINT32_C(0x0) - /* Wirespeed feature is enabled. */ - #define HWRM_PORT_PHY_CFG_INPUT_WIOUTPUTEED_ON UINT32_C(0x1) - uint8_t lpbk; - /* This value controls the loopback setting for the PHY. */ - /* No loopback is selected. Normal operation. 
*/ - #define HWRM_PORT_PHY_CFG_INPUT_LPBK_NONE UINT32_C(0x0) + uint16_t seq_id; /* - * The HW will be configured with local loopback - * such that host data is sent back to the host - * without modification. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - #define HWRM_PORT_PHY_CFG_INPUT_LPBK_LOCAL UINT32_C(0x1) + uint16_t target_id; /* - * The HW will be configured with remote - * loopback such that port logic will send - * packets back out the transmitter that are - * received. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - #define HWRM_PORT_PHY_CFG_INPUT_LPBK_REMOTE UINT32_C(0x2) - uint8_t force_pause; + uint64_t resp_addr; +} __attribute__((packed)); + +/* hwrm_vnic_rss_cos_lb_ctx_alloc_output (size:128b/16B) */ +struct hwrm_vnic_rss_cos_lb_ctx_alloc_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* rss_cos_lb_ctx_id is 16 b */ + uint16_t rss_cos_lb_ctx_id; + uint8_t unused_0[5]; /* - * This value is used to configure the pause that will be used - * for force mode. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ + uint8_t valid; +} __attribute__((packed)); + +/********************************* + * hwrm_vnic_rss_cos_lb_ctx_free * + *********************************/ + + +/* hwrm_vnic_rss_cos_lb_ctx_free_input (size:192b/24B) */ +struct hwrm_vnic_rss_cos_lb_ctx_free_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * When this bit is '1', Generation of tx pause messages is - * supported. Disabled otherwise. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - #define HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX UINT32_C(0x1) + uint16_t cmpl_ring; /* - * When this bit is '1', Reception of rx pause messages is - * supported. Disabled otherwise. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - #define HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX UINT32_C(0x2) - uint8_t unused_1; - uint32_t preemphasis; + uint16_t seq_id; /* - * This value controls the pre-emphasis to be used for the link. - * Driver should not set this value (use enable.preemphasis = 0) - * unless driver is sure of setting. Normally HWRM FW will - * determine proper pre-emphasis. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint16_t eee_link_speed_mask; + uint16_t target_id; /* - * Setting for link speed mask that is used to advertise speeds - * during autonegotiation when EEE is enabled. 
This field is - * valid only when EEE is enabled. The speeds specified in this - * field shall be a subset of speeds specified in - * auto_link_speed_mask. If EEE is enabled,then at least one - * speed shall be provided in this mask. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - /* Reserved */ - #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD1 UINT32_C(0x1) - /* 100Mb link speed (Full-duplex) */ - #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_100MB UINT32_C(0x2) - /* Reserved */ - #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD2 UINT32_C(0x4) - /* 1Gb link speed (Full-duplex) */ - #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_1GB UINT32_C(0x8) - /* Reserved */ - #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD3 UINT32_C(0x10) - /* Reserved */ - #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD4 UINT32_C(0x20) - /* 10Gb link speed */ - #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_10GB UINT32_C(0x40) - uint8_t unused_2; - uint8_t unused_3; - uint32_t tx_lpi_timer; - uint32_t unused_4; + uint64_t resp_addr; + /* rss_cos_lb_ctx_id is 16 b */ + uint16_t rss_cos_lb_ctx_id; + uint8_t unused_0[6]; +} __attribute__((packed)); + +/* hwrm_vnic_rss_cos_lb_ctx_free_output (size:128b/16B) */ +struct hwrm_vnic_rss_cos_lb_ctx_free_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * Reuested setting of TX LPI timer in microseconds. This field - * is valid only when EEE is enabled and TX LPI is enabled. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - #define HWRM_PORT_PHY_CFG_INPUT_TX_LPI_TIMER_MASK UINT32_C(0xffffff) - #define HWRM_PORT_PHY_CFG_INPUT_TX_LPI_TIMER_SFT 0 + uint8_t valid; } __attribute__((packed)); -/* Output (16 bytes) */ -struct hwrm_port_phy_cfg_output { - uint16_t error_code; +/******************* + * hwrm_ring_alloc * + *******************/ + + +/* hwrm_ring_alloc_input (size:640b/80B) */ +struct hwrm_ring_alloc_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + uint16_t cmpl_ring; /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * The sequence ID is used by the driver for tracking multiple + * commands. 
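Every request structure in this file opens with the same five header fields documented here (req_type, cmpl_ring, seq_id, target_id, resp_addr), so a driver typically fills them in one place. A minimal sketch for the context-free command just defined; the 0xFFFF target and the -1 "no completion ring" convention follow the field descriptions in this file, but the helper and the request-type value are illustrative and the transport call is left out:

#include <stdint.h>
#include <string.h>

/* Illustrative request-type value; the authoritative constant is
 * defined elsewhere in this header. */
#define EXAMPLE_VNIC_RSS_COS_LB_CTX_FREE UINT32_C(0x71)

static void vnic_ctx_free_prepare(struct hwrm_vnic_rss_cos_lb_ctx_free_input *req,
				  uint16_t seq, uint64_t resp_dma_addr,
				  uint16_t ctx_id)
{
	memset(req, 0, sizeof(*req));
	req->req_type = EXAMPLE_VNIC_RSS_COS_LB_CTX_FREE;
	req->cmpl_ring = (uint16_t)-1;	/* -1: no completion ring event */
	req->seq_id = seq;		/* opaque, echoed back in the response */
	req->target_id = 0xffff;	/* 0xFFFF addresses the HWRM itself */
	req->resp_addr = resp_dma_addr;	/* physically contiguous, DMA-able */
	req->rss_cos_lb_ctx_id = ctx_id; /* ID from the alloc response */
}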
This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; + uint16_t seq_id; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ -} __attribute__((packed)); - -/* hwrm_port_phy_qcfg */ -/* Description: This command queries the PHY configuration for the port. */ -/* Input (24 bytes) */ -struct hwrm_port_phy_qcfg_input { - uint16_t req_type; + uint16_t target_id; /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint16_t cmpl_ring; + uint64_t resp_addr; + uint32_t enables; /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * This bit must be '1' for the ring_arb_cfg field to be + * configured. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + #define HWRM_RING_ALLOC_INPUT_ENABLES_RING_ARB_CFG \ + UINT32_C(0x2) /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * This bit must be '1' for the stat_ctx_id_valid field to be + * configured. */ - uint64_t resp_addr; + #define HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID \ + UINT32_C(0x8) /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * This bit must be '1' for the max_bw_valid field to be + * configured. */ - uint16_t port_id; - /* Port ID of port that is to be queried. */ - uint16_t unused_0[3]; -} __attribute__((packed)); - -/* Output (96 bytes) */ -struct hwrm_port_phy_qcfg_output { - uint16_t error_code; + #define HWRM_RING_ALLOC_INPUT_ENABLES_MAX_BW_VALID \ + UINT32_C(0x20) /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * This bit must be '1' for the rx_ring_id field to be + * configured. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + #define HWRM_RING_ALLOC_INPUT_ENABLES_RX_RING_ID_VALID \ + UINT32_C(0x40) /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * This bit must be '1' for the nq_ring_id field to be + * configured. 
*/ - uint8_t link; - /* This value indicates the current link status. */ - /* There is no link or cable detected. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_NO_LINK UINT32_C(0x0) - /* There is no link, but a cable has been detected. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SIGNAL UINT32_C(0x1) - /* There is a link. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK UINT32_C(0x2) - uint8_t unused_0; - uint16_t link_speed; - /* This value indicates the current link speed of the connection. */ - /* 100Mb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB UINT32_C(0x1) - /* 1Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB UINT32_C(0xa) - /* 2Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB UINT32_C(0x14) - /* 2.5Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB UINT32_C(0x19) - /* 10Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB UINT32_C(0x64) - /* 20Mb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB UINT32_C(0xc8) - /* 25Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB UINT32_C(0xfa) - /* 40Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB UINT32_C(0x190) - /* 50Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB UINT32_C(0x1f4) - /* 100Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB UINT32_C(0x3e8) - /* 10Mb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10MB UINT32_C(0xffff) - uint8_t duplex_cfg; - /* This value is indicates the duplex of the current connection. */ - /* Half Duplex connection. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_CFG_HALF UINT32_C(0x0) - /* Full duplex connection. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_CFG_FULL UINT32_C(0x1) - uint8_t pause; + #define HWRM_RING_ALLOC_INPUT_ENABLES_NQ_RING_ID_VALID \ + UINT32_C(0x80) /* - * This value is used to indicate the current pause - * configuration. When autoneg is enabled, this value represents - * the autoneg results of pause configuration. + * This bit must be '1' for the rx_buf_size field to be + * configured. */ - /* - * When this bit is '1', Generation of tx pause messages is - * supported. Disabled otherwise. + #define HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID \ + UINT32_C(0x100) + /* Ring Type. */ + uint8_t ring_type; + /* L2 Completion Ring (CR) */ + #define HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL UINT32_C(0x0) + /* TX Ring (TR) */ + #define HWRM_RING_ALLOC_INPUT_RING_TYPE_TX UINT32_C(0x1) + /* RX Ring (RR) */ + #define HWRM_RING_ALLOC_INPUT_RING_TYPE_RX UINT32_C(0x2) + /* RoCE Notification Completion Ring (ROCE_CR) */ + #define HWRM_RING_ALLOC_INPUT_RING_TYPE_ROCE_CMPL UINT32_C(0x3) + /* RX Aggregation Ring */ + #define HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG UINT32_C(0x4) + /* Notification Queue */ + #define HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ UINT32_C(0x5) + #define HWRM_RING_ALLOC_INPUT_RING_TYPE_LAST \ + HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ + uint8_t unused_0[3]; + /* + * This value is a pointer to the page table for the + * Ring. + */ + uint64_t page_tbl_addr; + /* First Byte Offset of the first entry in the first page. */ + uint32_t fbo; + /* + * Actual page size in 2^page_size. The supported range is increments + * in powers of 2 from 16 bytes to 1GB. + * - 4 = 16 B + * Page size is 16 B. + * - 12 = 4 KB + * Page size is 4 KB. + * - 13 = 8 KB + * Page size is 8 KB. + * - 16 = 64 KB + * Page size is 64 KB. + * - 21 = 2 MB + * Page size is 2 MB. + * - 22 = 4 MB + * Page size is 4 MB. 
+ * - 30 = 1 GB + * Page size is 1 GB. + */ + uint8_t page_size; + /* + * This value indicates the depth of page table. + * For this version of the specification, value other than 0 or + * 1 shall be considered as an invalid value. + * When the page_tbl_depth = 0, then it is treated as a + * special case with the following. + * 1. FBO and page size fields are not valid. + * 2. page_tbl_addr is the physical address of the first + * element of the ring. + */ + uint8_t page_tbl_depth; + uint8_t unused_1[2]; + /* + * Number of 16B units in the ring. Minimum size for + * a ring is 16 16B entries. + */ + uint32_t length; + /* + * Logical ring number for the ring to be allocated. + * This value determines the position in the doorbell + * area where the update to the ring will be made. + * + * For completion rings, this value is also the MSI-X + * vector number for the function the completion ring is + * associated with. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX UINT32_C(0x1) + uint16_t logical_id; /* - * When this bit is '1', Reception of rx pause messages is - * supported. Disabled otherwise. + * This field is used only when ring_type is a TX ring. + * This value indicates what completion ring the TX ring + * is associated with. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX UINT32_C(0x2) - uint16_t support_speeds; + uint16_t cmpl_ring_id; /* - * The supported speeds for the port. This is a bit mask. For - * each speed that is supported, the corrresponding bit will be - * set to '1'. + * This field is used only when ring_type is a TX ring. + * This value indicates what CoS queue the TX ring + * is associated with. */ - /* 100Mb link speed (Half-duplex) */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MBHD UINT32_C(0x1) - /* 100Mb link speed (Full-duplex) */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MB UINT32_C(0x2) - /* 1Gb link speed (Half-duplex) */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GBHD UINT32_C(0x4) - /* 1Gb link speed (Full-duplex) */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GB UINT32_C(0x8) - /* 2Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2GB UINT32_C(0x10) - /* 2.5Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB UINT32_C(0x20) - /* 10Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10GB UINT32_C(0x40) - /* 20Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB UINT32_C(0x80) - /* 25Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB UINT32_C(0x100) - /* 40Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_40GB UINT32_C(0x200) - /* 50Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB UINT32_C(0x400) - /* 100Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB UINT32_C(0x800) - /* 10Mb link speed (Half-duplex) */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10MBHD UINT32_C(0x1000) - /* 10Mb link speed (Full-duplex) */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10MB UINT32_C(0x2000) - uint16_t force_link_speed; + uint16_t queue_id; /* - * Current setting of forced link speed. When the link speed is - * not being forced, this value shall be set to 0. + * When allocating a Rx ring or Rx aggregation ring, this field + * specifies the size of the buffer descriptors posted to the ring. 
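The page_tbl_depth = 0 special case described above lets a physically contiguous ring bypass the page table entirely: page_tbl_addr then points straight at the first ring entry, and fbo and page_size are ignored. A sketch under exactly that assumption; ring_phys and nentries are illustrative inputs:

/* Describe a contiguous L2 completion ring to hwrm_ring_alloc with no
 * page-table indirection (page_tbl_depth = 0). */
static void ring_alloc_fill(struct hwrm_ring_alloc_input *req,
			    uint64_t ring_phys, uint32_t nentries)
{
	req->ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL;
	req->page_tbl_addr = ring_phys;	/* first element of the ring */
	req->page_tbl_depth = 0;	/* FBO and page size not used */
	req->fbo = 0;
	req->length = nentries;		/* in 16B units, minimum 16 */
}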
*/ - /* 100Mb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_100MB UINT32_C(0x1) - /* 1Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_1GB UINT32_C(0xa) - /* 2Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_2GB UINT32_C(0x14) - /* 2.5Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_2_5GB UINT32_C(0x19) - /* 10Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_10GB UINT32_C(0x64) - /* 20Mb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_20GB UINT32_C(0xc8) - /* 25Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_25GB UINT32_C(0xfa) - /* 40Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_40GB UINT32_C(0x190) - /* 50Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_50GB UINT32_C(0x1f4) - /* 100Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_100GB UINT32_C(0x3e8) - /* 10Mb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_10MB UINT32_C(0xffff) - uint8_t auto_mode; - /* Current setting of auto negotiation mode. */ + uint16_t rx_buf_size; /* - * Disable autoneg or autoneg disabled. No - * speeds are selected. + * When allocating an Rx aggregation ring, this field + * specifies the associated Rx ring ID. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE UINT32_C(0x0) - /* Select all possible speeds for autoneg mode. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_ALL_SPEEDS UINT32_C(0x1) + uint16_t rx_ring_id; /* - * Select only the auto_link_speed speed for - * autoneg mode. This mode has been DEPRECATED. - * An HWRM client should not use this mode. + * When allocating a completion ring, this field + * specifies the associated NQ ring ID. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_ONE_SPEED UINT32_C(0x2) + uint16_t nq_ring_id; /* - * Select the auto_link_speed or any speed below - * that speed for autoneg. This mode has been - * DEPRECATED. An HWRM client should not use - * this mode. + * This field is used only when ring_type is a TX ring. + * This field is used to configure arbitration related + * parameters for a TX ring. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_ONE_OR_BELOW UINT32_C(0x3) + uint16_t ring_arb_cfg; + /* Arbitration policy used for the ring. */ + #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_MASK \ + UINT32_C(0xf) + #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_SFT 0 /* - * Select the speeds based on the corresponding - * link speed mask value that is provided. + * Use strict priority for the TX ring. + * Priority value is specified in arb_policy_param */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_SPEED_MASK UINT32_C(0x4) - uint8_t auto_pause; + #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_SP \ + UINT32_C(0x1) /* - * Current setting of pause autonegotiation. Move autoneg_pause - * flag here. + * Use weighted fair queue arbitration for the TX ring. + * Weight is specified in arb_policy_param */ + #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_WFQ \ + UINT32_C(0x2) + #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_LAST \ + HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_WFQ + /* Reserved field. */ + #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_RSVD_MASK \ + UINT32_C(0xf0) + #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_RSVD_SFT 4 + /* + * Arbitration policy specific parameter. + * # For strict priority arbitration policy, this field + * represents a priority value. 
If set to 0, then the priority + * is not specified and the HWRM is allowed to select + * any priority for this TX ring. + * # For weighted fair queue arbitration policy, this field + * represents a weight value. If set to 0, then the weight + * is not specified and the HWRM is allowed to select + * any weight for this TX ring. + */ + #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_PARAM_MASK \ + UINT32_C(0xff00) + #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_PARAM_SFT 8 + uint16_t unused_3; /* - * When this bit is '1', Generation of tx pause messages has - * been requested. Disabled otherwise. + * This field is reserved for future use. + * It shall be set to 0. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_PAUSE_TX UINT32_C(0x1) + uint32_t reserved3; /* - * When this bit is '1', Reception of rx pause messages has been - * requested. Disabled otherwise. + * This field is used only when ring_type is a TX ring. + * This input indicates what statistics context this ring + * should be associated with. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_PAUSE_RX UINT32_C(0x2) + uint32_t stat_ctx_id; /* - * When set to 1, the advertisement of pause is enabled. # When - * the auto_mode is not set to none and this flag is set to 1, - * then the auto_pause bits on this port are being advertised - * and autoneg pause results are being interpreted. # When the - * auto_mode is not set to none and this flag is set to 0, the - * pause is forced as indicated in force_pause, and also - * advertised as auto_pause bits, but the autoneg results are - * not interpreted since the pause configuration is being - * forced. # When the auto_mode is set to none and this flag is - * set to 1, auto_pause bits should be ignored and should be set - * to 0. + * This field is reserved for future use. + * It shall be set to 0. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_PAUSE_AUTONEG_PAUSE UINT32_C(0x4) - uint16_t auto_link_speed; + uint32_t reserved4; /* - * Current setting for auto_link_speed. This field is only valid - * when auto_mode is set to "one_speed" or "one_or_below". + * This field is used only when ring_type is a TX ring + * to specify maximum BW allocated to the TX ring. + * The HWRM will translate this value into byte counter and + * time interval used for this ring inside the device. */ - /* 100Mb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_100MB UINT32_C(0x1) - /* 1Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_1GB UINT32_C(0xa) - /* 2Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_2GB UINT32_C(0x14) - /* 2.5Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_2_5GB UINT32_C(0x19) - /* 10Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_10GB UINT32_C(0x64) - /* 20Mb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_20GB UINT32_C(0xc8) - /* 25Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_25GB UINT32_C(0xfa) - /* 40Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_40GB UINT32_C(0x190) - /* 50Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_50GB UINT32_C(0x1f4) - /* 100Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_100GB UINT32_C(0x3e8) - /* 10Mb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_10MB UINT32_C(0xffff) - uint16_t auto_link_speed_mask; + uint32_t max_bw; + /* The bandwidth value.
*/ + #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_SFT 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_RING_ALLOC_INPUT_MAX_BW_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_RING_ALLOC_INPUT_MAX_BW_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_RING_ALLOC_INPUT_MAX_BW_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_RING_ALLOC_INPUT_MAX_BW_SCALE_LAST \ + HWRM_RING_ALLOC_INPUT_MAX_BW_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_SFT 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_LAST \ + HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_INVALID /* - * Current setting for auto_link_speed_mask that is used to - * advertise speeds during autonegotiation. This field is only - * valid when auto_mode is set to "mask". The speeds specified - * in this field shall be a subset of supported speeds on this - * port. + * This field is used only when ring_type is a Completion ring. + * This value indicates what interrupt mode should be used + * on this completion ring. + * Note: In the legacy interrupt mode, no more than 16 + * completion rings are allowed. 
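max_bw packs a 28-bit value with a scale bit and a 3-bit unit field, and ring_arb_cfg packs an arbitration policy nibble with a parameter byte; both are gated by bits in enables. A sketch composing the macros above into a 2500 Mb cap with strict priority 1; the numbers are arbitrary examples, not recommended settings:

/* Encode a 2500 Mb TX bandwidth cap (bits, mega units) plus a
 * strict-priority arbitration policy with priority value 1, and set
 * the matching enables bits so firmware applies both fields. */
static void ring_alloc_tx_shaping(struct hwrm_ring_alloc_input *req)
{
	req->max_bw = (2500 & HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_MASK) |
		      HWRM_RING_ALLOC_INPUT_MAX_BW_SCALE_BITS |
		      HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_MEGA;
	req->ring_arb_cfg =
		HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_SP |
		(1 << HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_PARAM_SFT);
	req->enables |= HWRM_RING_ALLOC_INPUT_ENABLES_MAX_BW_VALID |
			HWRM_RING_ALLOC_INPUT_ENABLES_RING_ARB_CFG;
}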
*/ - /* 100Mb link speed (Half-duplex) */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_100MBHD \ - UINT32_C(0x1) - /* 100Mb link speed (Full-duplex) */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_100MB \ - UINT32_C(0x2) - /* 1Gb link speed (Half-duplex) */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_1GBHD \ - UINT32_C(0x4) - /* 1Gb link speed (Full-duplex) */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_1GB UINT32_C(0x8) - /* 2Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_2GB \ - UINT32_C(0x10) - /* 2.5Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_2_5GB \ - UINT32_C(0x20) - /* 10Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_10GB \ - UINT32_C(0x40) - /* 20Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_20GB \ - UINT32_C(0x80) - /* 25Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_25GB \ - UINT32_C(0x100) - /* 40Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_40GB \ - UINT32_C(0x200) - /* 50Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_50GB \ - UINT32_C(0x400) - /* 100Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_100GB \ - UINT32_C(0x800) - /* 10Mb link speed (Half-duplex) */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_10MBHD \ - UINT32_C(0x1000) - /* 10Mb link speed (Full-duplex) */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_10MB \ - UINT32_C(0x2000) - uint8_t wirespeed; - /* Current setting for wirespeed. */ - /* Wirespeed feature is disabled. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_WIOUTPUTEED_OFF UINT32_C(0x0) - /* Wirespeed feature is enabled. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_WIOUTPUTEED_ON UINT32_C(0x1) - uint8_t lpbk; - /* Current setting for loopback. */ - /* No loopback is selected. Normal operation. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_NONE UINT32_C(0x0) + uint8_t int_mode; + /* Legacy INTA */ + #define HWRM_RING_ALLOC_INPUT_INT_MODE_LEGACY UINT32_C(0x0) + /* Reserved */ + #define HWRM_RING_ALLOC_INPUT_INT_MODE_RSVD UINT32_C(0x1) + /* MSI-X */ + #define HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX UINT32_C(0x2) + /* No Interrupt - Polled mode */ + #define HWRM_RING_ALLOC_INPUT_INT_MODE_POLL UINT32_C(0x3) + #define HWRM_RING_ALLOC_INPUT_INT_MODE_LAST \ + HWRM_RING_ALLOC_INPUT_INT_MODE_POLL + uint8_t unused_4[3]; +} __attribute__((packed)); + +/* hwrm_ring_alloc_output (size:128b/16B) */ +struct hwrm_ring_alloc_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; /* - * The HW will be configured with local loopback - * such that host data is sent back to the host - * without modification. + * Physical number of ring allocated. + * This value shall be unique for a ring type. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_LOCAL UINT32_C(0x1) + uint16_t ring_id; + /* Logical number of ring allocated. */ + uint16_t logical_ring_id; + uint8_t unused_0[3]; /* - * The HW will be configured with remote - * loopback such that port logic will send - * packets back out the transmitter that are - * received. + * This field is used in Output records to indicate that the output + * is completely written to RAM. 
This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_REMOTE UINT32_C(0x2) - uint8_t force_pause; + uint8_t valid; +} __attribute__((packed)); + +/****************** + * hwrm_ring_free * + ******************/ + + +/* hwrm_ring_free_input (size:192b/24B) */ +struct hwrm_ring_free_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * Current setting of forced pause. When the pause configuration - * is not being forced, then this value shall be set to 0. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ + uint16_t cmpl_ring; /* - * When this bit is '1', Generation of tx pause messages is - * supported. Disabled otherwise. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_PAUSE_TX UINT32_C(0x1) + uint16_t seq_id; /* - * When this bit is '1', Reception of rx pause messages is - * supported. Disabled otherwise. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_PAUSE_RX UINT32_C(0x2) - uint8_t module_status; + uint16_t target_id; /* - * This value indicates the current status of the optics module - * on this port. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - /* Module is inserted and accepted */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_NONE UINT32_C(0x0) - /* Module is rejected and transmit side Laser is disabled. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_DISABLETX UINT32_C(0x1) - /* Module mismatch warning. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_WARNINGMSG UINT32_C(0x2) - /* Module is rejected and powered down. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_PWRDOWN UINT32_C(0x3) - /* Module is not inserted. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_NOTINSERTED \ - UINT32_C(0x4) - /* Module status is not applicable. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_NOTAPPLICABLE \ - UINT32_C(0xff) - uint32_t preemphasis; - /* Current setting for preemphasis. */ - uint8_t phy_maj; - /* This field represents the major version of the PHY. */ - uint8_t phy_min; - /* This field represents the minor version of the PHY. */ - uint8_t phy_bld; - /* This field represents the build version of the PHY. */ - uint8_t phy_type; - /* This value represents a PHY type. 
*/ - /* Unknown */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_UNKNOWN UINT32_C(0x0) - /* BASE-CR */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASECR UINT32_C(0x1) - /* BASE-KR4 (Deprecated) */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR4 UINT32_C(0x2) - /* BASE-LR */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASELR UINT32_C(0x3) - /* BASE-SR */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASESR UINT32_C(0x4) - /* BASE-KR2 (Deprecated) */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR2 UINT32_C(0x5) - /* BASE-KX */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKX UINT32_C(0x6) - /* BASE-KR */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR UINT32_C(0x7) - /* BASE-T */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET UINT32_C(0x8) - /* EEE capable BASE-T */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE UINT32_C(0x9) - /* SGMII connected external PHY */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_SGMIIEXTPHY UINT32_C(0xa) - /* 25G_BASECR_CA_L */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_L UINT32_C(0xb) - /* 25G_BASECR_CA_S */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_S UINT32_C(0xc) - /* 25G_BASECR_CA_N */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_N UINT32_C(0xd) - /* 25G_BASESR */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASESR UINT32_C(0xe) - /* 100G_BASECR4 */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASECR4 UINT32_C(0xf) - /* 100G_BASESR4 */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR4 UINT32_C(0x10) - /* 100G_BASELR4 */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASELR4 UINT32_C(0x11) - /* 100G_BASEER4 */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASEER4 UINT32_C(0x12) - /* 100G_BASESR10 */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR10 UINT32_C(0x13) - /* 40G_BASECR4 */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASECR4 UINT32_C(0x14) - /* 40G_BASESR4 */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASESR4 UINT32_C(0x15) - /* 40G_BASELR4 */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASELR4 UINT32_C(0x16) - /* 40G_BASEER4 */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASEER4 UINT32_C(0x17) - /* 40G_ACTIVE_CABLE */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_ACTIVE_CABLE \ - UINT32_C(0x18) - #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASET UINT32_C(0x19) - /* 1G_baseSX */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASESX UINT32_C(0x1a) - /* 1G_baseCX */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASECX UINT32_C(0x1b) - uint8_t media_type; - /* This value represents a media type. */ - /* Unknown */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_UNKNOWN UINT32_C(0x0) - /* Twisted Pair */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP UINT32_C(0x1) - /* Direct Attached Copper */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_DAC UINT32_C(0x2) - /* Fiber */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_FIBRE UINT32_C(0x3) - uint8_t xcvr_pkg_type; - /* This value represents a transceiver type. */ - /* PHY and MAC are in the same package */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_PKG_TYPE_XCVR_INTERNAL \ - UINT32_C(0x1) - /* PHY and MAC are in different packages */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_PKG_TYPE_XCVR_EXTERNAL \ - UINT32_C(0x2) - uint8_t eee_config_phy_addr; + uint64_t resp_addr; + /* Ring Type. 
*/ + uint8_t ring_type; + /* L2 Completion Ring (CR) */ + #define HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL UINT32_C(0x0) + /* TX Ring (TR) */ + #define HWRM_RING_FREE_INPUT_RING_TYPE_TX UINT32_C(0x1) + /* RX Ring (RR) */ + #define HWRM_RING_FREE_INPUT_RING_TYPE_RX UINT32_C(0x2) + /* RoCE Notification Completion Ring (ROCE_CR) */ + #define HWRM_RING_FREE_INPUT_RING_TYPE_ROCE_CMPL UINT32_C(0x3) + /* RX Aggregation Ring */ + #define HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG UINT32_C(0x4) + /* Notification Queue */ + #define HWRM_RING_FREE_INPUT_RING_TYPE_NQ UINT32_C(0x5) + #define HWRM_RING_FREE_INPUT_RING_TYPE_LAST \ + HWRM_RING_FREE_INPUT_RING_TYPE_NQ + uint8_t unused_0; + /* Physical number of ring allocated. */ + uint16_t ring_id; + uint8_t unused_1[4]; +} __attribute__((packed)); + +/* hwrm_ring_free_output (size:128b/16B) */ +struct hwrm_ring_free_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * This field represents flags related to EEE configuration. - * These EEE configuration flags are valid only when the - * auto_mode is not set to none (in other words autonegotiation - * is enabled). + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - /* This field represents PHY address. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_ADDR_MASK UINT32_C(0x1f) - #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_ADDR_SFT 0 + uint8_t valid; +} __attribute__((packed)); + +/************************************** + * hwrm_ring_cmpl_ring_qaggint_params * + **************************************/ + + +/* hwrm_ring_cmpl_ring_qaggint_params_input (size:192b/24B) */ +struct hwrm_ring_cmpl_ring_qaggint_params_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * When set to 1, Energy Efficient Ethernet (EEE) mode is - * enabled. Speeds for autoneg with EEE mode enabled are based - * on eee_link_speed_mask. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_EEE_ENABLED UINT32_C(0x20) + uint16_t cmpl_ring; /* - * This flag is valid only when eee_enabled is set to 1. # If - * eee_enabled is set to 0, then EEE mode is disabled and this - * flag shall be ignored. # If eee_enabled is set to 1 and this - * flag is set to 1, then Energy Efficient Ethernet (EEE) mode - * is enabled and in use. # If eee_enabled is set to 1 and this - * flag is set to 0, then Energy Efficient Ethernet (EEE) mode - * is enabled but is currently not in use. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_EEE_ACTIVE UINT32_C(0x40) + uint16_t seq_id; /* - * This flag is valid only when eee_enabled is set to 1. # If - * eee_enabled is set to 0, then EEE mode is disabled and this - * flag shall be ignored. 
# If eee_enabled is set to 1 and this - * flag is set to 1, then Energy Efficient Ethernet (EEE) mode - * is enabled and TX LPI is enabled. # If eee_enabled is set to - * 1 and this flag is set to 0, then Energy Efficient Ethernet - * (EEE) mode is enabled but TX LPI is disabled. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_EEE_TX_LPI UINT32_C(0x80) + uint16_t target_id; /* - * This field represents flags related to EEE configuration. - * These EEE configuration flags are valid only when the - * auto_mode is not set to none (in other words autonegotiation - * is enabled). + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_MASK UINT32_C(0xe0) - #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_SFT 5 - uint8_t parallel_detect; - /* Reserved field, set to 0 */ + uint64_t resp_addr; + /* Physical number of completion ring. */ + uint16_t ring_id; + uint8_t unused_0[6]; +} __attribute__((packed)); + +/* hwrm_ring_cmpl_ring_qaggint_params_output (size:256b/32B) */ +struct hwrm_ring_cmpl_ring_qaggint_params_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint16_t flags; /* - * When set to 1, the parallel detection is used to determine - * the speed of the link partner. Parallel detection is used - * when a autonegotiation capable device is connected to a link - * parter that is not capable of autonegotiation. + * When this bit is set to '1', interrupt max + * timer is reset whenever a completion is received. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_PARALLEL_DETECT UINT32_C(0x1) - /* Reserved field, set to 0 */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_RESERVED_MASK UINT32_C(0xfe) - #define HWRM_PORT_PHY_QCFG_OUTPUT_RESERVED_SFT 1 - uint16_t link_partner_adv_speeds; + #define HWRM_RING_CMPL_RING_QAGGINT_PARAMS_OUTPUT_FLAGS_TIMER_RESET \ + UINT32_C(0x1) /* - * The advertised speeds for the port by the link partner. Each - * advertised speed will be set to '1'. + * When this bit is set to '1', ring idle mode + * aggregation will be enabled. 
*/ - /* 100Mb link speed (Half-duplex) */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_100MBHD \ - UINT32_C(0x1) - /* 100Mb link speed (Full-duplex) */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_100MB \ + #define HWRM_RING_CMPL_RING_QAGGINT_PARAMS_OUTPUT_FLAGS_RING_IDLE \ UINT32_C(0x2) - /* 1Gb link speed (Half-duplex) */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_1GBHD \ - UINT32_C(0x4) - /* 1Gb link speed (Full-duplex) */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_1GB \ - UINT32_C(0x8) - /* 2Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_2GB \ - UINT32_C(0x10) - /* 2.5Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_2_5GB \ - UINT32_C(0x20) - /* 10Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_10GB \ - UINT32_C(0x40) - /* 20Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_20GB \ - UINT32_C(0x80) - /* 25Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_25GB \ - UINT32_C(0x100) - /* 40Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_40GB \ - UINT32_C(0x200) - /* 50Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_50GB \ - UINT32_C(0x400) - /* 100Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_100GB \ - UINT32_C(0x800) - /* 10Mb link speed (Half-duplex) */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_10MBHD \ - UINT32_C(0x1000) - /* 10Mb link speed (Full-duplex) */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_10MB \ - UINT32_C(0x2000) - uint8_t link_partner_adv_auto_mode; /* - * The advertised autoneg for the port by the link partner. This - * field is deprecated and should be set to 0. + * Number of completions to aggregate before DMA + * during the normal mode. */ + uint16_t num_cmpl_dma_aggr; /* - * Disable autoneg or autoneg disabled. No - * speeds are selected. + * Number of completions to aggregate before DMA + * during the interrupt mode. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_NONE \ - UINT32_C(0x0) - /* Select all possible speeds for autoneg mode. */ - #define \ - HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS \ - UINT32_C(0x1) + uint16_t num_cmpl_dma_aggr_during_int; /* - * Select only the auto_link_speed speed for - * autoneg mode. This mode has been DEPRECATED. - * An HWRM client should not use this mode. + * Timer in unit of 80-nsec used to aggregate completions before + * DMA during the normal mode (not in interrupt mode). */ - #define \ - HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED \ - UINT32_C(0x2) + uint16_t cmpl_aggr_dma_tmr; /* - * Select the auto_link_speed or any speed below - * that speed for autoneg. This mode has been - * DEPRECATED. An HWRM client should not use - * this mode. + * Timer in unit of 80-nsec used to aggregate completions before + * DMA during the interrupt mode. */ - #define \ - HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_ONE_OR_BELOW \ - UINT32_C(0x3) + uint16_t cmpl_aggr_dma_tmr_during_int; + /* Minimum time (in unit of 80-nsec) between two interrupts. */ + uint16_t int_lat_tmr_min; /* - * Select the speeds based on the corresponding - * link speed mask value that is provided. + * Maximum wait time (in unit of 80-nsec) spent aggregating + * completions before signaling the interrupt after the + * interrupt is enabled. 
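All of the aggregation timers listed above are expressed in 80 ns ticks, so a driver converting from microseconds scales by 1000/80 = 12.5. A sketch with integer arithmetic; the helper name is illustrative:

#include <stdint.h>

/* Convert microseconds to the 80 ns units used by the aggregation
 * timer fields above; e.g. 100 us becomes 1250 ticks. */
static inline uint16_t usec_to_80ns(uint32_t usec)
{
	return (uint16_t)((usec * 1000U) / 80U);
}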
*/ - #define \ - HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK \ - UINT32_C(0x4) - uint8_t link_partner_adv_pause; - /* The advertised pause settings on the port by the link partner. */ + uint16_t int_lat_tmr_max; /* - * When this bit is '1', Generation of tx pause messages is - * supported. Disabled otherwise. + * Minimum number of completions aggregated before signaling + * an interrupt. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_PAUSE_TX \ - UINT32_C(0x1) + uint16_t num_cmpl_aggr_int; + uint8_t unused_0[7]; /* - * When this bit is '1', Reception of rx pause messages is - * supported. Disabled otherwise. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_PAUSE_RX \ - UINT32_C(0x2) - uint16_t adv_eee_link_speed_mask; + uint8_t valid; +} __attribute__((packed)); + +/***************************************** + * hwrm_ring_cmpl_ring_cfg_aggint_params * + *****************************************/ + + +/* hwrm_ring_cmpl_ring_cfg_aggint_params_input (size:320b/40B) */ +struct hwrm_ring_cmpl_ring_cfg_aggint_params_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * Current setting for link speed mask that is used to advertise - * speeds during autonegotiation when EEE is enabled. This field - * is valid only when eee_enabled flags is set to 1. The speeds - * specified in this field shall be a subset of speeds specified - * in auto_link_speed_mask. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - /* Reserved */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_RSVD1 \ - UINT32_C(0x1) - /* 100Mb link speed (Full-duplex) */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_100MB \ - UINT32_C(0x2) - /* Reserved */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_RSVD2 \ - UINT32_C(0x4) - /* 1Gb link speed (Full-duplex) */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_1GB \ - UINT32_C(0x8) - /* Reserved */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_RSVD3 \ - UINT32_C(0x10) - /* Reserved */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_RSVD4 \ - UINT32_C(0x20) - /* 10Gb link speed */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_10GB \ - UINT32_C(0x40) - uint16_t link_partner_adv_eee_link_speed_mask; + uint16_t cmpl_ring; /* - * Current setting for link speed mask that is advertised by the - * link partner when EEE is enabled. This field is valid only - * when eee_enabled flags is set to 1. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - /* Reserved */ - #define \ - HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD1 \ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. 
This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + /* Physical number of completion ring. */ + uint16_t ring_id; + uint16_t flags; + /* + * When this bit is set to '1', interrupt latency max + * timer is reset whenever a completion is received. + */ + #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET \ UINT32_C(0x1) - /* 100Mb link speed (Full-duplex) */ - #define \ - HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_100MB \ + /* + * When this bit is set to '1', ring idle mode + * aggregation will be enabled. + */ + #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE \ UINT32_C(0x2) - /* Reserved */ - #define \ - HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD2 \ + /* + * Set this flag to 1 when configuring parameters on a + * notification queue. Set this flag to 0 when configuring + * parameters on a completion queue. + */ + #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_IS_NQ \ UINT32_C(0x4) - /* 1Gb link speed (Full-duplex) */ - #define \ - HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_1GB \ - UINT32_C(0x8) - /* Reserved */ - #define \ - HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD3 \ - UINT32_C(0x10) - /* Reserved */ - #define \ - HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD4 \ - UINT32_C(0x20) - /* 10Gb link speed */ - #define \ - HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_10GB \ - UINT32_C(0x40) - uint32_t xcvr_identifier_type_tx_lpi_timer; - /* This value represents transceiver identifier type. */ /* - * Current setting of TX LPI timer in microseconds. This field - * is valid only when_eee_enabled flag is set to 1 and - * tx_lpi_enabled is set to 1. + * Number of completions to aggregate before DMA + * during the normal mode. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_TX_LPI_TIMER_MASK UINT32_C(0xffffff) - #define HWRM_PORT_PHY_QCFG_OUTPUT_TX_LPI_TIMER_SFT 0 - /* This value represents transceiver identifier type. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_MASK \ - UINT32_C(0xff000000) - #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_SFT 24 - /* Unknown */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_UNKNOWN \ - (UINT32_C(0x0) << 24) - /* SFP/SFP+/SFP28 */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_SFP \ - (UINT32_C(0x3) << 24) - /* QSFP */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFP \ - (UINT32_C(0xc) << 24) - /* QSFP+ */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFPPLUS \ - (UINT32_C(0xd) << 24) - /* QSFP28 */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFP28 \ - (UINT32_C(0x11) << 24) - uint16_t fec_cfg; + uint16_t num_cmpl_dma_aggr; + /* + * Number of completions to aggregate before DMA + * during the interrupt mode. + */ + uint16_t num_cmpl_dma_aggr_during_int; + /* + * Timer in unit of 80-nsec used to aggregate completions before + * DMA during the normal mode (not in interrupt mode). + */ + uint16_t cmpl_aggr_dma_tmr; + /* + * Timer in unit of 80-nsec used to aggregate completions before + * DMA during the interrupt mode. + */ + uint16_t cmpl_aggr_dma_tmr_during_int; + /* Minimum time (in unit of 80-nsec) between two interrupts. 
*/ + uint16_t int_lat_tmr_min; + /* + * Maximum wait time (in unit of 80-nsec) spent aggregating + * cmpls before signaling the interrupt after the + * interrupt is enabled. + */ + uint16_t int_lat_tmr_max; + /* + * Minimum number of completions aggregated before signaling + * an interrupt. + */ + uint16_t num_cmpl_aggr_int; /* - * This value represents the current configuration of Forward - * Error Correction (FEC) on the port. + * Bitfield that indicates which parameters are to be applied. Only + * required when configuring devices with notification queues, and + * used in that case to set certain parameters on completion queues + * and others on notification queues. */ + uint16_t enables; /* - * When set to 1, then FEC is not supported on this port. If - * this flag is set to 1, then all other FEC configuration flags - * shall be ignored. When set to 0, then FEC is supported as - * indicated by other configuration flags. If no cable is - * attached and the HWRM does not yet know the FEC capability, - * then the HWRM shall set this flag to 1 when reporting FEC - * capability. + * This bit must be '1' for the num_cmpl_dma_aggr field to be + * configured. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_NONE_SUPPORTED \ + #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_NUM_CMPL_DMA_AGGR \ UINT32_C(0x1) /* - * When set to 1, then FEC autonegotiation is supported on this - * port. When set to 0, then FEC autonegotiation is not - * supported on this port. + * This bit must be '1' for the num_cmpl_dma_aggr_during_int field to be + * configured. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_AUTONEG_SUPPORTED \ + #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_NUM_CMPL_DMA_AGGR_DURING_INT \ UINT32_C(0x2) /* - * When set to 1, then FEC autonegotiation is enabled on this - * port. When set to 0, then FEC autonegotiation is disabled if - * supported. This flag should be ignored if FEC autonegotiation - * is not supported on this port. + * This bit must be '1' for the cmpl_aggr_dma_tmr field to be + * configured. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_AUTONEG_ENABLED \ + #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_CMPL_AGGR_DMA_TMR \ UINT32_C(0x4) /* - * When set to 1, then FEC CLAUSE 74 (Fire Code) is supported on - * this port. When set to 0, then FEC CLAUSE 74 (Fire Code) is - * not supported on this port. + * This bit must be '1' for the int_lat_tmr_min field to be + * configured. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_CLAUSE74_SUPPORTED \ + #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_INT_LAT_TMR_MIN \ UINT32_C(0x8) /* - * When set to 1, then FEC CLAUSE 74 (Fire Code) is enabled on - * this port. When set to 0, then FEC CLAUSE 74 (Fire Code) is - * disabled if supported. This flag should be ignored if FEC - * CLAUSE 74 is not supported on this port. + * This bit must be '1' for the int_lat_tmr_max field to be + * configured. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_CLAUSE74_ENABLED \ + #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_INT_LAT_TMR_MAX \ UINT32_C(0x10) /* - * When set to 1, then FEC CLAUSE 91 (Reed Solomon) is supported - * on this port. When set to 0, then FEC CLAUSE 91 (Reed - * Solomon) is not supported on this port. + * This bit must be '1' for the num_cmpl_aggr_int field to be + * configured. 
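Because the enables bitfield gates which of the fields above firmware actually applies, a partial reconfiguration sets only the bits for the fields it fills. A sketch that DMAs every fourth completion and caps interrupt latency at 100 us (1250 ticks, per the conversion shown earlier); it assumes the common header fields were initialized separately:

/* Apply two coalescing parameters and flag exactly those two in
 * 'enables'; untouched fields keep their firmware defaults. */
static void ring_coal_cfg(struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req,
			  uint16_t ring_id)
{
	req->ring_id = ring_id;
	req->num_cmpl_dma_aggr = 4;
	req->int_lat_tmr_max = 1250;	/* 100 us in 80 ns units */
	req->enables =
	    HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_NUM_CMPL_DMA_AGGR |
	    HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_INT_LAT_TMR_MAX;
}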
*/ - #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_CLAUSE91_SUPPORTED \ + #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_NUM_CMPL_AGGR_INT \ UINT32_C(0x20) + uint8_t unused_0[4]; +} __attribute__((packed)); + +/* hwrm_ring_cmpl_ring_cfg_aggint_params_output (size:128b/16B) */ +struct hwrm_ring_cmpl_ring_cfg_aggint_params_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * When set to 1, then FEC CLAUSE 91 (Reed Solomon) is enabled - * on this port. When set to 0, then FEC CLAUSE 91 (Reed - * Solomon) is disabled if supported. This flag should be - * ignored if FEC CLAUSE 91 is not supported on this port. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_CLAUSE91_ENABLED \ - UINT32_C(0x40) - uint8_t duplex_state; + uint8_t valid; +} __attribute__((packed)); + +/******************* + * hwrm_ring_reset * + *******************/ + + +/* hwrm_ring_reset_input (size:192b/24B) */ +struct hwrm_ring_reset_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * This value is indicates the duplex of the current connection - * state. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - /* Half Duplex connection. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_STATE_HALF UINT32_C(0x0) - /* Full duplex connection. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_STATE_FULL UINT32_C(0x1) - uint8_t unused_1; - char phy_vendor_name[16]; + uint16_t cmpl_ring; /* - * Up to 16 bytes of null padded ASCII string representing PHY - * vendor. If the string is set to null, then the vendor name is - * not available. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - char phy_vendor_partnumber[16]; + uint16_t seq_id; /* - * Up to 16 bytes of null padded ASCII string that identifies - * vendor specific part number of the PHY. If the string is set - * to null, then the vendor specific part number is not - * available. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint32_t unused_2; - uint8_t unused_3; - uint8_t unused_4; - uint8_t unused_5; - uint8_t valid; + uint16_t target_id; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. 
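
A recurring contract in these output structs is the trailing valid byte just described: firmware DMAs the response into host memory and writes valid last, so no other response field may be trusted until it reads as '1'. A hedged sketch of that wait, using DPDK's rte_pause() to be polite to the CPU; the poll bound and any read-barrier policy are illustrative, not the driver's actual loop.

#include <stdint.h>
#include <rte_pause.h>

/* Spin until firmware sets the trailing 'valid' byte of a response
 * buffer, or give up after 'spins' polls. */
static int wait_resp_valid(const volatile uint8_t *valid, uint32_t spins)
{
	while (spins--) {
		if (*valid == 1)
			return 0;   /* remaining response fields are now stable */
		rte_pause();
	}
	return -1;                  /* timed out */
}

A caller would poll the last byte of the response area, e.g. wait_resp_valid(&resp.valid, 1000000), before reading anything else from it.
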
This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ + uint64_t resp_addr; + /* Ring Type. */ + uint8_t ring_type; + /* L2 Completion Ring (CR) */ + #define HWRM_RING_RESET_INPUT_RING_TYPE_L2_CMPL UINT32_C(0x0) + /* TX Ring (TR) */ + #define HWRM_RING_RESET_INPUT_RING_TYPE_TX UINT32_C(0x1) + /* RX Ring (RR) */ + #define HWRM_RING_RESET_INPUT_RING_TYPE_RX UINT32_C(0x2) + /* RoCE Notification Completion Ring (ROCE_CR) */ + #define HWRM_RING_RESET_INPUT_RING_TYPE_ROCE_CMPL UINT32_C(0x3) + #define HWRM_RING_RESET_INPUT_RING_TYPE_LAST \ + HWRM_RING_RESET_INPUT_RING_TYPE_ROCE_CMPL + uint8_t unused_0; + /* Physical number of the ring. */ + uint16_t ring_id; + uint8_t unused_1[4]; } __attribute__((packed)); -/* hwrm_port_qstats */ -/* Description: This function returns per port Ethernet statistics. */ -/* Input (40 bytes) */ -struct hwrm_port_qstats_input { - uint16_t req_type; +/* hwrm_ring_reset_output (size:128b/16B) */ +struct hwrm_ring_reset_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - uint16_t cmpl_ring; + uint8_t valid; +} __attribute__((packed)); + +/*********************** + * hwrm_ring_grp_alloc * + ***********************/ + + +/* hwrm_ring_grp_alloc_input (size:192b/24B) */ +struct hwrm_ring_grp_alloc_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + uint16_t cmpl_ring; /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint64_t resp_addr; + uint16_t seq_id; /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint16_t port_id; - /* Port ID of port that is being queried. 
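
Taken together, hwrm_ring_reset_input needs only the ring type and the physical ring ID. A minimal sketch, reusing the hypothetical bnxt_hwrm_send() transport assumed in the coalescing example:

#include <stdint.h>
#include <string.h>

int bnxt_hwrm_send(void *req, size_t req_len, void *resp, size_t resp_len);

static int reset_rx_ring(uint16_t rx_ring_id)
{
	struct hwrm_ring_reset_input req;
	struct hwrm_ring_reset_output resp;

	memset(&req, 0, sizeof(req));
	memset(&resp, 0, sizeof(resp));
	req.ring_type = HWRM_RING_RESET_INPUT_RING_TYPE_RX;
	req.ring_id = rx_ring_id;   /* physical ring number */
	return bnxt_hwrm_send(&req, sizeof(req), &resp, sizeof(resp));
}
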
*/ - uint8_t unused_0; - uint8_t unused_1; - uint8_t unused_2[3]; - uint8_t unused_3; - uint64_t tx_stat_host_addr; - /* This is the host address where Tx port statistics will be stored */ - uint64_t rx_stat_host_addr; - /* This is the host address where Rx port statistics will be stored */ -} __attribute__((packed)); - -/* Output (16 bytes) */ -struct hwrm_port_qstats_output { - uint16_t error_code; + uint16_t target_id; /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + uint64_t resp_addr; /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * This value identifies the CR associated with the ring + * group. */ - uint16_t tx_stat_size; - /* The size of TX port statistics block in bytes. */ - uint16_t rx_stat_size; - /* The size of RX port statistics block in bytes. */ - uint8_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t valid; + uint16_t cr; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * This value identifies the main RR associated with the ring + * group. */ -} __attribute__((packed)); - -/* hwrm_port_clr_stats */ -/* - * Description: This function clears per port statistics. The HWRM shall not - * allow a VF driver to clear port statistics. The HWRM shall not allow a PF - * driver to clear port statistics in a partitioning mode. The HWRM may allow a - * PF driver to clear port statistics in the non-partitioning mode. - */ -/* Input (24 bytes) */ -struct hwrm_port_clr_stats_input { - uint16_t req_type; + uint16_t rr; /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * This value identifies the aggregation RR associated with + * the ring group. If this value is 0xFF... (All Fs), then no + * Aggregation ring will be set. */ - uint16_t cmpl_ring; + uint16_t ar; /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * This value identifies the statistics context associated + * with the ring group. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + uint16_t sc; +} __attribute__((packed)); + +/* hwrm_ring_grp_alloc_output (size:128b/16B) */ +struct hwrm_ring_grp_alloc_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. 
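
The four IDs above (cr, rr, ar, sc) are everything a ring group aggregates; the ring_group_id returned in the output below is what later gets programmed as a VNIC's default ring group or as an RSS/COS table entry. A sketch under the same hypothetical-transport assumption, passing all-ones for ar to skip the aggregation ring as the description allows:

#include <stdint.h>
#include <string.h>

int bnxt_hwrm_send(void *req, size_t req_len, void *resp, size_t resp_len);

static int alloc_ring_group(uint16_t cmpl_ring, uint16_t rx_ring,
			    uint16_t stat_ctx, uint32_t *ring_group_id)
{
	struct hwrm_ring_grp_alloc_input req;
	struct hwrm_ring_grp_alloc_output resp;
	int rc;

	memset(&req, 0, sizeof(req));
	memset(&resp, 0, sizeof(resp));
	req.cr = cmpl_ring;          /* completion ring */
	req.rr = rx_ring;            /* main RX ring */
	req.ar = UINT16_C(0xffff);   /* all Fs: no aggregation ring */
	req.sc = stat_ctx;           /* statistics context */
	rc = bnxt_hwrm_send(&req, sizeof(req), &resp, sizeof(resp));
	if (rc == 0)
		*ring_group_id = resp.ring_group_id;
	return rc;
}
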
*/ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * This is the ring group ID value. Use this value to program + * the default ring group for the VNIC or as table entries + * in an RSS/COS context. */ - uint64_t resp_addr; + uint32_t ring_group_id; + uint8_t unused_0[3]; /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - uint16_t port_id; - /* Port ID of port that is being queried. */ - uint16_t unused_0[3]; + uint8_t valid; } __attribute__((packed)); -/* Output (16 bytes) */ -struct hwrm_port_clr_stats_output { - uint16_t error_code; +/********************** + * hwrm_ring_grp_free * + **********************/ + + +/* hwrm_ring_grp_free_input (size:192b/24B) */ +struct hwrm_ring_grp_free_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + uint16_t cmpl_ring; /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ + uint64_t resp_addr; + /* This is the ring group ID value. */ + uint32_t ring_group_id; + uint8_t unused_0[4]; } __attribute__((packed)); -/* hwrm_port_led_cfg */ -/* - * Description: This function is used to configure LEDs on a given port. Each - * port has individual set of LEDs associated with it. 
These LEDs are used for - * speed/link configuration as well as activity indicator configuration. Up to - * three LEDs can be configured, one for activity and two for speeds. - */ -/* Input (64 bytes) */ -struct hwrm_port_led_cfg_input { - uint16_t req_type; +/* hwrm_ring_grp_free_output (size:128b/16B) */ +struct hwrm_ring_grp_free_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/**************************** + * hwrm_cfa_l2_filter_alloc * + ****************************/ + + +/* hwrm_cfa_l2_filter_alloc_input (size:768b/96B) */ +struct hwrm_cfa_l2_filter_alloc_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t flags; + /* + * Enumeration denoting the RX, TX type of the resource. + * This enumeration is used for resources that are similar for both + * TX and RX paths of the chip. + */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH \ + UINT32_C(0x1) + /* tx path */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_TX \ + UINT32_C(0x0) + /* rx path */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX \ + UINT32_C(0x1) + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_LAST \ + HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX + /* Setting of this flag indicates the applicability to the loopback path. */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_LOOPBACK \ + UINT32_C(0x2) /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * Setting of this flag indicates drop action. If this flag is not set, + * then it should be considered accept action. */ - uint16_t cmpl_ring; + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_DROP \ + UINT32_C(0x4) /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * If this flag is set, all t_l2_* fields are invalid + * and they should not be specified. 
+ * If this flag is set, then l2_* fields refer to + * fields of outermost L2 header. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST \ + UINT32_C(0x8) + uint32_t enables; /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * This bit must be '1' for the l2_addr field to be + * configured. */ - uint64_t resp_addr; + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR \ + UINT32_C(0x1) /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * This bit must be '1' for the l2_addr_mask field to be + * configured. */ - uint32_t enables; - /* This bit must be '1' for the led0_id field to be configured. */ - #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_ID UINT32_C(0x1) - /* This bit must be '1' for the led0_state field to be configured. */ - #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_STATE UINT32_C(0x2) - /* This bit must be '1' for the led0_color field to be configured. */ - #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_COLOR UINT32_C(0x4) + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK \ + UINT32_C(0x2) /* - * This bit must be '1' for the led0_blink_on field to be + * This bit must be '1' for the l2_ovlan field to be * configured. */ - #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_BLINK_ON UINT32_C(0x8) + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN \ + UINT32_C(0x4) /* - * This bit must be '1' for the led0_blink_off field to be + * This bit must be '1' for the l2_ovlan_mask field to be * configured. */ - #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_BLINK_OFF UINT32_C(0x10) + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK \ + UINT32_C(0x8) /* - * This bit must be '1' for the led0_group_id field to be + * This bit must be '1' for the l2_ivlan field to be * configured. */ - #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_GROUP_ID UINT32_C(0x20) - /* This bit must be '1' for the led1_id field to be configured. */ - #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_ID UINT32_C(0x40) - /* This bit must be '1' for the led1_state field to be configured. */ - #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_STATE UINT32_C(0x80) - /* This bit must be '1' for the led1_color field to be configured. */ - #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_COLOR UINT32_C(0x100) + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN \ + UINT32_C(0x10) /* - * This bit must be '1' for the led1_blink_on field to be + * This bit must be '1' for the l2_ivlan_mask field to be * configured. */ - #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_BLINK_ON UINT32_C(0x200) + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK \ + UINT32_C(0x20) /* - * This bit must be '1' for the led1_blink_off field to be + * This bit must be '1' for the t_l2_addr field to be * configured. */ - #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_BLINK_OFF UINT32_C(0x400) + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_ADDR \ + UINT32_C(0x40) /* - * This bit must be '1' for the led1_group_id field to be + * This bit must be '1' for the t_l2_addr_mask field to be * configured. */ - #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_GROUP_ID UINT32_C(0x800) - /* This bit must be '1' for the led2_id field to be configured. 
*/ - #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_ID UINT32_C(0x1000) - /* This bit must be '1' for the led2_state field to be configured. */ - #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_STATE UINT32_C(0x2000) - /* This bit must be '1' for the led2_color field to be configured. */ - #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_COLOR UINT32_C(0x4000) + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_ADDR_MASK \ + UINT32_C(0x80) /* - * This bit must be '1' for the led2_blink_on field to be + * This bit must be '1' for the t_l2_ovlan field to be * configured. */ - #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_BLINK_ON UINT32_C(0x8000) + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_OVLAN \ + UINT32_C(0x100) /* - * This bit must be '1' for the led2_blink_off field to be + * This bit must be '1' for the t_l2_ovlan_mask field to be * configured. */ - #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_BLINK_OFF UINT32_C(0x10000) + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_OVLAN_MASK \ + UINT32_C(0x200) /* - * This bit must be '1' for the led2_group_id field to be + * This bit must be '1' for the t_l2_ivlan field to be * configured. */ - #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_GROUP_ID UINT32_C(0x20000) - /* This bit must be '1' for the led3_id field to be configured. */ - #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_ID UINT32_C(0x40000) - /* This bit must be '1' for the led3_state field to be configured. */ - #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_STATE UINT32_C(0x80000) - /* This bit must be '1' for the led3_color field to be configured. */ - #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_COLOR UINT32_C(0x100000) + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_IVLAN \ + UINT32_C(0x400) /* - * This bit must be '1' for the led3_blink_on field to be + * This bit must be '1' for the t_l2_ivlan_mask field to be * configured. */ - #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_BLINK_ON UINT32_C(0x200000) + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_IVLAN_MASK \ + UINT32_C(0x800) /* - * This bit must be '1' for the led3_blink_off field to be + * This bit must be '1' for the src_type field to be * configured. */ - #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_BLINK_OFF \ - UINT32_C(0x400000) + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE \ + UINT32_C(0x1000) /* - * This bit must be '1' for the led3_group_id field to be + * This bit must be '1' for the src_id field to be * configured. */ - #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_GROUP_ID UINT32_C(0x800000) - uint16_t port_id; - /* Port ID of port whose LEDs are configured. */ - uint8_t num_leds; + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID \ + UINT32_C(0x2000) /* - * The number of LEDs that are being configured. Up to 4 LEDs - * can be configured with this command. + * This bit must be '1' for the tunnel_type field to be + * configured. */ - uint8_t rsvd; - /* Reserved field. */ - uint8_t led0_id; - /* An identifier for the LED #0. */ - uint8_t led0_state; - /* The requested state of the LED #0. */ - /* Default state of the LED */ - #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_DEFAULT UINT32_C(0x0) - /* Off */ - #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_OFF UINT32_C(0x1) - /* On */ - #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_ON UINT32_C(0x2) - /* Blink */ - #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINK UINT32_C(0x3) - /* Blink Alternately */ - #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT UINT32_C(0x4) - uint8_t led0_color; - /* The requested color of LED #0. 
*/ - /* Default */ - #define HWRM_PORT_LED_CFG_INPUT_LED0_COLOR_DEFAULT UINT32_C(0x0) - /* Amber */ - #define HWRM_PORT_LED_CFG_INPUT_LED0_COLOR_AMBER UINT32_C(0x1) - /* Green */ - #define HWRM_PORT_LED_CFG_INPUT_LED0_COLOR_GREEN UINT32_C(0x2) - /* Green or Amber */ - #define HWRM_PORT_LED_CFG_INPUT_LED0_COLOR_GREENAMBER UINT32_C(0x3) - uint8_t unused_0; - uint16_t led0_blink_on; + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE \ + UINT32_C(0x4000) + /* + * This bit must be '1' for the dst_id field to be + * configured. + */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID \ + UINT32_C(0x8000) /* - * If the LED #0 state is "blink" or "blinkalt", then this field - * represents the requested time in milliseconds to keep LED on - * between cycles. + * This bit must be '1' for the mirror_vnic_id field to be + * configured. */ - uint16_t led0_blink_off; + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID \ + UINT32_C(0x10000) /* - * If the LED #0 state is "blink" or "blinkalt", then this field - * represents the requested time in milliseconds to keep LED off - * between cycles. + * This value sets the match value for the L2 MAC address. + * Destination MAC address for RX path. + * Source MAC address for TX path. */ - uint8_t led0_group_id; + uint8_t l2_addr[6]; + uint8_t unused_0[2]; /* - * An identifier for the group of LEDs that LED #0 belongs to. - * If set to 0, then the LED #0 shall not be grouped and shall - * be treated as an individual resource. For all other non-zero - * values of this field, LED #0 shall be grouped together with - * the LEDs with the same group ID value. + * This value sets the mask value for the L2 address. + * A value of 0 will mask the corresponding bit from + * compare. */ - uint8_t rsvd0; - /* Reserved field. */ - uint8_t led1_id; - /* An identifier for the LED #1. */ - uint8_t led1_state; - /* The requested state of the LED #1. */ - /* Default state of the LED */ - #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_DEFAULT UINT32_C(0x0) - /* Off */ - #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_OFF UINT32_C(0x1) - /* On */ - #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_ON UINT32_C(0x2) - /* Blink */ - #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_BLINK UINT32_C(0x3) - /* Blink Alternately */ - #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_BLINKALT UINT32_C(0x4) - uint8_t led1_color; - /* The requested color of LED #1. */ - /* Default */ - #define HWRM_PORT_LED_CFG_INPUT_LED1_COLOR_DEFAULT UINT32_C(0x0) - /* Amber */ - #define HWRM_PORT_LED_CFG_INPUT_LED1_COLOR_AMBER UINT32_C(0x1) - /* Green */ - #define HWRM_PORT_LED_CFG_INPUT_LED1_COLOR_GREEN UINT32_C(0x2) - /* Green or Amber */ - #define HWRM_PORT_LED_CFG_INPUT_LED1_COLOR_GREENAMBER UINT32_C(0x3) - uint8_t unused_1; - uint16_t led1_blink_on; + uint8_t l2_addr_mask[6]; + /* This value sets VLAN ID value for outer VLAN. */ + uint16_t l2_ovlan; /* - * If the LED #1 state is "blink" or "blinkalt", then this field - * represents the requested time in milliseconds to keep LED on - * between cycles. + * This value sets the mask value for the ovlan id. + * A value of 0 will mask the corresponding bit from + * compare. */ - uint16_t led1_blink_off; + uint16_t l2_ovlan_mask; + /* This value sets VLAN ID value for inner VLAN. */ + uint16_t l2_ivlan; /* - * If the LED #1 state is "blink" or "blinkalt", then this field - * represents the requested time in milliseconds to keep LED off - * between cycles. + * This value sets the mask value for the ivlan id. 
+ * A value of 0 will mask the corresponding bit from + * compare. */ - uint8_t led1_group_id; + uint16_t l2_ivlan_mask; + uint8_t unused_1[2]; /* - * An identifier for the group of LEDs that LED #1 belongs to. - * If set to 0, then the LED #1 shall not be grouped and shall - * be treated as an individual resource. For all other non-zero - * values of this field, LED #1 shall be grouped together with - * the LEDs with the same group ID value. + * This value sets the match value for the tunnel + * L2 MAC address. + * Destination MAC address for RX path. + * Source MAC address for TX path. */ - uint8_t rsvd1; - /* Reserved field. */ - uint8_t led2_id; - /* An identifier for the LED #2. */ - uint8_t led2_state; - /* The requested state of the LED #2. */ - /* Default state of the LED */ - #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_DEFAULT UINT32_C(0x0) - /* Off */ - #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_OFF UINT32_C(0x1) - /* On */ - #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_ON UINT32_C(0x2) - /* Blink */ - #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_BLINK UINT32_C(0x3) - /* Blink Alternately */ - #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_BLINKALT UINT32_C(0x4) - uint8_t led2_color; - /* The requested color of LED #2. */ - /* Default */ - #define HWRM_PORT_LED_CFG_INPUT_LED2_COLOR_DEFAULT UINT32_C(0x0) - /* Amber */ - #define HWRM_PORT_LED_CFG_INPUT_LED2_COLOR_AMBER UINT32_C(0x1) - /* Green */ - #define HWRM_PORT_LED_CFG_INPUT_LED2_COLOR_GREEN UINT32_C(0x2) - /* Green or Amber */ - #define HWRM_PORT_LED_CFG_INPUT_LED2_COLOR_GREENAMBER UINT32_C(0x3) - uint8_t unused_2; - uint16_t led2_blink_on; + uint8_t t_l2_addr[6]; + uint8_t unused_2[2]; /* - * If the LED #2 state is "blink" or "blinkalt", then this field - * represents the requested time in milliseconds to keep LED on - * between cycles. + * This value sets the mask value for the tunnel L2 + * address. + * A value of 0 will mask the corresponding bit from + * compare. */ - uint16_t led2_blink_off; + uint8_t t_l2_addr_mask[6]; + /* This value sets VLAN ID value for tunnel outer VLAN. */ + uint16_t t_l2_ovlan; /* - * If the LED #2 state is "blink" or "blinkalt", then this field - * represents the requested time in milliseconds to keep LED off - * between cycles. + * This value sets the mask value for the tunnel ovlan id. + * A value of 0 will mask the corresponding bit from + * compare. */ - uint8_t led2_group_id; + uint16_t t_l2_ovlan_mask; + /* This value sets VLAN ID value for tunnel inner VLAN. */ + uint16_t t_l2_ivlan; /* - * An identifier for the group of LEDs that LED #2 belongs to. - * If set to 0, then the LED #2 shall not be grouped and shall - * be treated as an individual resource. For all other non-zero - * values of this field, LED #2 shall be grouped together with - * the LEDs with the same group ID value. + * This value sets the mask value for the tunnel ivlan id. + * A value of 0 will mask the corresponding bit from + * compare. */ - uint8_t rsvd2; - /* Reserved field. */ - uint8_t led3_id; - /* An identifier for the LED #3. */ - uint8_t led3_state; - /* The requested state of the LED #3. 
*/ - /* Default state of the LED */ - #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_DEFAULT UINT32_C(0x0) - /* Off */ - #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_OFF UINT32_C(0x1) - /* On */ - #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_ON UINT32_C(0x2) - /* Blink */ - #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_BLINK UINT32_C(0x3) - /* Blink Alternately */ - #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_BLINKALT UINT32_C(0x4) - uint8_t led3_color; - /* The requested color of LED #3. */ - /* Default */ - #define HWRM_PORT_LED_CFG_INPUT_LED3_COLOR_DEFAULT UINT32_C(0x0) - /* Amber */ - #define HWRM_PORT_LED_CFG_INPUT_LED3_COLOR_AMBER UINT32_C(0x1) - /* Green */ - #define HWRM_PORT_LED_CFG_INPUT_LED3_COLOR_GREEN UINT32_C(0x2) - /* Green or Amber */ - #define HWRM_PORT_LED_CFG_INPUT_LED3_COLOR_GREENAMBER UINT32_C(0x3) - uint8_t unused_3; - uint16_t led3_blink_on; + uint16_t t_l2_ivlan_mask; + /* This value identifies the type of source of the packet. */ + uint8_t src_type; + /* Network port */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_NPORT UINT32_C(0x0) + /* Physical function */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_PF UINT32_C(0x1) + /* Virtual function */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_VF UINT32_C(0x2) + /* Virtual NIC of a function */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_VNIC UINT32_C(0x3) + /* Embedded processor for CFA management */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_KONG UINT32_C(0x4) + /* Embedded processor for OOB management */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_APE UINT32_C(0x5) + /* Embedded processor for RoCE */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_BONO UINT32_C(0x6) + /* Embedded processor for network proxy functions */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_TANG UINT32_C(0x7) + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_LAST \ + HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_TANG + uint8_t unused_3; + /* + * This value is the id of the source. + * For a network port, it represents port_id. + * For a physical function, it represents fid. + * For a virtual function, it represents vf_id. + * For a vnic, it represents vnic_id. + * For embedded processors, this id is not valid. + * + * Notes: + * 1. The function ID is implied if the src_id is + * not provided for a src_type that is either + */ + uint32_t src_id; + /* Tunnel Type. 
*/ + uint8_t tunnel_type; + /* Non-tunnel */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NONTUNNEL \ + UINT32_C(0x0) + /* Virtual eXtensible Local Area Network (VXLAN) */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN \ + UINT32_C(0x1) + /* Network Virtualization Generic Routing Encapsulation (NVGRE) */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NVGRE \ + UINT32_C(0x2) + /* Generic Routing Encapsulation (GRE) inside Ethernet payload */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_L2GRE \ + UINT32_C(0x3) + /* IP in IP */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPIP \ + UINT32_C(0x4) + /* Generic Network Virtualization Encapsulation (Geneve) */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_GENEVE \ + UINT32_C(0x5) + /* Multi-Protocol Label Switching (MPLS) */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_MPLS \ + UINT32_C(0x6) + /* Stateless Transport Tunnel (STT) */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_STT \ + UINT32_C(0x7) + /* Generic Routing Encapsulation (GRE) inside IP datagram payload */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPGRE \ + UINT32_C(0x8) + /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_V4 \ + UINT32_C(0x9) + /* Any tunneled traffic */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL \ + UINT32_C(0xff) + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_LAST \ + HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL + uint8_t unused_4; /* - * If the LED #3 state is "blink" or "blinkalt", then this field - * represents the requested time in milliseconds to keep LED on - * between cycles. + * If set, this value shall represent the + * Logical VNIC ID of the destination VNIC for the RX + * path and network port id of the destination port for + * the TX path. */ - uint16_t led3_blink_off; + uint16_t dst_id; /* - * If the LED #3 state is "blink" or "blinkalt", then this field - * represents the requested time in milliseconds to keep LED off - * between cycles. + * Logical VNIC ID of the VNIC where traffic is + * mirrored. */ - uint8_t led3_group_id; + uint16_t mirror_vnic_id; /* - * An identifier for the group of LEDs that LED #3 belongs to. - * If set to 0, then the LED #3 shall not be grouped and shall - * be treated as an individual resource. For all other non-zero - * values of this field, LED #3 shall be grouped together with - * the LEDs with the same group ID value. + * This hint is provided to help in placing + * the filter in the filter table. */ - uint8_t rsvd3; - /* Reserved field. */ + uint8_t pri_hint; + /* No preference */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_NO_PREFER \ + UINT32_C(0x0) + /* Above the given filter */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_ABOVE_FILTER \ + UINT32_C(0x1) + /* Below the given filter */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_BELOW_FILTER \ + UINT32_C(0x2) + /* As high as possible */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_MAX \ + UINT32_C(0x3) + /* As low as possible */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_MIN \ + UINT32_C(0x4) + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_LAST \ + HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_MIN + uint8_t unused_5; + uint32_t unused_6; + /* + * This is the ID of the filter that goes along with + * the pri_hint. + * + * This field is valid only for the following values. 
+ * 1 - Above the given filter + * 2 - Below the given filter + */ + uint64_t l2_filter_id_hint; } __attribute__((packed)); -/* Output (16 bytes) */ -struct hwrm_port_led_cfg_output { - uint16_t error_code; +/* hwrm_cfa_l2_filter_alloc_output (size:192b/24B) */ +struct hwrm_cfa_l2_filter_alloc_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * This value identifies a set of CFA data structures used for an L2 + * context. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + uint64_t l2_filter_id; /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * This is the ID of the flow associated with this + * filter. + * This value shall be used to match and associate the + * flow identifier returned in completion records. + * A value of 0xFFFFFFFF shall indicate no flow id. */ - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; + uint32_t flow_id; + uint8_t unused_0[3]; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ + uint8_t valid; } __attribute__((packed)); -/* hwrm_port_led_qcfg */ -/* - * Description: This function is used to query configuration of LEDs on a given - * port. Each port has individual set of LEDs associated with it. These LEDs are - * used for speed/link configuration as well as activity indicator - * configuration. Up to three LEDs can be configured, one for activity and two - * for speeds. - */ -/* Input (24 bytes) */ -struct hwrm_port_led_qcfg_input { - uint16_t req_type; - /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. - */ - uint16_t cmpl_ring; - /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. - */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; +/*************************** + * hwrm_cfa_l2_filter_free * + ***************************/ + + +/* hwrm_cfa_l2_filter_free_input (size:192b/24B) */ +struct hwrm_cfa_l2_filter_free_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * Target ID of this command. 
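
Putting the filter-allocation pieces together: the enables bitmap gates which match fields firmware reads, an all-ones mask compares every bit (a 0 bit is ignored), and the l2_filter_id returned above must be kept for the hwrm_cfa_l2_filter_free and hwrm_cfa_l2_filter_cfg commands that follow. A sketch of an exact-match RX unicast filter steering to a VNIC, again over the hypothetical transport:

#include <stdint.h>
#include <string.h>

int bnxt_hwrm_send(void *req, size_t req_len, void *resp, size_t resp_len);

static int alloc_mac_filter(const uint8_t mac[6], uint16_t vnic_id,
			    uint64_t *filter_id)
{
	struct hwrm_cfa_l2_filter_alloc_input req;
	struct hwrm_cfa_l2_filter_alloc_output resp;
	int rc;

	memset(&req, 0, sizeof(req));
	memset(&resp, 0, sizeof(resp));
	/* RX-path filter; accept action (no DROP flag). */
	req.flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
	req.enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
		      HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK |
		      HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
	memcpy(req.l2_addr, mac, 6);            /* destination MAC on RX */
	memset(req.l2_addr_mask, 0xff, 6);      /* exact match on all 48 bits */
	req.dst_id = vnic_id;                   /* destination VNIC on RX */
	rc = bnxt_hwrm_send(&req, sizeof(req), &resp, sizeof(resp));
	if (rc == 0)
		*filter_id = resp.l2_filter_id; /* handle for free/cfg later */
	return rc;
}
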
0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint64_t resp_addr; + uint16_t cmpl_ring; /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint16_t port_id; - /* Port ID of port whose LED configuration is being queried. */ - uint16_t unused_0[3]; -} __attribute__((packed)); - -/* Output (56 bytes) */ -struct hwrm_port_led_qcfg_output { - uint16_t error_code; + uint16_t seq_id; /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + uint16_t target_id; /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint8_t num_leds; + uint64_t resp_addr; /* - * The number of LEDs that are configured on this port. Up to 4 - * LEDs can be returned in the response. + * This value identifies a set of CFA data structures used for an L2 + * context. */ - uint8_t led0_id; - /* An identifier for the LED #0. */ - uint8_t led0_type; - /* The type of LED #0. */ - /* Speed LED */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_TYPE_SPEED UINT32_C(0x0) - /* Activity LED */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_TYPE_ACTIVITY UINT32_C(0x1) - /* Invalid */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_TYPE_INVALID UINT32_C(0xff) - uint8_t led0_state; - /* The current state of the LED #0. */ - /* Default state of the LED */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT UINT32_C(0x0) - /* Off */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_OFF UINT32_C(0x1) - /* On */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_ON UINT32_C(0x2) - /* Blink */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_BLINK UINT32_C(0x3) - /* Blink Alternately */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_BLINKALT UINT32_C(0x4) - uint8_t led0_color; - /* The color of LED #0. 
*/ - /* Default */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_COLOR_DEFAULT UINT32_C(0x0) - /* Amber */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_COLOR_AMBER UINT32_C(0x1) - /* Green */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_COLOR_GREEN UINT32_C(0x2) - /* Green or Amber */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_COLOR_GREENAMBER UINT32_C(0x3) - uint8_t unused_0; - uint16_t led0_blink_on; + uint64_t l2_filter_id; +} __attribute__((packed)); + +/* hwrm_cfa_l2_filter_free_output (size:128b/16B) */ +struct hwrm_cfa_l2_filter_free_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * If the LED #0 state is "blink" or "blinkalt", then this field - * represents the requested time in milliseconds to keep LED on - * between cycles. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - uint16_t led0_blink_off; + uint8_t valid; +} __attribute__((packed)); + +/************************** + * hwrm_cfa_l2_filter_cfg * + **************************/ + + +/* hwrm_cfa_l2_filter_cfg_input (size:320b/40B) */ +struct hwrm_cfa_l2_filter_cfg_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * If the LED #0 state is "blink" or "blinkalt", then this field - * represents the requested time in milliseconds to keep LED off - * between cycles. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint8_t led0_group_id; + uint16_t cmpl_ring; /* - * An identifier for the group of LEDs that LED #0 belongs to. - * If set to 0, then the LED #0 is not grouped. For all other - * non-zero values of this field, LED #0 is grouped together - * with the LEDs with the same group ID value. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint8_t led1_id; - /* An identifier for the LED #1. */ - uint8_t led1_type; - /* The type of LED #1. */ - /* Speed LED */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_TYPE_SPEED UINT32_C(0x0) - /* Activity LED */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_TYPE_ACTIVITY UINT32_C(0x1) - /* Invalid */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_TYPE_INVALID UINT32_C(0xff) - uint8_t led1_state; - /* The current state of the LED #1. */ - /* Default state of the LED */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_DEFAULT UINT32_C(0x0) - /* Off */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_OFF UINT32_C(0x1) - /* On */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_ON UINT32_C(0x2) - /* Blink */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_BLINK UINT32_C(0x3) - /* Blink Alternately */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_BLINKALT UINT32_C(0x4) - uint8_t led1_color; - /* The color of LED #1. 
*/ - /* Default */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_COLOR_DEFAULT UINT32_C(0x0) - /* Amber */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_COLOR_AMBER UINT32_C(0x1) - /* Green */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_COLOR_GREEN UINT32_C(0x2) - /* Green or Amber */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_COLOR_GREENAMBER UINT32_C(0x3) - uint8_t unused_1; - uint16_t led1_blink_on; + uint16_t seq_id; /* - * If the LED #1 state is "blink" or "blinkalt", then this field - * represents the requested time in milliseconds to keep LED on - * between cycles. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint16_t led1_blink_off; + uint16_t target_id; /* - * If the LED #1 state is "blink" or "blinkalt", then this field - * represents the requested time in milliseconds to keep LED off - * between cycles. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint8_t led1_group_id; + uint64_t resp_addr; + uint32_t flags; /* - * An identifier for the group of LEDs that LED #1 belongs to. - * If set to 0, then the LED #1 is not grouped. For all other - * non-zero values of this field, LED #1 is grouped together - * with the LEDs with the same group ID value. + * Enumeration denoting the RX, TX type of the resource. + * This enumeration is used for resources that are similar for both + * TX and RX paths of the chip. */ - uint8_t led2_id; - /* An identifier for the LED #2. */ - uint8_t led2_type; - /* The type of LED #2. */ - /* Speed LED */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_TYPE_SPEED UINT32_C(0x0) - /* Activity LED */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_TYPE_ACTIVITY UINT32_C(0x1) - /* Invalid */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_TYPE_INVALID UINT32_C(0xff) - uint8_t led2_state; - /* The current state of the LED #2. */ - /* Default state of the LED */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_DEFAULT UINT32_C(0x0) - /* Off */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_OFF UINT32_C(0x1) - /* On */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_ON UINT32_C(0x2) - /* Blink */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_BLINK UINT32_C(0x3) - /* Blink Alternately */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_BLINKALT UINT32_C(0x4) - uint8_t led2_color; - /* The color of LED #2. */ - /* Default */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_COLOR_DEFAULT UINT32_C(0x0) - /* Amber */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_COLOR_AMBER UINT32_C(0x1) - /* Green */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_COLOR_GREEN UINT32_C(0x2) - /* Green or Amber */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_COLOR_GREENAMBER UINT32_C(0x3) - uint8_t unused_2; - uint16_t led2_blink_on; + #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH UINT32_C(0x1) + /* tx path */ + #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_TX UINT32_C(0x0) + /* rx path */ + #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX UINT32_C(0x1) + #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_LAST \ + HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX /* - * If the LED #2 state is "blink" or "blinkalt", then this field - * represents the requested time in milliseconds to keep LED on - * between cycles. + * Setting of this flag indicates drop action. If this flag is not set, + * then it should be considered accept action. 
*/ - uint16_t led2_blink_off; + #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_DROP UINT32_C(0x2) + uint32_t enables; /* - * If the LED #2 state is "blink" or "blinkalt", then this field - * represents the requested time in milliseconds to keep LED off - * between cycles. + * This bit must be '1' for the dst_id field to be + * configured. */ - uint8_t led2_group_id; + #define HWRM_CFA_L2_FILTER_CFG_INPUT_ENABLES_DST_ID \ + UINT32_C(0x1) /* - * An identifier for the group of LEDs that LED #2 belongs to. - * If set to 0, then the LED #2 is not grouped. For all other - * non-zero values of this field, LED #2 is grouped together - * with the LEDs with the same group ID value. + * This bit must be '1' for the new_mirror_vnic_id field to be + * configured. */ - uint8_t led3_id; - /* An identifier for the LED #3. */ - uint8_t led3_type; - /* The type of LED #3. */ - /* Speed LED */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_TYPE_SPEED UINT32_C(0x0) - /* Activity LED */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_TYPE_ACTIVITY UINT32_C(0x1) - /* Invalid */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_TYPE_INVALID UINT32_C(0xff) - uint8_t led3_state; - /* The current state of the LED #3. */ - /* Default state of the LED */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_DEFAULT UINT32_C(0x0) - /* Off */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_OFF UINT32_C(0x1) - /* On */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_ON UINT32_C(0x2) - /* Blink */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_BLINK UINT32_C(0x3) - /* Blink Alternately */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_BLINKALT UINT32_C(0x4) - uint8_t led3_color; - /* The color of LED #3. */ - /* Default */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_COLOR_DEFAULT UINT32_C(0x0) - /* Amber */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_COLOR_AMBER UINT32_C(0x1) - /* Green */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_COLOR_GREEN UINT32_C(0x2) - /* Green or Amber */ - #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_COLOR_GREENAMBER UINT32_C(0x3) - uint8_t unused_3; - uint16_t led3_blink_on; + #define HWRM_CFA_L2_FILTER_CFG_INPUT_ENABLES_NEW_MIRROR_VNIC_ID \ + UINT32_C(0x2) /* - * If the LED #3 state is "blink" or "blinkalt", then this field - * represents the requested time in milliseconds to keep LED on - * between cycles. + * This value identifies a set of CFA data structures used for an L2 + * context. */ - uint16_t led3_blink_off; + uint64_t l2_filter_id; /* - * If the LED #3 state is "blink" or "blinkalt", then this field - * represents the requested time in milliseconds to keep LED off - * between cycles. + * If set, this value shall represent the + * Logical VNIC ID of the destination VNIC for the RX + * path and network port id of the destination port for + * the TX path. */ - uint8_t led3_group_id; + uint32_t dst_id; /* - * An identifier for the group of LEDs that LED #3 belongs to. - * If set to 0, then the LED #3 is not grouped. For all other - * non-zero values of this field, LED #3 is grouped together - * with the LEDs with the same group ID value. + * New Logical VNIC ID of the VNIC where traffic is + * mirrored. */ - uint8_t unused_4; - uint16_t unused_5; - uint8_t unused_6; - uint8_t unused_7; - uint8_t unused_8; - uint8_t valid; + uint32_t new_mirror_vnic_id; +} __attribute__((packed)); + +/* hwrm_cfa_l2_filter_cfg_output (size:128b/16B) */ +struct hwrm_cfa_l2_filter_cfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. 
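
hwrm_cfa_l2_filter_cfg takes the l2_filter_id handle from the alloc response and updates only the fields whose enables bits are set, so an existing filter can be re-pointed at a different VNIC without re-specifying its match keys. A sketch, under the same hypothetical-transport assumption:

#include <stdint.h>
#include <string.h>

int bnxt_hwrm_send(void *req, size_t req_len, void *resp, size_t resp_len);

static int repoint_filter(uint64_t filter_id, uint32_t new_vnic_id)
{
	struct hwrm_cfa_l2_filter_cfg_input req;
	struct hwrm_cfa_l2_filter_cfg_output resp;

	memset(&req, 0, sizeof(req));
	memset(&resp, 0, sizeof(resp));
	req.l2_filter_id = filter_id;   /* handle from filter alloc */
	req.flags = HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX;
	req.enables = HWRM_CFA_L2_FILTER_CFG_INPUT_ENABLES_DST_ID;
	req.dst_id = new_vnic_id;       /* steer matches to the new VNIC */
	return bnxt_hwrm_send(&req, sizeof(req), &resp, sizeof(resp));
}
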
*/ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ + uint8_t valid; } __attribute__((packed)); -/* hwrm_port_led_qcaps */ -/* - * Description: This function is used to query capabilities of LEDs on a given - * port. Each port has individual set of LEDs associated with it. These LEDs are - * used for speed/link configuration as well as activity indicator - * configuration. - */ -/* Input (24 bytes) */ -struct hwrm_port_led_qcaps_input { - uint16_t req_type; +/*************************** + * hwrm_cfa_l2_set_rx_mask * + ***************************/ + + +/* hwrm_cfa_l2_set_rx_mask_input (size:448b/56B) */ +struct hwrm_cfa_l2_set_rx_mask_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint16_t cmpl_ring; + uint16_t cmpl_ring; /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + uint16_t seq_id; /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint64_t resp_addr; + uint16_t target_id; /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint16_t port_id; - /* Port ID of port whose LED configuration is being queried. 
 */
- uint16_t unused_0[3];
-} __attribute__((packed));
-
-/* Output (48 bytes) */
-struct hwrm_port_led_qcaps_output {
- uint16_t error_code;
+ uint64_t resp_addr;
+ /* VNIC ID */
+ uint32_t vnic_id;
+ uint32_t mask;
 /*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * When this bit is '1', the function is requested to accept
+ * multi-cast packets specified by the multicast addr table.
 */
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST \
+ UINT32_C(0x2)
 /*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * When this bit is '1', the function is requested to accept
+ * all multi-cast packets.
 */
- uint8_t num_leds;
+ #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST \
+ UINT32_C(0x4)
 /*
- * The number of LEDs that are configured on this port. Up to 4
- * LEDs can be returned in the response.
+ * When this bit is '1', the function is requested to accept
+ * broadcast packets.
 */
- uint8_t unused_0[3];
- /* Reserved for future use. */
- uint8_t led0_id;
- /* An identifier for the LED #0. */
- uint8_t led0_type;
- /* The type of LED #0. */
- /* Speed LED */
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_TYPE_SPEED UINT32_C(0x0)
- /* Activity LED */
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_TYPE_ACTIVITY UINT32_C(0x1)
- /* Invalid */
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_TYPE_INVALID UINT32_C(0xff)
- uint8_t led0_group_id;
+ #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST \
+ UINT32_C(0x8)
 /*
- * An identifier for the group of LEDs that LED #0 belongs to.
- * If set to 0, then the LED #0 cannot be grouped. For all other
- * non-zero values of this field, LED #0 is grouped together
- * with the LEDs with the same group ID value.
+ * When this bit is '1', the function is requested to be
+ * put in the promiscuous mode.
+ *
+ * The HWRM should accept any function to set up
+ * promiscuous mode.
+ *
+ * The HWRM shall follow the semantics below for the
+ * promiscuous mode support.
+ * # When partitioning is not enabled on a port
+ * (i.e. single PF on the port), then the PF shall
+ * be allowed to be in the promiscuous mode. When the
+ * PF is in the promiscuous mode, then it shall
+ * receive all host bound traffic on that port.
+ * # When partitioning is enabled on a port
+ * (i.e. multiple PFs per port) and a PF on that
+ * port is in the promiscuous mode, then the PF
+ * receives all traffic within that partition as
+ * identified by a unique identifier for the
+ * PF (e.g. S-Tag). If a unique outer VLAN
+ * for the PF is specified, then the setting of
+ * promiscuous mode on that PF shall result in the
+ * PF receiving all host bound traffic with matching
+ * outer VLAN.
+ * # A VF can be set in the promiscuous mode.
+ * In the promiscuous mode, the VF does not receive any
+ * traffic unless a unique outer VLAN for the
+ * VF is specified. If a unique outer VLAN
+ * for the VF is specified, then the setting of
+ * promiscuous mode on that VF shall result in the
+ * VF receiving all host bound traffic with the
+ * matching outer VLAN.
+ * # The HWRM shall allow the setting of promiscuous
+ * mode on a function independently from the
+ * promiscuous mode settings on other functions.
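+ *
+ * For illustration only (a sketch, not part of the generated
+ * interface): a driver might request promiscuous reception on a
+ * VNIC roughly as follows, assuming a hypothetical hwrm_send()
+ * transport helper:
+ *
+ *   struct hwrm_cfa_l2_set_rx_mask_input req = { 0 };
+ *   req.req_type = rte_cpu_to_le_16(HWRM_CFA_L2_SET_RX_MASK);
+ *   req.vnic_id = rte_cpu_to_le_32(vnic_id);
+ *   req.mask = rte_cpu_to_le_32(
+ *           HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST |
+ *           HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS);
+ *   rc = hwrm_send(bp, &req, sizeof(req));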
+ */ + #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS \ + UINT32_C(0x10) + /* + * If this flag is set, the corresponding RX + * filters shall be set up to cover multicast/broadcast + * filters for the outermost Layer 2 destination MAC + * address field. */ - uint8_t unused_1; - uint16_t led0_state_caps; - /* The states supported by LED #0. */ + #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_OUTERMOST \ + UINT32_C(0x20) /* - * If set to 1, this LED is enabled. If set to 0, this LED is - * disabled. + * If this flag is set, the corresponding RX + * filters shall be set up to cover multicast/broadcast + * filters for the VLAN-tagged packets that match the + * TPID and VID fields of VLAN tags in the VLAN tag + * table specified in this command. */ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_ENABLED UINT32_C(0x1) + #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY \ + UINT32_C(0x40) /* - * If set to 1, off state is supported on this LED. If set to 0, - * off state is not supported on this LED. + * If this flag is set, the corresponding RX + * filters shall be set up to cover multicast/broadcast + * filters for non-VLAN tagged packets and VLAN-tagged + * packets that match the TPID and VID fields of VLAN + * tags in the VLAN tag table specified in this command. */ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_OFF_SUPPORTED \ - UINT32_C(0x2) + #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN \ + UINT32_C(0x80) /* - * If set to 1, on state is supported on this LED. If set to 0, - * on state is not supported on this LED. + * If this flag is set, the corresponding RX + * filters shall be set up to cover multicast/broadcast + * filters for non-VLAN tagged packets and VLAN-tagged + * packets matching any VLAN tag. + * + * If this flag is set, then the HWRM shall ignore + * VLAN tags specified in vlan_tag_tbl. + * + * If none of vlanonly, vlan_nonvlan, and anyvlan_nonvlan + * flags is set, then the HWRM shall ignore + * VLAN tags specified in vlan_tag_tbl. + * + * The HWRM client shall set at most one flag out of + * vlanonly, vlan_nonvlan, and anyvlan_nonvlan. + */ + #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN \ + UINT32_C(0x100) + /* This is the address for mcast address tbl. */ + uint64_t mc_tbl_addr; + /* + * This value indicates how many entries in mc_tbl are valid. + * Each entry is 6 bytes. */ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_ON_SUPPORTED \ - UINT32_C(0x4) + uint32_t num_mc_entries; + uint8_t unused_0[4]; /* - * If set to 1, blink state is supported on this LED. If set to - * 0, blink state is not supported on this LED. + * This is the address for VLAN tag table. + * Each VLAN entry in the table is 4 bytes of a VLAN tag + * including TPID, PCP, DEI, and VID fields in network byte + * order. */ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_BLINK_SUPPORTED \ - UINT32_C(0x8) + uint64_t vlan_tag_tbl_addr; /* - * If set to 1, blink_alt state is supported on this LED. If set - * to 0, blink_alt state is not supported on this LED. + * This value indicates how many entries in vlan_tag_tbl are + * valid. Each entry is 4 bytes. */ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_BLINK_ALT_SUPPORTED \ - UINT32_C(0x10) - uint16_t led0_color_caps; - /* The colors supported by LED #0. 
 */
- /* reserved */
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_COLOR_CAPS_RSVD UINT32_C(0x1)
+ uint32_t num_vlan_tags;
+ uint8_t unused_1[4];
+} __attribute__((packed));
+
+/* hwrm_cfa_l2_set_rx_mask_output (size:128b/16B) */
+struct hwrm_cfa_l2_set_rx_mask_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
 /*
- * If set to 1, Amber color is supported on this LED. If set to
- * 0, Amber color is not supported on this LED.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
 */
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_COLOR_CAPS_AMBER_SUPPORTED \
- UINT32_C(0x2)
+ uint8_t valid;
+} __attribute__((packed));
+
+/* hwrm_cfa_l2_set_rx_mask_cmd_err (size:64b/8B) */
+struct hwrm_cfa_l2_set_rx_mask_cmd_err {
 /*
- * If set to 1, Green color is supported on this LED. If set to
- * 0, Green color is not supported on this LED.
+ * command specific error codes that go to
+ * the cmd_err field in the Common HWRM Error Response.
 */
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_COLOR_CAPS_GREEN_SUPPORTED \
- UINT32_C(0x4)
- uint8_t led1_id;
- /* An identifier for the LED #1. */
- uint8_t led1_type;
- /* The type of LED #1. */
- /* Speed LED */
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_TYPE_SPEED UINT32_C(0x0)
- /* Activity LED */
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_TYPE_ACTIVITY UINT32_C(0x1)
- /* Invalid */
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_TYPE_INVALID UINT32_C(0xff)
- uint8_t led1_group_id;
+ uint8_t code;
+ /* Unknown error */
+ #define HWRM_CFA_L2_SET_RX_MASK_CMD_ERR_CODE_UNKNOWN \
+ UINT32_C(0x0)
+ /* Unable to complete operation due to conflict with Ntuple Filter */
+ #define HWRM_CFA_L2_SET_RX_MASK_CMD_ERR_CODE_NTUPLE_FILTER_CONFLICT_ERR \
+ UINT32_C(0x1)
+ #define HWRM_CFA_L2_SET_RX_MASK_CMD_ERR_CODE_LAST \
+ HWRM_CFA_L2_SET_RX_MASK_CMD_ERR_CODE_NTUPLE_FILTER_CONFLICT_ERR
+ uint8_t unused_0[7];
+} __attribute__((packed));
+
+/*******************************
+ * hwrm_cfa_vlan_antispoof_cfg *
+ *******************************/
+
+
+/* hwrm_cfa_vlan_antispoof_cfg_input (size:256b/32B) */
+struct hwrm_cfa_vlan_antispoof_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
 /*
- * An identifier for the group of LEDs that LED #1 belongs to.
- * If set to 0, then the LED #0 cannot be grouped. For all other
- * non-zero values of this field, LED #0 is grouped together
- * with the LEDs with the same group ID value.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
 */
- uint8_t unused_2;
- uint16_t led1_state_caps;
- /* The states supported by LED #1. */
+ uint16_t cmpl_ring;
 /*
- * If set to 1, this LED is enabled. If set to 0, this LED is
- * disabled.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
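+ *
+ * As an illustrative aside on the cmd_err structure defined above
+ * (a sketch, not part of the generated interface): a driver might
+ * translate the conflict code into a distinct errno, given err
+ * pointing at the command specific error data:
+ *
+ *   struct hwrm_cfa_l2_set_rx_mask_cmd_err *err;
+ *
+ *   err = cmd_err_data(resp);      /* hypothetical accessor */
+ *   if (err->code ==
+ *       HWRM_CFA_L2_SET_RX_MASK_CMD_ERR_CODE_NTUPLE_FILTER_CONFLICT_ERR)
+ *           return -EBUSY;         /* conflicts with an ntuple filter */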
*/ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_STATE_CAPS_ENABLED UINT32_C(0x1) + uint16_t seq_id; /* - * If set to 1, off state is supported on this LED. If set to 0, - * off state is not supported on this LED. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_STATE_CAPS_OFF_SUPPORTED \ - UINT32_C(0x2) + uint16_t target_id; /* - * If set to 1, on state is supported on this LED. If set to 0, - * on state is not supported on this LED. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_STATE_CAPS_ON_SUPPORTED \ - UINT32_C(0x4) + uint64_t resp_addr; /* - * If set to 1, blink state is supported on this LED. If set to - * 0, blink state is not supported on this LED. + * Function ID of the function that is being configured. + * Only valid for a VF FID configured by the PF. */ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_STATE_CAPS_BLINK_SUPPORTED \ - UINT32_C(0x8) + uint16_t fid; + uint8_t unused_0[2]; + /* Number of VLAN entries in the vlan_tag_mask_tbl. */ + uint32_t num_vlan_entries; /* - * If set to 1, blink_alt state is supported on this LED. If set - * to 0, blink_alt state is not supported on this LED. + * The vlan_tag_mask_tbl_addr is the DMA address of the VLAN + * antispoof table. Each table entry contains the 16-bit TPID + * (0x8100 or 0x88a8 only), 16-bit VLAN ID, and a 16-bit mask, + * all in network order to match hwrm_cfa_l2_set_rx_mask. + * For an individual VLAN entry, the mask value should be 0xfff + * for the 12-bit VLAN ID. */ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_STATE_CAPS_BLINK_ALT_SUPPORTED \ - UINT32_C(0x10) - uint16_t led1_color_caps; - /* The colors supported by LED #1. */ - /* reserved */ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_COLOR_CAPS_RSVD UINT32_C(0x1) + uint64_t vlan_tag_mask_tbl_addr; +} __attribute__((packed)); + +/* hwrm_cfa_vlan_antispoof_cfg_output (size:128b/16B) */ +struct hwrm_cfa_vlan_antispoof_cfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * If set to 1, Amber color is supported on this LED. If set to - * 0, Amber color is not supported on this LED. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_COLOR_CAPS_AMBER_SUPPORTED \ - UINT32_C(0x2) + uint8_t valid; +} __attribute__((packed)); + +/******************************** + * hwrm_cfa_vlan_antispoof_qcfg * + ********************************/ + + +/* hwrm_cfa_vlan_antispoof_qcfg_input (size:256b/32B) */ +struct hwrm_cfa_vlan_antispoof_qcfg_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * If set to 1, Green color is supported on this LED. 
If set to
- * 0, Green color is not supported on this LED.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
 */
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_COLOR_CAPS_GREEN_SUPPORTED \
- UINT32_C(0x4)
- uint8_t led2_id;
- /* An identifier for the LED #2. */
- uint8_t led2_type;
- /* The type of LED #2. */
- /* Speed LED */
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_TYPE_SPEED UINT32_C(0x0)
- /* Activity LED */
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_TYPE_ACTIVITY UINT32_C(0x1)
- /* Invalid */
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_TYPE_INVALID UINT32_C(0xff)
- uint8_t led2_group_id;
+ uint16_t cmpl_ring;
 /*
- * An identifier for the group of LEDs that LED #0 belongs to.
- * If set to 0, then the LED #0 cannot be grouped. For all other
- * non-zero values of this field, LED #0 is grouped together
- * with the LEDs with the same group ID value.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
 */
- uint8_t unused_3;
- uint16_t led2_state_caps;
- /* The states supported by LED #2. */
+ uint16_t seq_id;
 /*
- * If set to 1, this LED is enabled. If set to 0, this LED is
- * disabled.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
 */
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_STATE_CAPS_ENABLED UINT32_C(0x1)
+ uint16_t target_id;
 /*
- * If set to 1, off state is supported on this LED. If set to 0,
- * off state is not supported on this LED.
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
 */
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_STATE_CAPS_OFF_SUPPORTED \
- UINT32_C(0x2)
+ uint64_t resp_addr;
 /*
- * If set to 1, on state is supported on this LED. If set to 0,
- * on state is not supported on this LED.
+ * Function ID of the function that is being queried.
+ * Only valid for a VF FID queried by the PF.
 */
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_STATE_CAPS_ON_SUPPORTED \
- UINT32_C(0x4)
+ uint16_t fid;
+ uint8_t unused_0[2];
 /*
- * If set to 1, blink state is supported on this LED. If set to
- * 0, blink state is not supported on this LED.
+ * Maximum number of VLAN entries the firmware is allowed to DMA
+ * to vlan_tag_mask_tbl.
 */
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_STATE_CAPS_BLINK_SUPPORTED \
- UINT32_C(0x8)
+ uint32_t max_vlan_entries;
 /*
- * If set to 1, blink_alt state is supported on this LED. If set
- * to 0, blink_alt state is not supported on this LED.
+ * The vlan_tag_mask_tbl_addr is the DMA address of the VLAN
+ * antispoof table to which firmware will DMA. Each table
+ * entry will contain the 16-bit TPID (0x8100 or 0x88a8 only),
+ * 16-bit VLAN ID, and a 16-bit mask, all in network order to
+ * match hwrm_cfa_l2_set_rx_mask. For an individual VLAN entry,
+ * the mask value should be 0xfff for the 12-bit VLAN ID.
 */
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_STATE_CAPS_BLINK_ALT_SUPPORTED \
- UINT32_C(0x10)
- uint16_t led2_color_caps;
- /* The colors supported by LED #2.
*/ - /* reserved */ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_COLOR_CAPS_RSVD UINT32_C(0x1) + uint64_t vlan_tag_mask_tbl_addr; +} __attribute__((packed)); + +/* hwrm_cfa_vlan_antispoof_qcfg_output (size:128b/16B) */ +struct hwrm_cfa_vlan_antispoof_qcfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* Number of valid entries DMAd by firmware to vlan_tag_mask_tbl. */ + uint32_t num_vlan_entries; + uint8_t unused_0[3]; /* - * If set to 1, Amber color is supported on this LED. If set to - * 0, Amber color is not supported on this LED. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_COLOR_CAPS_AMBER_SUPPORTED \ - UINT32_C(0x2) + uint8_t valid; +} __attribute__((packed)); + +/******************************** + * hwrm_cfa_tunnel_filter_alloc * + ********************************/ + + +/* hwrm_cfa_tunnel_filter_alloc_input (size:704b/88B) */ +struct hwrm_cfa_tunnel_filter_alloc_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * If set to 1, Green color is supported on this LED. If set to - * 0, Green color is not supported on this LED. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_COLOR_CAPS_GREEN_SUPPORTED \ - UINT32_C(0x4) - uint8_t led3_id; - /* An identifier for the LED #3. */ - uint8_t led3_type; - /* The type of LED #3. */ - /* Speed LED */ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_TYPE_SPEED UINT32_C(0x0) - /* Activity LED */ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_TYPE_ACTIVITY UINT32_C(0x1) - /* Invalid */ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_TYPE_INVALID UINT32_C(0xff) - uint8_t led3_group_id; + uint16_t cmpl_ring; /* - * An identifier for the group of LEDs that LED #3 belongs to. - * If set to 0, then the LED #0 cannot be grouped. For all other - * non-zero values of this field, LED #0 is grouped together - * with the LEDs with the same group ID value. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint8_t unused_4; - uint16_t led3_state_caps; - /* The states supported by LED #3. */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ + uint64_t resp_addr; + uint32_t flags; + /* Setting of this flag indicates the applicability to the loopback path. */ + #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_FLAGS_LOOPBACK \ + UINT32_C(0x1) + uint32_t enables; /* - * If set to 1, this LED is enabled. 
If set to 0, this LED is - * disabled. + * This bit must be '1' for the l2_filter_id field to be + * configured. */ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_STATE_CAPS_ENABLED UINT32_C(0x1) + #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID \ + UINT32_C(0x1) /* - * If set to 1, off state is supported on this LED. If set to 0, - * off state is not supported on this LED. + * This bit must be '1' for the l2_addr field to be + * configured. */ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_STATE_CAPS_OFF_SUPPORTED \ + #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR \ UINT32_C(0x2) /* - * If set to 1, on state is supported on this LED. If set to 0, - * on state is not supported on this LED. + * This bit must be '1' for the l2_ivlan field to be + * configured. */ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_STATE_CAPS_ON_SUPPORTED \ + #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN \ UINT32_C(0x4) /* - * If set to 1, blink state is supported on this LED. If set to - * 0, blink state is not supported on this LED. + * This bit must be '1' for the l3_addr field to be + * configured. */ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_STATE_CAPS_BLINK_SUPPORTED \ + #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_L3_ADDR \ UINT32_C(0x8) /* - * If set to 1, blink_alt state is supported on this LED. If set - * to 0, blink_alt state is not supported on this LED. + * This bit must be '1' for the l3_addr_type field to be + * configured. */ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_STATE_CAPS_BLINK_ALT_SUPPORTED \ + #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_L3_ADDR_TYPE \ UINT32_C(0x10) - uint16_t led3_color_caps; - /* The colors supported by LED #3. */ - /* reserved */ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_COLOR_CAPS_RSVD UINT32_C(0x1) /* - * If set to 1, Amber color is supported on this LED. If set to - * 0, Amber color is not supported on this LED. + * This bit must be '1' for the t_l3_addr_type field to be + * configured. */ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_COLOR_CAPS_AMBER_SUPPORTED \ - UINT32_C(0x2) + #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_T_L3_ADDR_TYPE \ + UINT32_C(0x20) /* - * If set to 1, Green color is supported on this LED. If set to - * 0, Green color is not supported on this LED. + * This bit must be '1' for the t_l3_addr field to be + * configured. */ - #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_COLOR_CAPS_GREEN_SUPPORTED \ - UINT32_C(0x4) - uint8_t unused_5; - uint8_t unused_6; - uint8_t unused_7; - uint8_t valid; + #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_T_L3_ADDR \ + UINT32_C(0x40) /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * This bit must be '1' for the tunnel_type field to be + * configured. */ -} __attribute__((packed)); - -/* hwrm_queue_qportcfg */ -/* - * Description: This function is called by a driver to query queue configuration - * of a port. # The HWRM shall at least advertise one queue with lossy service - * profile. # The driver shall use this command to query queue ids before - * configuring or using any queues. # If a service profile is not set for a - * queue, then the driver shall not use that queue without configuring a service - * profile for it. 
# If the driver is not allowed to configure service profiles, - * then the driver shall only use queues for which service profiles are pre- - * configured. - */ -/* Input (24 bytes) */ -struct hwrm_queue_qportcfg_input { - uint16_t req_type; + #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE \ + UINT32_C(0x80) /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * This bit must be '1' for the vni field to be + * configured. */ - uint16_t cmpl_ring; + #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_VNI \ + UINT32_C(0x100) /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * This bit must be '1' for the dst_vnic_id field to be + * configured. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_DST_VNIC_ID \ + UINT32_C(0x200) /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * This bit must be '1' for the mirror_vnic_id field to be + * configured. */ - uint64_t resp_addr; + #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID \ + UINT32_C(0x400) /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * This value identifies a set of CFA data structures used for an L2 + * context. */ - uint32_t flags; + uint64_t l2_filter_id; /* - * Enumeration denoting the RX, TX type of the resource. This - * enumeration is used for resources that are similar for both - * TX and RX paths of the chip. + * This value sets the match value for the inner L2 + * MAC address. + * Destination MAC address for RX path. + * Source MAC address for TX path. */ - #define HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH UINT32_C(0x1) - /* tx path */ - #define HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX UINT32_C(0x0) - /* rx path */ - #define HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX UINT32_C(0x1) - #define HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_LAST \ - QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX - uint16_t port_id; + uint8_t l2_addr[6]; /* - * Port ID of port for which the queue configuration is being - * queried. This field is only required when sent by IPC. + * This value sets VLAN ID value for inner VLAN. + * Only 12-bits of VLAN ID are used in setting the filter. */ - uint16_t unused_0; -} __attribute__((packed)); - -/* Output (32 bytes) */ -struct hwrm_queue_qportcfg_output { - uint16_t error_code; + uint16_t l2_ivlan; + /* + * The value of inner destination IP address to be used in filtering. + * For IPv4, first four bytes represent the IP address. + */ + uint32_t l3_addr[4]; + /* + * The value of tunnel destination IP address to be used in filtering. + * For IPv4, first four bytes represent the IP address. + */ + uint32_t t_l3_addr[4]; + /* + * This value indicates the type of inner IP address. + * 4 - IPv4 + * 6 - IPv6 + * All others are invalid. + */ + uint8_t l3_addr_type; + /* + * This value indicates the type of tunnel IP address. + * 4 - IPv4 + * 6 - IPv6 + * All others are invalid. + */ + uint8_t t_l3_addr_type; + /* Tunnel Type. 
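+ *
+ * For illustration (a sketch, not part of the generated
+ * interface): a VXLAN tunnel filter keyed on an existing L2
+ * filter and a VNI could be requested roughly as follows,
+ * assuming a hypothetical hwrm_send() transport helper:
+ *
+ *   struct hwrm_cfa_tunnel_filter_alloc_input req = { 0 };
+ *   req.req_type = rte_cpu_to_le_16(HWRM_CFA_TUNNEL_FILTER_ALLOC);
+ *   req.l2_filter_id = rte_cpu_to_le_64(l2_filter_id);
+ *   req.tunnel_type =
+ *           HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN;
+ *   req.vni = rte_cpu_to_le_32(vni);
+ *   req.enables = rte_cpu_to_le_32(
+ *           HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID |
+ *           HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE |
+ *           HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_VNI);
+ *   rc = hwrm_send(bp, &req, sizeof(req));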
 */
+ uint8_t tunnel_type;
+ /* Non-tunnel */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NONTUNNEL \
+ UINT32_C(0x0)
+ /* Virtual eXtensible Local Area Network (VXLAN) */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN \
+ UINT32_C(0x1)
+ /* Network Virtualization Generic Routing Encapsulation (NVGRE) */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NVGRE \
+ UINT32_C(0x2)
+ /* Generic Routing Encapsulation (GRE) inside Ethernet payload */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_L2GRE \
+ UINT32_C(0x3)
+ /* IP in IP */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPIP \
+ UINT32_C(0x4)
+ /* Generic Network Virtualization Encapsulation (Geneve) */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_GENEVE \
+ UINT32_C(0x5)
+ /* Multi-Protocol Label Switching (MPLS) */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_MPLS \
+ UINT32_C(0x6)
+ /* Stateless Transport Tunnel (STT) */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_STT \
+ UINT32_C(0x7)
+ /* Generic Routing Encapsulation (GRE) inside IP datagram payload */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPGRE \
+ UINT32_C(0x8)
+ /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_V4 \
+ UINT32_C(0x9)
+ /* Any tunneled traffic */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL \
+ UINT32_C(0xff)
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_LAST \
+ HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL
 /*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * tunnel_flags allows the user to indicate the tunnel tag detection
+ * for the tunnel type specified in tunnel_type.
 */
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint8_t tunnel_flags;
 /*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * If the tunnel_type is geneve, then this bit indicates if we
+ * need to match the geneve OAM packet.
+ * If the tunnel_type is nvgre or gre, then this bit indicates if
+ * we need to detect the checksum present bit in the gre header.
+ * If the tunnel_type is mpls, then this bit indicates if we need
+ * to match mpls packet with explicit IPV4/IPV6 null header.
 */
- uint8_t max_configurable_queues;
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_FLAGS_TUN_FLAGS_OAM_CHECKSUM_EXPLHDR \
+ UINT32_C(0x1)
 /*
- * The maximum number of queues that can be configured on this
- * port. Valid values range from 1 through 8.
+ * If the tunnel_type is geneve, then this bit indicates if we
+ * need to detect the critical option bit set in the oam packet.
+ * If the tunnel_type is nvgre or gre, then this bit indicates
+ * if we need to match nvgre packets with key present bit set in
+ * gre header.
+ * If the tunnel_type is mpls, then this bit indicates if we
+ * need to match mpls packet with S bit from inner/second label.
+ */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_FLAGS_TUN_FLAGS_CRITICAL_OPT_S1 \
+ UINT32_C(0x2)
 /*
- * The maximum number of lossless queues that can be configured
- * on this port. Valid values range from 0 through 8.
+ * If the tunnel_type is geneve, then this bit indicates if we + * need to match geneve packet with extended header bit set in + * geneve header. + * If the tunnel_type is nvgre or gre, then this bit indicates + * if we need to match nvgre packets with sequence number + * present bit set in gre header. + * If the tunnel_type is mpls, then this bit indicates if we + * need to match mpls packet with S bit from out/first label. */ - uint8_t queue_cfg_allowed; + #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_FLAGS_TUN_FLAGS_EXTHDR_SEQNUM_S0 \ + UINT32_C(0x4) /* - * Bitmask indicating which queues can be configured by the - * hwrm_queue_cfg command. Each bit represents a specific queue - * where bit 0 represents queue 0 and bit 7 represents queue 7. - * # A value of 0 indicates that the queue is not configurable - * by the hwrm_queue_cfg command. # A value of 1 indicates that - * the queue is configurable. # A hwrm_queue_cfg command shall - * return error when trying to configure a queue not - * configurable. + * Virtual Network Identifier (VNI). Only valid with + * tunnel_types VXLAN, NVGRE, and Geneve. + * Only lower 24-bits of VNI field are used + * in setting up the filter. */ - uint8_t queue_cfg_info; - /* Information about queue configuration. */ + uint32_t vni; + /* Logical VNIC ID of the destination VNIC. */ + uint32_t dst_vnic_id; /* - * If this flag is set to '1', then the queues are configured - * asymmetrically on TX and RX sides. If this flag is set to - * '0', then the queues are configured symmetrically on TX and - * RX sides. For symmetric configuration, the queue - * configuration including queue ids and service profiles on the - * TX side is the same as the corresponding queue configuration - * on the RX side. + * Logical VNIC ID of the VNIC where traffic is + * mirrored. */ - #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_CFG_INFO_ASYM_CFG UINT32_C(0x1) - uint8_t queue_pfcenable_cfg_allowed; + uint32_t mirror_vnic_id; +} __attribute__((packed)); + +/* hwrm_cfa_tunnel_filter_alloc_output (size:192b/24B) */ +struct hwrm_cfa_tunnel_filter_alloc_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* This value is an opaque id into CFA data structures. */ + uint64_t tunnel_filter_id; /* - * Bitmask indicating which queues can be configured by the - * hwrm_queue_pfcenable_cfg command. Each bit represents a - * specific priority where bit 0 represents priority 0 and bit 7 - * represents priority 7. # A value of 0 indicates that the - * priority is not configurable by the hwrm_queue_pfcenable_cfg - * command. # A value of 1 indicates that the priority is - * configurable. # A hwrm_queue_pfcenable_cfg command shall - * return error when trying to configure a priority that is not - * configurable. + * This is the ID of the flow associated with this + * filter. + * This value shall be used to match and associate the + * flow identifier returned in completion records. + * A value of 0xFFFFFFFF shall indicate no flow id. */ - uint8_t queue_pri2cos_cfg_allowed; + uint32_t flow_id; + uint8_t unused_0[3]; /* - * Bitmask indicating which queues can be configured by the - * hwrm_queue_pri2cos_cfg command. Each bit represents a - * specific queue where bit 0 represents queue 0 and bit 7 - * represents queue 7. 
# A value of 0 indicates that the queue - * is not configurable by the hwrm_queue_pri2cos_cfg command. # - * A value of 1 indicates that the queue is configurable. # A - * hwrm_queue_pri2cos_cfg command shall return error when trying - * to configure a queue that is not configurable. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - uint8_t queue_cos2bw_cfg_allowed; + uint8_t valid; +} __attribute__((packed)); + +/******************************* + * hwrm_cfa_tunnel_filter_free * + *******************************/ + + +/* hwrm_cfa_tunnel_filter_free_input (size:192b/24B) */ +struct hwrm_cfa_tunnel_filter_free_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * Bitmask indicating which queues can be configured by the - * hwrm_queue_pri2cos_cfg command. Each bit represents a - * specific queue where bit 0 represents queue 0 and bit 7 - * represents queue 7. # A value of 0 indicates that the queue - * is not configurable by the hwrm_queue_pri2cos_cfg command. # - * A value of 1 indicates that the queue is configurable. # A - * hwrm_queue_pri2cos_cfg command shall return error when trying - * to configure a queue not configurable. - */ - uint8_t queue_id0; - /* - * ID of CoS Queue 0. FF - Invalid id # This ID can be used on - * any subsequent call to an hwrm command that takes a queue id. - * # IDs must always be queried by this command before any use - * by the driver or software. # Any driver or software should - * not make any assumptions about queue IDs. # A value of 0xff - * indicates that the queue is not available. # Available queues - * may not be in sequential order. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint8_t queue_id0_service_profile; - /* This value is applicable to CoS queues only. */ - /* Lossy (best-effort) */ - #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSY \ - UINT32_C(0x0) - /* Lossless */ - #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS \ - UINT32_C(0x1) + uint16_t cmpl_ring; /* - * Set to 0xFF... (All Fs) if there is no - * service profile specified + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN \ - UINT32_C(0xff) - uint8_t queue_id1; + uint16_t seq_id; /* - * ID of CoS Queue 1. FF - Invalid id # This ID can be used on - * any subsequent call to an hwrm command that takes a queue id. - * # IDs must always be queried by this command before any use - * by the driver or software. # Any driver or software should - * not make any assumptions about queue IDs. # A value of 0xff - * indicates that the queue is not available. # Available queues - * may not be in sequential order. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint8_t queue_id1_service_profile; - /* This value is applicable to CoS queues only. 
*/ - /* Lossy (best-effort) */ - #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_LOSSY \ - UINT32_C(0x0) - /* Lossless */ - #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS \ - UINT32_C(0x1) + uint16_t target_id; /* - * Set to 0xFF... (All Fs) if there is no - * service profile specified + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN \ - UINT32_C(0xff) - uint8_t queue_id2; + uint64_t resp_addr; + /* This value is an opaque id into CFA data structures. */ + uint64_t tunnel_filter_id; +} __attribute__((packed)); + +/* hwrm_cfa_tunnel_filter_free_output (size:128b/16B) */ +struct hwrm_cfa_tunnel_filter_free_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * ID of CoS Queue 2. FF - Invalid id # This ID can be used on - * any subsequent call to an hwrm command that takes a queue id. - * # IDs must always be queried by this command before any use - * by the driver or software. # Any driver or software should - * not make any assumptions about queue IDs. # A value of 0xff - * indicates that the queue is not available. # Available queues - * may not be in sequential order. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - uint8_t queue_id2_service_profile; - /* This value is applicable to CoS queues only. */ - /* Lossy (best-effort) */ - #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_LOSSY \ - UINT32_C(0x0) - /* Lossless */ - #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS \ - UINT32_C(0x1) + uint8_t valid; +} __attribute__((packed)); + +/*************************************** + * hwrm_cfa_redirect_tunnel_type_alloc * + ***************************************/ + + +/* hwrm_cfa_redirect_tunnel_type_alloc_input (size:192b/24B) */ +struct hwrm_cfa_redirect_tunnel_type_alloc_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * Set to 0xFF... (All Fs) if there is no - * service profile specified + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN \ - UINT32_C(0xff) - uint8_t queue_id3; + uint16_t cmpl_ring; /* - * ID of CoS Queue 3. FF - Invalid id # This ID can be used on - * any subsequent call to an hwrm command that takes a queue id. - * # IDs must always be queried by this command before any use - * by the driver or software. # Any driver or software should - * not make any assumptions about queue IDs. # A value of 0xff - * indicates that the queue is not available. # Available queues - * may not be in sequential order. + * The sequence ID is used by the driver for tracking multiple + * commands. 
This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint8_t queue_id3_service_profile; - /* This value is applicable to CoS queues only. */ - /* Lossy (best-effort) */ - #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_LOSSY \ - UINT32_C(0x0) - /* Lossless */ - #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS \ - UINT32_C(0x1) + uint16_t seq_id; /* - * Set to 0xFF... (All Fs) if there is no - * service profile specified + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN \ - UINT32_C(0xff) - uint8_t queue_id4; + uint16_t target_id; /* - * ID of CoS Queue 4. FF - Invalid id # This ID can be used on - * any subsequent call to an hwrm command that takes a queue id. - * # IDs must always be queried by this command before any use - * by the driver or software. # Any driver or software should - * not make any assumptions about queue IDs. # A value of 0xff - * indicates that the queue is not available. # Available queues - * may not be in sequential order. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint8_t queue_id4_service_profile; - /* This value is applicable to CoS queues only. */ - /* Lossy (best-effort) */ - #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_LOSSY \ + uint64_t resp_addr; + /* The destination function id, to whom the traffic is redirected. */ + uint16_t dest_fid; + /* Tunnel Type. */ + uint8_t tunnel_type; + /* Non-tunnel */ + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_NONTUNNEL \ UINT32_C(0x0) - /* Lossless */ - #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS \ + /* Virtual eXtensible Local Area Network (VXLAN) */ + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_VXLAN \ UINT32_C(0x1) - /* - * Set to 0xFF... 
(All Fs) if there is no
- * service profile specified
- */
- #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN \
+ /* Network Virtualization Generic Routing Encapsulation (NVGRE) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_NVGRE \
+ UINT32_C(0x2)
+ /* Generic Routing Encapsulation (GRE) inside Ethernet payload */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_L2GRE \
+ UINT32_C(0x3)
+ /* IP in IP */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_IPIP \
+ UINT32_C(0x4)
+ /* Generic Network Virtualization Encapsulation (Geneve) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_GENEVE \
+ UINT32_C(0x5)
+ /* Multi-Protocol Label Switching (MPLS) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_MPLS \
+ UINT32_C(0x6)
+ /* Stateless Transport Tunnel (STT) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_STT \
+ UINT32_C(0x7)
+ /* Generic Routing Encapsulation (GRE) inside IP datagram payload */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_IPGRE \
+ UINT32_C(0x8)
+ /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_V4 \
+ UINT32_C(0x9)
+ /* Enhanced Generic Routing Encapsulation (GRE version 1) inside IP datagram payload */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_IPGRE_V1 \
+ UINT32_C(0xa)
+ /* Any tunneled traffic */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL \
 UINT32_C(0xff)
- uint8_t queue_id5;
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_LAST \
+ HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL
+ /* Tunnel alloc flags. */
+ uint8_t flags;
+ /* Setting of this flag indicates a request to modify an existing redirect tunnel to a new destination function ID. */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_FLAGS_MODIFY_DST \
+ UINT32_C(0x1)
+ uint8_t unused_0[4];
+} __attribute__((packed));
+
+/* hwrm_cfa_redirect_tunnel_type_alloc_output (size:128b/16B) */
+struct hwrm_cfa_redirect_tunnel_type_alloc_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
 /*
- * ID of CoS Queue 5. FF - Invalid id # This ID can be used on
- * any subsequent call to an hwrm command that takes a queue id.
- * # IDs must always be queried by this command before any use
- * by the driver or software. # Any driver or software should
- * not make any assumptions about queue IDs. # A value of 0xff
- * indicates that the queue is not available. # Available queues
- * may not be in sequential order.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
 */
- uint8_t queue_id5_service_profile;
- /* This value is applicable to CoS queues only.
*/ - /* Lossy (best-effort) */ - #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_LOSSY \ - UINT32_C(0x0) - /* Lossless */ - #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS \ - UINT32_C(0x1) + uint8_t valid; +} __attribute__((packed)); + +/************************************** + * hwrm_cfa_redirect_tunnel_type_free * + **************************************/ + + +/* hwrm_cfa_redirect_tunnel_type_free_input (size:192b/24B) */ +struct hwrm_cfa_redirect_tunnel_type_free_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * Set to 0xFF... (All Fs) if there is no - * service profile specified + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN \ - UINT32_C(0xff) - uint8_t queue_id6; + uint16_t cmpl_ring; /* - * ID of CoS Queue 6. FF - Invalid id # This ID can be used on - * any subsequent call to an hwrm command that takes a queue id. - * # IDs must always be queried by this command before any use - * by the driver or software. # Any driver or software should - * not make any assumptions about queue IDs. # A value of 0xff - * indicates that the queue is not available. # Available queues - * may not be in sequential order. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint8_t queue_id6_service_profile; - /* This value is applicable to CoS queues only. */ - /* Lossy (best-effort) */ - #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_LOSSY \ - UINT32_C(0x0) - /* Lossless */ - #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS \ - UINT32_C(0x1) + uint16_t seq_id; /* - * Set to 0xFF... (All Fs) if there is no - * service profile specified + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN \ - UINT32_C(0xff) - uint8_t queue_id7; + uint16_t target_id; /* - * ID of CoS Queue 7. FF - Invalid id # This ID can be used on - * any subsequent call to an hwrm command that takes a queue id. - * # IDs must always be queried by this command before any use - * by the driver or software. # Any driver or software should - * not make any assumptions about queue IDs. # A value of 0xff - * indicates that the queue is not available. # Available queues - * may not be in sequential order. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint8_t queue_id7_service_profile; - /* This value is applicable to CoS queues only. */ - /* Lossy (best-effort) */ - #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_LOSSY \ + uint64_t resp_addr; + /* The destination function id, to whom the traffic is redirected. */ + uint16_t dest_fid; + /* Tunnel Type. 
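+ *
+ * Illustrative sketch (not part of the generated interface):
+ * undoing an earlier redirect allocation reuses the same
+ * dest_fid/tunnel_type pair, assuming a hypothetical hwrm_send()
+ * transport helper and the matching request-type constant defined
+ * elsewhere in this file:
+ *
+ *   struct hwrm_cfa_redirect_tunnel_type_free_input req = { 0 };
+ *   req.req_type =
+ *           rte_cpu_to_le_16(HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE);
+ *   req.dest_fid = rte_cpu_to_le_16(dest_fid);
+ *   req.tunnel_type =
+ *           HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_VXLAN;
+ *   rc = hwrm_send(bp, &req, sizeof(req));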
 */
+ uint8_t tunnel_type;
+ /* Non-tunnel */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_NONTUNNEL \
 UINT32_C(0x0)
- /* Lossless */
- #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS \
+ /* Virtual eXtensible Local Area Network (VXLAN) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_VXLAN \
 UINT32_C(0x1)
- /*
- * Set to 0xFF... (All Fs) if there is no
- * service profile specified
- */
- #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN \
+ /* Network Virtualization Generic Routing Encapsulation (NVGRE) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_NVGRE \
+ UINT32_C(0x2)
+ /* Generic Routing Encapsulation (GRE) inside Ethernet payload */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_L2GRE \
+ UINT32_C(0x3)
+ /* IP in IP */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_IPIP \
+ UINT32_C(0x4)
+ /* Generic Network Virtualization Encapsulation (Geneve) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_GENEVE \
+ UINT32_C(0x5)
+ /* Multi-Protocol Label Switching (MPLS) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_MPLS \
+ UINT32_C(0x6)
+ /* Stateless Transport Tunnel (STT) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_STT \
+ UINT32_C(0x7)
+ /* Generic Routing Encapsulation (GRE) inside IP datagram payload */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_IPGRE \
+ UINT32_C(0x8)
+ /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_VXLAN_V4 \
+ UINT32_C(0x9)
+ /* Enhanced Generic Routing Encapsulation (GRE version 1) inside IP datagram payload */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_IPGRE_V1 \
+ UINT32_C(0xa)
+ /* Any tunneled traffic */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_ANYTUNNEL \
 UINT32_C(0xff)
- uint8_t valid;
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_LAST \
+ HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_ANYTUNNEL
+ uint8_t unused_0[5];
+} __attribute__((packed));
+
+/* hwrm_cfa_redirect_tunnel_type_free_output (size:128b/16B) */
+struct hwrm_cfa_redirect_tunnel_type_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
 /*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
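+ *
+ * In practice (an illustrative sketch, not part of the generated
+ * interface) a driver polls this byte before trusting the rest of
+ * the response, with resp and timeout as assumed locals:
+ *
+ *   volatile uint8_t *valid = &resp->valid;
+ *   while (!*valid && timeout--)
+ *           rte_delay_us(1);
+ *   rte_io_rmb(); /* read valid before reading the payload */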
*/ + uint8_t valid; } __attribute__((packed)); -/********************* - * hwrm_port_mac_cfg * - *********************/ +/************************************** + * hwrm_cfa_redirect_tunnel_type_info * + **************************************/ -/* hwrm_port_mac_cfg_input (size:320b/40B) */ -struct hwrm_port_mac_cfg_input { +/* hwrm_cfa_redirect_tunnel_type_info_input (size:192b/24B) */ +struct hwrm_cfa_redirect_tunnel_type_info_input { + /* The HWRM command request type. */ uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ uint64_t resp_addr; - uint32_t flags; - #define PORT_MAC_CFG_REQ_FLAGS_MATCH_LINK 0x1UL - #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_ENABLE 0x2UL - #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_ENABLE 0x4UL - #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_ENABLE 0x8UL - #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE 0x10UL - #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE 0x20UL - #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE 0x40UL - #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE 0x80UL - #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_ENABLE 0x100UL - #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_DISABLE 0x200UL - #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_DISABLE 0x400UL - #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_DISABLE 0x800UL - #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_DISABLE 0x1000UL - uint32_t enables; - #define PORT_MAC_CFG_REQ_ENABLES_IPG 0x1UL - #define PORT_MAC_CFG_REQ_ENABLES_LPBK 0x2UL - #define PORT_MAC_CFG_REQ_ENABLES_VLAN_PRI2COS_MAP_PRI 0x4UL - #define PORT_MAC_CFG_REQ_ENABLES_TUNNEL_PRI2COS_MAP_PRI 0x10UL - #define PORT_MAC_CFG_REQ_ENABLES_DSCP2COS_MAP_PRI 0x20UL - #define PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE 0x40UL - #define PORT_MAC_CFG_REQ_ENABLES_TX_TS_CAPTURE_PTP_MSG_TYPE 0x80UL - #define PORT_MAC_CFG_REQ_ENABLES_COS_FIELD_CFG 0x100UL - uint16_t port_id; - uint8_t ipg; - uint8_t lpbk; - #define PORT_MAC_CFG_REQ_LPBK_NONE 0x0UL - #define PORT_MAC_CFG_REQ_LPBK_LOCAL 0x1UL - #define PORT_MAC_CFG_REQ_LPBK_REMOTE 0x2UL - #define PORT_MAC_CFG_REQ_LPBK_LAST PORT_MAC_CFG_REQ_LPBK_REMOTE - uint8_t vlan_pri2cos_map_pri; - uint8_t reserved1; - uint8_t tunnel_pri2cos_map_pri; - uint8_t dscp2pri_map_pri; - uint16_t rx_ts_capture_ptp_msg_type; - uint16_t tx_ts_capture_ptp_msg_type; - uint8_t cos_field_cfg; - #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_RSVD1 0x1UL - #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_MASK 0x6UL - #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_SFT 1 - #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_INNERMOST \ - (0x0UL << 1) - #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTER \ - (0x1UL << 1) - #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTERMOST \ - (0x2UL << 1) - #define 
PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED \ - (0x3UL << 1) - #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_LAST \ - PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED - #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_MASK 0x18UL - #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_SFT 3 - #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_INNERMOST \ - (0x0UL << 3) - #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTER \ - (0x1UL << 3) - #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTERMOST \ - (0x2UL << 3) - #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED \ - (0x3UL << 3) - #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_LAST \ - PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED - #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_MASK 0xe0UL - #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_SFT 5 - uint8_t unused_0[3]; -}; - + /* The source function id. */ + uint16_t src_fid; + /* Tunnel Type. */ + uint8_t tunnel_type; + /* Non-tunnel */ + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_NONTUNNEL \ + UINT32_C(0x0) + /* Virtual eXtensible Local Area Network (VXLAN) */ + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_VXLAN \ + UINT32_C(0x1) + /* Network Virtualization Generic Routing Encapsulation (NVGRE) */ + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_NVGRE \ + UINT32_C(0x2) + /* Generic Routing Encapsulation (GRE) inside Ethernet payload */ + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_L2GRE \ + UINT32_C(0x3) + /* IP in IP */ + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_IPIP \ + UINT32_C(0x4) + /* Generic Network Virtualization Encapsulation (Geneve) */ + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_GENEVE \ + UINT32_C(0x5) + /* Multi-Protocol Label Switching (MPLS) */ + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_MPLS \ + UINT32_C(0x6) + /* Stateless Transport Tunnel (STT) */ + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_STT \ + UINT32_C(0x7) + /* Generic Routing Encapsulation (GRE) inside IP datagram payload */ + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_IPGRE \ + UINT32_C(0x8) + /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */ + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_VXLAN_V4 \ + UINT32_C(0x9) + /* Enhanced Generic Routing Encapsulation (GRE version 1) inside IP datagram payload */ + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_IPGRE_V1 \ + UINT32_C(0xa) + /* Any tunneled traffic */ + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_ANYTUNNEL \ + UINT32_C(0xff) + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_LAST \ + HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_ANYTUNNEL + uint8_t unused_0[5]; +} __attribute__((packed)); -/* hwrm_port_mac_cfg_output (size:128b/16B) */ -struct hwrm_port_mac_cfg_output { +/* hwrm_cfa_redirect_tunnel_type_info_output (size:128b/16B) */ +struct hwrm_cfa_redirect_tunnel_type_info_output { + /* The specific error status for the command. */ uint16_t error_code; + /* The HWRM command request type. */ uint16_t req_type; + /* The sequence ID from the original command. */ uint16_t seq_id; + /* The length of the response data in number of bytes.
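
The same tunnel_type encoding recurs in every CFA command in this section, so a driver typically centralizes the translation from DPDK's tunnel enum. A sketch only; enum rte_eth_tunnel_type comes from DPDK's rte_eth_ctrl.h, and which types a given device actually supports is the caller's concern:

#include <stdint.h>
#include <rte_eth_ctrl.h>
#include "hsi_struct_def_dpdk.h"

static uint8_t
tunnel_type_to_hwrm(enum rte_eth_tunnel_type type)
{
	switch (type) {
	case RTE_TUNNEL_TYPE_VXLAN:
		return HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_VXLAN;
	case RTE_TUNNEL_TYPE_GENEVE:
		return HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_GENEVE;
	case RTE_TUNNEL_TYPE_NVGRE:
		return HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_NVGRE;
	default:
		/* no tunnel parsing requested */
		return HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_NONTUNNEL;
	}
}
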
*/ uint16_t resp_len; - uint16_t mru; - uint16_t mtu; - uint8_t ipg; - uint8_t lpbk; - #define PORT_MAC_CFG_RESP_LPBK_NONE 0x0UL - #define PORT_MAC_CFG_RESP_LPBK_LOCAL 0x1UL - #define PORT_MAC_CFG_RESP_LPBK_REMOTE 0x2UL - #define PORT_MAC_CFG_RESP_LPBK_LAST PORT_MAC_CFG_RESP_LPBK_REMOTE - uint8_t unused_0; - uint8_t valid; -}; + /* The destination function id, to whom the traffic is redirected. */ + uint16_t dest_fid; + uint8_t unused_0[5]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/* hwrm_vxlan_ipv4_hdr (size:128b/16B) */ +struct hwrm_vxlan_ipv4_hdr { + /* IPv4 version and header length. */ + uint8_t ver_hlen; + /* IPv4 header length */ + #define HWRM_VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_MASK UINT32_C(0xf) + #define HWRM_VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT 0 + /* Version */ + #define HWRM_VXLAN_IPV4_HDR_VER_HLEN_VERSION_MASK UINT32_C(0xf0) + #define HWRM_VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT 4 + /* IPv4 type of service. */ + uint8_t tos; + /* IPv4 identification. */ + uint16_t ip_id; + /* IPv4 flags and offset. */ + uint16_t flags_frag_offset; + /* IPv4 TTL. */ + uint8_t ttl; + /* IPv4 protocol. */ + uint8_t protocol; + /* IPv4 source address. */ + uint32_t src_ip_addr; + /* IPv4 destination address. */ + uint32_t dest_ip_addr; +} __attribute__((packed)); +/* hwrm_vxlan_ipv6_hdr (size:320b/40B) */ +struct hwrm_vxlan_ipv6_hdr { + /* IPv6 version, traffic class and flow label. */ + uint32_t ver_tc_flow_label; + /* IPv6 version shift */ + #define HWRM_VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_SFT \ + UINT32_C(0x1c) + /* IPv6 version mask */ + #define HWRM_VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_MASK \ + UINT32_C(0xf0000000) + /* IPv6 TC shift */ + #define HWRM_VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_SFT \ + UINT32_C(0x14) + /* IPv6 TC mask */ + #define HWRM_VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_MASK \ + UINT32_C(0xff00000) + /* IPv6 flow label shift */ + #define HWRM_VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_SFT \ + UINT32_C(0x0) + /* IPv6 flow label mask */ + #define HWRM_VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_MASK \ + UINT32_C(0xfffff) + #define HWRM_VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_LAST \ + HWRM_VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_MASK + /* IPv6 payload length. */ + uint16_t payload_len; + /* IPv6 next header. */ + uint8_t next_hdr; + /* IPv6 TTL. */ + uint8_t ttl; + /* IPv6 source address. */ + uint32_t src_ip_addr[4]; + /* IPv6 destination address. */ + uint32_t dest_ip_addr[4]; +} __attribute__((packed)); -/********************** - * hwrm_port_mac_qcfg * - **********************/ +/* hwrm_cfa_encap_data_vxlan (size:576b/72B) */ +struct hwrm_cfa_encap_data_vxlan { + /* Source MAC address. */ + uint8_t src_mac_addr[6]; + /* reserved. */ + uint16_t unused_0; + /* Destination MAC address. */ + uint8_t dst_mac_addr[6]; + /* Number of VLAN tags. */ + uint8_t num_vlan_tags; + /* reserved. */ + uint8_t unused_1; + /* Outer VLAN TPID. */ + uint16_t ovlan_tpid; + /* Outer VLAN TCI. */ + uint16_t ovlan_tci; + /* Inner VLAN TPID. */ + uint16_t ivlan_tpid; + /* Inner VLAN TCI. */ + uint16_t ivlan_tci; + /* L3 header fields. */ + uint32_t l3[10]; + /* IP version mask. 
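
For reference, the two nibbles of ver_hlen pack exactly like a classic IPv4 version/IHL byte; for a 20-byte header with no options the composed value is 0x45. A sketch using the masks and shifts above (byte ordering of the wider fields is left to the caller):

#include <stdint.h>
#include "hsi_struct_def_dpdk.h"

static void
vxlan_outer_ipv4_init(struct hwrm_vxlan_ipv4_hdr *ip)
{
	const uint8_t version = 4;
	const uint8_t ihl = 5;	/* header length in 32-bit words: 20 bytes */

	ip->ver_hlen = (uint8_t)((version << HWRM_VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT) |
				 (ihl << HWRM_VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT));
	ip->ttl = 64;
	ip->protocol = 17;	/* UDP, which carries the VXLAN header */
}
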
*/ + #define HWRM_CFA_ENCAP_DATA_VXLAN_L3_VER_MASK UINT32_C(0xf) + /* IP version 4. */ + #define HWRM_CFA_ENCAP_DATA_VXLAN_L3_VER_IPV4 UINT32_C(0x4) + /* IP version 6. */ + #define HWRM_CFA_ENCAP_DATA_VXLAN_L3_VER_IPV6 UINT32_C(0x6) + #define HWRM_CFA_ENCAP_DATA_VXLAN_L3_LAST \ + HWRM_CFA_ENCAP_DATA_VXLAN_L3_VER_IPV6 + /* UDP source port. */ + uint16_t src_port; + /* UDP destination port. */ + uint16_t dst_port; + /* VXLAN Network Identifier. */ + uint32_t vni; +} __attribute__((packed)); +/******************************* + * hwrm_cfa_encap_record_alloc * + *******************************/ -/* hwrm_port_mac_qcfg_input (size:192b/24B) */ -struct hwrm_port_mac_qcfg_input { + +/* hwrm_cfa_encap_record_alloc_input (size:832b/104B) */ +struct hwrm_cfa_encap_record_alloc_input { + /* The HWRM command request type. */ uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ uint64_t resp_addr; - uint16_t port_id; - uint8_t unused_0[6]; -}; - + uint32_t flags; + /* Setting of this flag indicates the applicability to the loopback path. */ + #define HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_FLAGS_LOOPBACK \ + UINT32_C(0x1) + /* Encapsulation Type. */ + uint8_t encap_type; + /* Virtual eXtensible Local Area Network (VXLAN) */ + #define HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_VXLAN \ + UINT32_C(0x1) + /* Network Virtualization Generic Routing Encapsulation (NVGRE) */ + #define HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_NVGRE \ + UINT32_C(0x2) + /* Generic Routing Encapsulation (GRE) inside Ethernet payload */ + #define HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_L2GRE \ + UINT32_C(0x3) + /* IP in IP */ + #define HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_IPIP \ + UINT32_C(0x4) + /* Generic Network Virtualization Encapsulation (Geneve) */ + #define HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_GENEVE \ + UINT32_C(0x5) + /* Multi-Protocol Label Switching (MPLS) */ + #define HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_MPLS \ + UINT32_C(0x6) + /* VLAN */ + #define HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_VLAN \ + UINT32_C(0x7) + /* Generic Routing Encapsulation (GRE) inside IP datagram payload */ + #define HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_IPGRE \ + UINT32_C(0x8) + #define HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_LAST \ + HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_IPGRE + uint8_t unused_0[3]; + /* This value is encap data used for the given encap type. */ + uint32_t encap_data[20]; +} __attribute__((packed)); -/* hwrm_port_mac_qcfg_output (size:192b/24B) */ -struct hwrm_port_mac_qcfg_output { +/* hwrm_cfa_encap_record_alloc_output (size:128b/16B) */ +struct hwrm_cfa_encap_record_alloc_output { + /* The specific error status for the command. */ uint16_t error_code; + /* The HWRM command request type.
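
Tying the pieces together: the 72-byte hwrm_cfa_encap_data_vxlan above is what a VXLAN encap record carries inside the 80-byte encap_data[] of the alloc request. A layout sketch only; byte ordering of the individual header fields and the actual send path are out of scope here:

#include <string.h>
#include "hsi_struct_def_dpdk.h"

static void
encap_record_req_fill(struct hwrm_cfa_encap_record_alloc_input *req,
		      const struct hwrm_cfa_encap_data_vxlan *vxlan)
{
	req->encap_type = HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_VXLAN;
	/* the 72B VXLAN template fits in encap_data (20 * 4B = 80B) */
	memcpy(req->encap_data, vxlan, sizeof(*vxlan));
}
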
*/ uint16_t req_type; + /* The sequence ID from the original command. */ uint16_t seq_id; + /* The length of the response data in number of bytes. */ uint16_t resp_len; - uint16_t mru; - uint16_t mtu; - uint8_t ipg; - uint8_t lpbk; - #define PORT_MAC_QCFG_RESP_LPBK_NONE 0x0UL - #define PORT_MAC_QCFG_RESP_LPBK_LOCAL 0x1UL - #define PORT_MAC_QCFG_RESP_LPBK_REMOTE 0x2UL - #define PORT_MAC_QCFG_RESP_LPBK_LAST PORT_MAC_QCFG_RESP_LPBK_REMOTE - uint8_t vlan_pri2cos_map_pri; - uint8_t flags; - #define PORT_MAC_QCFG_RESP_FLAGS_VLAN_PRI2COS_ENABLE 0x1UL - #define PORT_MAC_QCFG_RESP_FLAGS_TUNNEL_PRI2COS_ENABLE 0x2UL - #define PORT_MAC_QCFG_RESP_FLAGS_IP_DSCP2COS_ENABLE 0x4UL - #define PORT_MAC_QCFG_RESP_FLAGS_OOB_WOL_ENABLE 0x8UL - #define PORT_MAC_QCFG_RESP_FLAGS_PTP_RX_TS_CAPTURE_ENABLE 0x10UL - #define PORT_MAC_QCFG_RESP_FLAGS_PTP_TX_TS_CAPTURE_ENABLE 0x20UL - uint8_t tunnel_pri2cos_map_pri; - uint8_t dscp2pri_map_pri; - uint16_t rx_ts_capture_ptp_msg_type; - uint16_t tx_ts_capture_ptp_msg_type; - uint8_t cos_field_cfg; - #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_RSVD 0x1UL - #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_MASK 0x6UL - #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_SFT 1 - #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_INNERMOST \ - (0x0UL << 1) - #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_OUTER \ - (0x1UL << 1) - #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_OUTERMOST \ - (0x2UL << 1) - #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED \ - (0x3UL << 1) - #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_LAST \ - PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED - #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_MASK 0x18UL - #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_SFT 3 - #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_INNERMOST \ - (0x0UL << 3) - #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTER \ - (0x1UL << 3) - #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTERMOST \ - (0x2UL << 3) - #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED \ - (0x3UL << 3) - #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_LAST \ - PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED - #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_DEFAULT_COS_MASK 0xe0UL - #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_DEFAULT_COS_SFT 5 - uint8_t valid; -}; - - -/************************** - * hwrm_port_mac_ptp_qcfg * - **************************/ + /* This value is an opaque id into CFA data structures. */ + uint32_t encap_record_id; + uint8_t unused_0[3]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); +/****************************** + * hwrm_cfa_encap_record_free * + ******************************/ -/* hwrm_port_mac_ptp_qcfg_input (size:192b/24B) */ -struct hwrm_port_mac_ptp_qcfg_input { + +/* hwrm_cfa_encap_record_free_input (size:192b/24B) */ +struct hwrm_cfa_encap_record_free_input { + /* The HWRM command request type. */ uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. 
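
The encap_record_id in the alloc response is the opaque handle that the free request below takes back. A pairing sketch; hwrm_send() is a hypothetical transport helper standing in for the driver's real HWRM mailbox path, assumed to stamp req_type, seq_id and resp_addr itself:

#include <stddef.h>
#include <stdint.h>
#include "hsi_struct_def_dpdk.h"

/* hypothetical: issue a request and wait for its response */
int hwrm_send(void *req, size_t req_len, void *resp, size_t resp_len);

static int
encap_record_release(uint32_t encap_record_id)
{
	struct hwrm_cfa_encap_record_free_input req = { 0 };
	struct hwrm_cfa_encap_record_free_output resp = { 0 };

	/* handle previously returned in hwrm_cfa_encap_record_alloc_output */
	req.encap_record_id = encap_record_id;
	return hwrm_send(&req, sizeof(req), &resp, sizeof(resp));
}
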
+ */ uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. + */ uint64_t resp_addr; - uint16_t port_id; - uint8_t unused_0[6]; -}; - + /* This value is an opaque id into CFA data structures. */ + uint32_t encap_record_id; + uint8_t unused_0[4]; +} __attribute__((packed)); -/* hwrm_port_mac_ptp_qcfg_output (size:640b/80B) */ -struct hwrm_port_mac_ptp_qcfg_output { +/* hwrm_cfa_encap_record_free_output (size:128b/16B) */ +struct hwrm_cfa_encap_record_free_output { + /* The specific error status for the command. */ uint16_t error_code; + /* The HWRM command request type. */ uint16_t req_type; + /* The sequence ID from the original command. */ uint16_t seq_id; + /* The length of the response data in number of bytes. */ uint16_t resp_len; - uint8_t flags; - #define PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS 0x1UL - #define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS 0x2UL - uint8_t unused_0[3]; - uint32_t rx_ts_reg_off_lower; - uint32_t rx_ts_reg_off_upper; - uint32_t rx_ts_reg_off_seq_id; - uint32_t rx_ts_reg_off_src_id_0; - uint32_t rx_ts_reg_off_src_id_1; - uint32_t rx_ts_reg_off_src_id_2; - uint32_t rx_ts_reg_off_domain_id; - uint32_t rx_ts_reg_off_fifo; - uint32_t rx_ts_reg_off_fifo_adv; - uint32_t rx_ts_reg_off_granularity; - uint32_t tx_ts_reg_off_lower; - uint32_t tx_ts_reg_off_upper; - uint32_t tx_ts_reg_off_seq_id; - uint32_t tx_ts_reg_off_fifo; - uint32_t tx_ts_reg_off_granularity; - uint8_t unused_1[7]; - uint8_t valid; -}; - - -/* hwrm_vnic_alloc */ -/* - * Description: This VNIC is a resource in the RX side of the chip that is used - * to represent a virtual host "interface". # At the time of VNIC allocation or - * configuration, the function can specify whether it wants the requested VNIC - * to be the default VNIC for the function or not. # If a function requests - * allocation of a VNIC for the first time and a VNIC is successfully allocated - * by the HWRM, then the HWRM shall make the allocated VNIC as the default VNIC - * for that function. # The default VNIC shall be used for the default action - * for a partition or function. # For each VNIC allocated on a function, a - * mapping on the RX side to map the allocated VNIC to source virtual interface - * shall be performed by the HWRM. This should be hidden to the function driver - * requesting the VNIC allocation. This enables broadcast/multicast replication - * with source knockout. # If multicast replication with source knockout is - * enabled, then the internal VNIC to SVIF mapping data structures shall be - * programmed at the time of VNIC allocation. - */ -/* Input (24 bytes) */ -struct hwrm_vnic_alloc_input { - uint16_t req_type; + uint8_t unused_0[7]; /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * This field is used in Output records to indicate that the output + * is completely written to RAM. 
This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - uint16_t cmpl_ring; + uint8_t valid; +} __attribute__((packed)); + +/******************************** + * hwrm_cfa_ntuple_filter_alloc * + ********************************/ + + +/* hwrm_cfa_ntuple_filter_alloc_input (size:1024b/128B) */ +struct hwrm_cfa_ntuple_filter_alloc_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + uint16_t cmpl_ring; /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint64_t resp_addr; + uint16_t seq_id; /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint32_t flags; + uint16_t target_id; /* - * When this bit is '1', this VNIC is requested to be the - * default VNIC for this function. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - #define HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT UINT32_C(0x1) - uint32_t unused_0; -} __attribute__((packed)); - -/* Output (16 bytes) */ -struct hwrm_vnic_alloc_output { - uint16_t error_code; + uint64_t resp_addr; + uint32_t flags; + /* Setting of this flag indicates the applicability to the loopback path. */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_LOOPBACK \ + UINT32_C(0x1) /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * Setting of this flag indicates drop action. If this flag is not set, + * then it should be considered accept action. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP \ + UINT32_C(0x2) /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * Setting of this flag indicates that a meter is expected to be attached + * to this flow. This hint can be used when choosing the action record + * format required for the flow. 
*/ - uint32_t vnic_id; - /* Logical vnic ID */ - uint8_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t valid; + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER \ + UINT32_C(0x4) + uint32_t enables; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * This bit must be '1' for the l2_filter_id field to be + * configured. */ -} __attribute__((packed)); - -/* hwrm_vnic_free */ -/* - * Description: Free a VNIC resource. Idle any resources associated with the - * VNIC as well as the VNIC. Reset and release all resources associated with the - * VNIC. - */ -/* Input (24 bytes) */ -struct hwrm_vnic_free_input { - uint16_t req_type; + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID \ + UINT32_C(0x1) /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * This bit must be '1' for the ethertype field to be + * configured. */ - uint16_t cmpl_ring; + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE \ + UINT32_C(0x2) /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * This bit must be '1' for the tunnel_type field to be + * configured. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE \ + UINT32_C(0x4) /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * This bit must be '1' for the src_macaddr field to be + * configured. */ - uint64_t resp_addr; + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR \ + UINT32_C(0x8) /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * This bit must be '1' for the ipaddr_type field to be + * configured. */ - uint32_t vnic_id; - /* Logical vnic ID */ - uint32_t unused_0; -} __attribute__((packed)); - -/* Output (16 bytes) */ -struct hwrm_vnic_free_output { - uint16_t error_code; + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE \ + UINT32_C(0x10) /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * This bit must be '1' for the src_ipaddr field to be + * configured. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR \ + UINT32_C(0x20) /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * This bit must be '1' for the src_ipaddr_mask field to be + * configured. 
*/ - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK \ + UINT32_C(0x40) /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * This bit must be '1' for the dst_ipaddr field to be + * configured. */ -} __attribute__((packed)); - -/* hwrm_vnic_cfg */ -/* Description: Configure the RX VNIC structure. */ -/* Input (40 bytes) */ -struct hwrm_vnic_cfg_input { - uint16_t req_type; + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR \ + UINT32_C(0x80) /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * This bit must be '1' for the dst_ipaddr_mask field to be + * configured. */ - uint16_t cmpl_ring; + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK \ + UINT32_C(0x100) /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * This bit must be '1' for the ip_protocol field to be + * configured. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL \ + UINT32_C(0x200) /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * This bit must be '1' for the src_port field to be + * configured. */ - uint64_t resp_addr; + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT \ + UINT32_C(0x400) /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * This bit must be '1' for the src_port_mask field to be + * configured. */ - uint32_t flags; + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK \ + UINT32_C(0x800) /* - * When this bit is '1', the VNIC is requested to be the default - * VNIC for the function. + * This bit must be '1' for the dst_port field to be + * configured. */ - #define HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT UINT32_C(0x1) + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT \ + UINT32_C(0x1000) /* - * When this bit is '1', the VNIC is being configured to strip - * VLAN in the RX path. If set to '0', then VLAN stripping is - * disabled on this VNIC. + * This bit must be '1' for the dst_port_mask field to be + * configured. */ - #define HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE UINT32_C(0x2) + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK \ + UINT32_C(0x2000) /* - * When this bit is '1', the VNIC is being configured to buffer - * receive packets in the hardware until the host posts new - * receive buffers. If set to '0', then bd_stall is being - * configured to be disabled on this VNIC. + * This bit must be '1' for the pri_hint field to be + * configured. 
*/ - #define HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE UINT32_C(0x4) + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_PRI_HINT \ + UINT32_C(0x4000) /* - * When this bit is '1', the VNIC is being configured to receive - * both RoCE and non-RoCE traffic. If set to '0', then this VNIC - * is not configured to be operating in dual VNIC mode. + * This bit must be '1' for the ntuple_filter_id field to be + * configured. */ - #define HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_DUAL_VNIC_MODE UINT32_C(0x8) + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_NTUPLE_FILTER_ID \ + UINT32_C(0x8000) /* - * When this flag is set to '1', the VNIC is requested to be - * configured to receive only RoCE traffic. If this flag is set - * to '0', then this flag shall be ignored by the HWRM. If - * roce_dual_vnic_mode flag is set to '1', then the HWRM client - * shall not set this flag to '1'. + * This bit must be '1' for the dst_id field to be + * configured. */ - #define HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_ONLY_VNIC_MODE UINT32_C(0x10) + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID \ + UINT32_C(0x10000) /* - * When a VNIC uses one destination ring group for certain - * application (e.g. Receive Flow Steering) where exact match is - * used to direct packets to a VNIC with one destination ring - * group only, there is no need to configure RSS indirection - * table for that VNIC as only one destination ring group is - * used. This flag is used to enable a mode where RSS is enabled - * in the VNIC using a RSS context for computing RSS hash but - * the RSS indirection table is not configured using - * hwrm_vnic_rss_cfg. If this mode is enabled, then the driver - * should not program RSS indirection table for the RSS context - * that is used for computing RSS hash only. - */ - #define HWRM_VNIC_CFG_INPUT_FLAGS_RSS_DFLT_CR_MODE UINT32_C(0x20) - /* - * When this bit is '1', the VNIC is being configured to receive - * both RoCE and non-RoCE traffic, but forward only the RoCE - * traffic further. Also, RoCE traffic can be mirrored to L2 - * driver. + * This bit must be '1' for the mirror_vnic_id field to be + * configured. */ - #define HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE \ - UINT32_C(0x40) - uint32_t enables; + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID \ + UINT32_C(0x20000) /* - * This bit must be '1' for the dflt_ring_grp field to be + * This bit must be '1' for the dst_macaddr field to be * configured. */ - #define HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP UINT32_C(0x1) - /* This bit must be '1' for the rss_rule field to be configured. */ - #define HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE UINT32_C(0x2) - /* This bit must be '1' for the cos_rule field to be configured. */ - #define HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE UINT32_C(0x4) - /* This bit must be '1' for the lb_rule field to be configured. */ - #define HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE UINT32_C(0x8) - /* This bit must be '1' for the mru field to be configured. */ - #define HWRM_VNIC_CFG_INPUT_ENABLES_MRU UINT32_C(0x10) - uint16_t vnic_id; - /* Logical vnic ID */ - uint16_t dflt_ring_grp; + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR \ + UINT32_C(0x40000) /* - * Default Completion ring for the VNIC. This ring will be - * chosen if packet does not match any RSS rules and if there is - * no COS rule. + * This value identifies a set of CFA data structures used for an L2 + * context. */ - uint16_t rss_rule; + uint64_t l2_filter_id; /* - * RSS ID for RSS rule/table structure. 0xFF... 
(All Fs) if - * there is no RSS rule. + * This value indicates the source MAC address in + * the Ethernet header. */ - uint16_t cos_rule; + uint8_t src_macaddr[6]; + /* This value indicates the ethertype in the Ethernet header. */ + uint16_t ethertype; /* - * RSS ID for COS rule/table structure. 0xFF... (All Fs) if - * there is no COS rule. + * This value indicates the type of IP address. + * 4 - IPv4 + * 6 - IPv6 + * All others are invalid. */ - uint16_t lb_rule; + uint8_t ip_addr_type; + /* invalid */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_UNKNOWN \ + UINT32_C(0x0) + /* IPv4 */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 \ + UINT32_C(0x4) + /* IPv6 */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 \ + UINT32_C(0x6) + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_LAST \ + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 /* - * RSS ID for load balancing rule/table structure. 0xFF... (All - * Fs) if there is no LB rule. + * The value of the protocol field in the IP header. + * Applies to UDP and TCP traffic. + * 6 - TCP + * 17 - UDP */ - uint16_t mru; + uint8_t ip_protocol; + /* invalid */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_PROTOCOL_UNKNOWN \ + UINT32_C(0x0) + /* TCP */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_PROTOCOL_TCP \ + UINT32_C(0x6) + /* UDP */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_PROTOCOL_UDP \ + UINT32_C(0x11) + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_PROTOCOL_LAST \ + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_PROTOCOL_UDP /* - * The maximum receive unit of the vnic. Each vnic is associated - * with a function. The vnic mru value overwrites the mru - * setting of the associated function. The HWRM shall make sure - * that vnic mru does not exceed the mru of the port the - * function is associated with. + * If set, this value shall represent the + * Logical VNIC ID of the destination VNIC for the RX + * path and network port id of the destination port for + * the TX path. */ - uint32_t unused_0; -} __attribute__((packed)); - -/* Output (16 bytes) */ -struct hwrm_vnic_cfg_output { - uint16_t error_code; + uint16_t dst_id; /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * Logical VNIC ID of the VNIC where traffic is + * mirrored. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + uint16_t mirror_vnic_id; /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * This value indicates the tunnel type for this filter. + * If this field is not specified, then the filter shall + * apply to both non-tunneled and tunneled packets. + * If this field conflicts with the tunnel_type specified + * in the l2_filter_id, then the HWRM shall return an + * error for this command.
*/ - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; + uint8_t tunnel_type; + /* Non-tunnel */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NONTUNNEL \ + UINT32_C(0x0) + /* Virtual eXtensible Local Area Network (VXLAN) */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN \ + UINT32_C(0x1) + /* Network Virtualization Generic Routing Encapsulation (NVGRE) */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NVGRE \ + UINT32_C(0x2) + /* Generic Routing Encapsulation (GRE) inside Ethernet payload */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_L2GRE \ + UINT32_C(0x3) + /* IP in IP */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPIP \ + UINT32_C(0x4) + /* Generic Network Virtualization Encapsulation (Geneve) */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_GENEVE \ + UINT32_C(0x5) + /* Multi-Protocol Label Switching (MPLS) */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_MPLS \ + UINT32_C(0x6) + /* Stateless Transport Tunnel (STT) */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_STT \ + UINT32_C(0x7) + /* Generic Routing Encapsulation (GRE) inside IP datagram payload */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPGRE \ + UINT32_C(0x8) + /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_V4 \ + UINT32_C(0x9) + /* Any tunneled traffic */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL \ + UINT32_C(0xff) + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_LAST \ + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * This hint is provided to help in placing + * the filter in the filter table. */ -} __attribute__((packed)); - -/* hwrm_vnic_qcfg */ -/* - * Description: Query the RX VNIC structure. This function can be used by a PF - * driver to query its own VNIC resource or VNIC resource of its child VF. This - * function can also be used by a VF driver to query its own VNIC resource. - */ -/* Input (32 bytes) */ -struct hwrm_vnic_qcfg_input { - uint16_t req_type; + uint8_t pri_hint; + /* No preference */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_PRI_HINT_NO_PREFER \ + UINT32_C(0x0) + /* Above the given filter */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_PRI_HINT_ABOVE \ + UINT32_C(0x1) + /* Below the given filter */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_PRI_HINT_BELOW \ + UINT32_C(0x2) + /* As high as possible */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_PRI_HINT_HIGHEST \ + UINT32_C(0x3) + /* As low as possible */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_PRI_HINT_LOWEST \ + UINT32_C(0x4) + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_PRI_HINT_LAST \ + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_PRI_HINT_LOWEST /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * The value of source IP address to be used in filtering. + * For IPv4, first four bytes represent the IP address.
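
A sketch of how the enables mask pairs with the match fields (the address and port fields are declared just below): every field the firmware should honor needs its enables bit set, so a drop filter on TCP/IPv4 traffic to one host and port could be composed as follows; byte ordering of the values is left to the caller:

#include <stdint.h>
#include "hsi_struct_def_dpdk.h"

static void
ntuple_drop_filter_fill(struct hwrm_cfa_ntuple_filter_alloc_input *req,
			uint64_t l2_filter_id, uint32_t dst_ip, uint16_t dst_port)
{
	req->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
	req->enables =
		HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID |
		HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE |
		HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL |
		HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR |
		HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT;
	req->l2_filter_id = l2_filter_id;	/* L2 context this filter hangs off */
	req->ip_addr_type = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
	req->ip_protocol = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_PROTOCOL_TCP;
	req->dst_ipaddr[0] = dst_ip;	/* IPv4 occupies the first word */
	req->dst_port = dst_port;
}
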
*/ - uint16_t cmpl_ring; + uint32_t src_ipaddr[4]; /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * The value of source IP address mask to be used in + * filtering. + * For IPv4, first four bytes represent the IP address mask. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + uint32_t src_ipaddr_mask[4]; /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * The value of destination IP address to be used in filtering. + * For IPv4, first four bytes represent the IP address. */ - uint64_t resp_addr; + uint32_t dst_ipaddr[4]; /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * The value of destination IP address mask to be used in + * filtering. + * For IPv4, first four bytes represent the IP address mask. */ - uint32_t enables; - /* This bit must be '1' for the vf_id_valid field to be configured. */ - #define HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID UINT32_C(0x1) - uint32_t vnic_id; - /* Logical vnic ID */ - uint16_t vf_id; - /* ID of Virtual Function whose VNIC resource is being queried. */ - uint16_t unused_0[3]; -} __attribute__((packed)); - -/* Output (32 bytes) */ -struct hwrm_vnic_qcfg_output { - uint16_t error_code; + uint32_t dst_ipaddr_mask[4]; /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * The value of source port to be used in filtering. + * Applies to UDP and TCP traffic. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + uint16_t src_port; /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * The value of source port mask to be used in filtering. + * Applies to UDP and TCP traffic. */ - uint16_t dflt_ring_grp; - /* Default Completion ring for the VNIC. */ - uint16_t rss_rule; + uint16_t src_port_mask; /* - * RSS ID for RSS rule/table structure. 0xFF... (All Fs) if - * there is no RSS rule. + * The value of destination port to be used in filtering. + * Applies to UDP and TCP traffic. */ - uint16_t cos_rule; + uint16_t dst_port; /* - * RSS ID for COS rule/table structure. 0xFF... (All Fs) if - * there is no COS rule. + * The value of destination port mask to be used in + * filtering. + * Applies to UDP and TCP traffic. */ - uint16_t lb_rule; + uint16_t dst_port_mask; /* - * RSS ID for load balancing rule/table structure. 0xFF... (All - * Fs) if there is no LB rule. + * This is the ID of the filter that goes along with + * the pri_hint. */ - uint16_t mru; - /* The maximum receive unit of the vnic. */ - uint8_t unused_0; - uint8_t unused_1; - uint32_t flags; + uint64_t ntuple_filter_id_hint; +} __attribute__((packed)); + +/* hwrm_cfa_ntuple_filter_alloc_output (size:192b/24B) */ +struct hwrm_cfa_ntuple_filter_alloc_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. 
*/ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* This value is an opaque id into CFA data structures. */ + uint64_t ntuple_filter_id; /* - * When this bit is '1', the VNIC is the default VNIC for the - * function. + * This is the ID of the flow associated with this + * filter. + * This value shall be used to match and associate the + * flow identifier returned in completion records. + * A value of 0xFFFFFFFF shall indicate no flow id. */ - #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT UINT32_C(0x1) + uint32_t flow_id; + uint8_t unused_0[3]; /* - * When this bit is '1', the VNIC is configured to strip VLAN in - * the RX path. If set to '0', then VLAN stripping is disabled - * on this VNIC. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE UINT32_C(0x2) + uint8_t valid; +} __attribute__((packed)); + +/* hwrm_cfa_ntuple_filter_alloc_cmd_err (size:64b/8B) */ +struct hwrm_cfa_ntuple_filter_alloc_cmd_err { /* - * When this bit is '1', the VNIC is configured to buffer - * receive packets in the hardware until the host posts new - * receive buffers. If set to '0', then bd_stall is disabled on - * this VNIC. + * command specific error codes that goes to + * the cmd_err field in Common HWRM Error Response. */ - #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE UINT32_C(0x4) + uint8_t code; + /* Unknown error */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_UNKNOWN \ + UINT32_C(0x0) + /* Unable to complete operation due to conflict with Rx Mask VLAN */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_RX_MASK_VLAN_CONFLICT_ERR \ + UINT32_C(0x1) + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_LAST \ + HWRM_CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_RX_MASK_VLAN_CONFLICT_ERR + uint8_t unused_0[7]; +} __attribute__((packed)); + +/******************************* + * hwrm_cfa_ntuple_filter_free * + *******************************/ + + +/* hwrm_cfa_ntuple_filter_free_input (size:192b/24B) */ +struct hwrm_cfa_ntuple_filter_free_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * When this bit is '1', the VNIC is configured to receive both - * RoCE and non-RoCE traffic. If set to '0', then this VNIC is - * not configured to operate in dual VNIC mode. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE UINT32_C(0x8) + uint16_t cmpl_ring; /* - * When this flag is set to '1', the VNIC is configured to - * receive only RoCE traffic. When this flag is set to '0', the - * VNIC is not configured to receive only RoCE traffic. If - * roce_dual_vnic_mode flag and this flag both are set to '1', - * then it is an invalid configuration of the VNIC. The HWRM - * should not allow that type of mis-configuration by HWRM - * clients. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. 
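
One consumer-side detail from the alloc response above: flow_id uses 0xFFFFFFFF as a "no flow id" sentinel, so completion-record matching must check for it before using the value. A sketch:

#include <stdbool.h>
#include <stdint.h>
#include "hsi_struct_def_dpdk.h"

static bool
ntuple_alloc_resp_flow_id(const struct hwrm_cfa_ntuple_filter_alloc_output *resp,
			  uint32_t *flow_id)
{
	if (resp->flow_id == UINT32_C(0xFFFFFFFF))
		return false;	/* no flow associated with this filter */
	*flow_id = resp->flow_id;
	return true;
}
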
*/ - #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE UINT32_C(0x10) + uint16_t seq_id; /* - * When a VNIC uses one destination ring group for certain - * application (e.g. Receive Flow Steering) where exact match is - * used to direct packets to a VNIC with one destination ring - * group only, there is no need to configure RSS indirection - * table for that VNIC as only one destination ring group is - * used. When this bit is set to '1', then the VNIC is enabled - * in a mode where RSS is enabled in the VNIC using a RSS - * context for computing RSS hash but the RSS indirection table - * is not configured. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE UINT32_C(0x20) + uint16_t target_id; /* - * When this bit is '1', the VNIC is configured to receive both - * RoCE and non-RoCE traffic, but forward only RoCE traffic - * further. Also RoCE traffic can be mirrored to L2 driver. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE \ - UINT32_C(0x40) - uint32_t unused_2; - uint8_t unused_3; - uint8_t unused_4; - uint8_t unused_5; - uint8_t valid; + uint64_t resp_addr; + /* This value is an opaque id into CFA data structures. */ + uint64_t ntuple_filter_id; +} __attribute__((packed)); + +/* hwrm_cfa_ntuple_filter_free_output (size:128b/16B) */ +struct hwrm_cfa_ntuple_filter_free_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ + uint8_t valid; } __attribute__((packed)); +/****************************** + * hwrm_cfa_ntuple_filter_cfg * + ******************************/ -/* hwrm_vnic_tpa_cfg */ -/* Description: This function is used to enable/configure TPA on the VNIC. */ -/* Input (40 bytes) */ -struct hwrm_vnic_tpa_cfg_input { - uint16_t req_type; + +/* hwrm_cfa_ntuple_filter_cfg_input (size:384b/48B) */ +struct hwrm_cfa_ntuple_filter_cfg_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. 
*/ - uint16_t cmpl_ring; + uint16_t cmpl_ring; /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + uint16_t seq_id; /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint64_t resp_addr; + uint16_t target_id; /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint32_t flags; + uint64_t resp_addr; + uint32_t enables; /* - * When this bit is '1', the VNIC shall be configured to perform - * transparent packet aggregation (TPA) of non-tunneled TCP - * packets. + * This bit must be '1' for the new_dst_id field to be + * configured. */ - #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA UINT32_C(0x1) + #define HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_ENABLES_NEW_DST_ID \ + UINT32_C(0x1) /* - * When this bit is '1', the VNIC shall be configured to perform - * transparent packet aggregation (TPA) of tunneled TCP packets. + * This bit must be '1' for the new_mirror_vnic_id field to be + * configured. */ - #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA UINT32_C(0x2) + #define HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_ENABLES_NEW_MIRROR_VNIC_ID \ + UINT32_C(0x2) /* - * When this bit is '1', the VNIC shall be configured to perform - * transparent packet aggregation (TPA) according to Windows - * Receive Segment Coalescing (RSC) rules. + * This bit must be '1' for the new_meter_instance_id field to be + * configured. */ - #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE UINT32_C(0x4) + #define HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_ENABLES_NEW_METER_INSTANCE_ID \ + UINT32_C(0x4) + uint8_t unused_0[4]; + /* This value is an opaque id into CFA data structures. */ + uint64_t ntuple_filter_id; /* - * When this bit is '1', the VNIC shall be configured to perform - * transparent packet aggregation (TPA) according to Linux - * Generic Receive Offload (GRO) rules. + * If set, this value shall represent the new + * Logical VNIC ID of the destination VNIC for the RX + * path and new network port id of the destination port for + * the TX path. */ - #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO UINT32_C(0x8) + uint32_t new_dst_id; /* - * When this bit is '1', the VNIC shall be configured to perform - * transparent packet aggregation (TPA) for TCP packets with IP - * ECN set to non-zero. + * New Logical VNIC ID of the VNIC where traffic is + * mirrored. 
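
As with the alloc command, only the fields whose enables bit is set are applied; re-pointing an existing filter at a different destination VNIC therefore touches exactly one gated field plus the handle. A sketch:

#include <stdint.h>
#include "hsi_struct_def_dpdk.h"

static void
ntuple_filter_retarget(struct hwrm_cfa_ntuple_filter_cfg_input *req,
		       uint64_t ntuple_filter_id, uint32_t new_vnic_id)
{
	req->enables = HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_ENABLES_NEW_DST_ID;
	req->ntuple_filter_id = ntuple_filter_id; /* handle from the alloc response */
	req->new_dst_id = new_vnic_id;	/* RX: logical VNIC; TX: network port id */
}
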
*/ - #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN UINT32_C(0x10) + uint32_t new_mirror_vnic_id; /* - * When this bit is '1', the VNIC shall be configured to perform - * transparent packet aggregation (TPA) for GRE tunneled TCP - * packets only if all packets have the same GRE sequence. + * New meter to attach to the flow. Specifying the + * invalid instance ID is used to remove any existing + * meter from the flow. */ - #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ \ - UINT32_C(0x20) + uint16_t new_meter_instance_id; /* - * When this bit is '1' and the GRO mode is enabled, the VNIC - * shall be configured to perform transparent packet aggregation - * (TPA) for TCP/IPv4 packets with consecutively increasing - * IPIDs. In other words, the last packet that is being - * aggregated to an already existing aggregation context shall - * have IPID 1 more than the IPID of the last packet that was - * aggregated in that aggregation context. + * A value of 0xffff is considered invalid and implies the + * instance is not configured. */ - #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO_IPID_CHECK UINT32_C(0x40) + #define HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_NEW_METER_INSTANCE_ID_INVALID \ + UINT32_C(0xffff) + #define HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_NEW_METER_INSTANCE_ID_LAST \ + HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_NEW_METER_INSTANCE_ID_INVALID + uint8_t unused_1[6]; +} __attribute__((packed)); + +/* hwrm_cfa_ntuple_filter_cfg_output (size:128b/16B) */ +struct hwrm_cfa_ntuple_filter_cfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * When this bit is '1' and the GRO mode is enabled, the VNIC - * shall be configured to perform transparent packet aggregation - * (TPA) for TCP packets with the same TTL (IPv4) or Hop limit - * (IPv6) value. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO_TTL_CHECK UINT32_C(0x80) - uint32_t enables; - /* This bit must be '1' for the max_agg_segs field to be configured. */ - #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS UINT32_C(0x1) - /* This bit must be '1' for the max_aggs field to be configured. */ - #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS UINT32_C(0x2) + uint8_t valid; +} __attribute__((packed)); + +/************************** + * hwrm_cfa_em_flow_alloc * + **************************/ + + +/* hwrm_cfa_em_flow_alloc_input (size:896b/112B) */ +struct hwrm_cfa_em_flow_alloc_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * This bit must be '1' for the max_agg_timer field to be - * configured. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_TIMER UINT32_C(0x4) - /* This bit must be '1' for the min_agg_len field to be configured.
*/ - #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN UINT32_C(0x8) - uint16_t vnic_id; - /* Logical vnic ID */ - uint16_t max_agg_segs; + uint16_t cmpl_ring; /* - * This is the maximum number of TCP segments that can be - * aggregated (unit is Log2). Max value is 31. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - /* 1 segment */ - #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_1 UINT32_C(0x0) - /* 2 segments */ - #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_2 UINT32_C(0x1) - /* 4 segments */ - #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_4 UINT32_C(0x2) - /* 8 segments */ - #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_8 UINT32_C(0x3) - /* Any segment size larger than this is not valid */ - #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_MAX UINT32_C(0x1f) - uint16_t max_aggs; + uint16_t seq_id; /* - * This is the maximum number of aggregations this VNIC is - * allowed (unit is Log2). Max value is 7 + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - /* 1 aggregation */ - #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_1 UINT32_C(0x0) - /* 2 aggregations */ - #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_2 UINT32_C(0x1) - /* 4 aggregations */ - #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_4 UINT32_C(0x2) - /* 8 aggregations */ - #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_8 UINT32_C(0x3) - /* 16 aggregations */ - #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_16 UINT32_C(0x4) - /* Any aggregation size larger than this is not valid */ - #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX UINT32_C(0x7) - uint8_t unused_0; - uint8_t unused_1; - uint32_t max_agg_timer; + uint16_t target_id; /* - * This is the maximum amount of time allowed for an aggregation - * context to complete after it was initiated. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint32_t min_agg_len; + uint64_t resp_addr; + uint32_t flags; /* - * This is the minimum amount of payload length required to - * start an aggregation context. + * Enumeration denoting the RX, TX type of the resource. + * This enumeration is used for resources that are similar for both + * TX and RX paths of the chip. */ -} __attribute__((packed)); - -/* Output (16 bytes) */ -struct hwrm_vnic_tpa_cfg_output { - uint16_t error_code; + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH UINT32_C(0x1) + /* tx path */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_TX UINT32_C(0x0) + /* rx path */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX UINT32_C(0x1) + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_LAST \ + HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * Setting of this flag indicates enabling of a byte counter for a given + * flow. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_BYTE_CTR UINT32_C(0x2) /* - * This field is the length of the response in bytes. 
The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * Setting of this flag indicates enabling of a packet counter for a given + * flow. */ - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PKT_CTR UINT32_C(0x4) + /* Setting of this flag indicates de-capsulation action for the given flow. */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DECAP UINT32_C(0x8) + /* Setting of this flag indicates encapsulation action for the given flow. */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_ENCAP UINT32_C(0x10) /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * Setting of this flag indicates drop action. If this flag is not set, + * then it should be considered accept action. */ -} __attribute__((packed)); - -/* hwrm_vnic_rss_cfg */ -/* Description: This function is used to enable RSS configuration. */ -/* Input (48 bytes) */ -struct hwrm_vnic_rss_cfg_input { - uint16_t req_type; + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP UINT32_C(0x20) /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * Setting of this flag indicates that a meter is expected to be attached + * to this flow. This hint can be used when choosing the action record + * format required for the flow. */ - uint16_t cmpl_ring; + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_METER UINT32_C(0x40) + uint32_t enables; /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * This bit must be '1' for the l2_filter_id field to be + * configured. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID \ + UINT32_C(0x1) /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * This bit must be '1' for the tunnel_type field to be + * configured. */ - uint64_t resp_addr; + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_TUNNEL_TYPE \ + UINT32_C(0x2) /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * This bit must be '1' for the tunnel_id field to be + * configured. */ - uint32_t hash_type; + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_TUNNEL_ID \ + UINT32_C(0x4) /* - * When this bit is '1', the RSS hash shall be computed over - * source and destination IPv4 addresses of IPv4 packets. + * This bit must be '1' for the src_macaddr field to be + * configured. */ - #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4 UINT32_C(0x1) + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR \ + UINT32_C(0x8) /* - * When this bit is '1', the RSS hash shall be computed over - * source/destination IPv4 addresses and source/destination - * ports of TCP/IPv4 packets. 
+ * This bit must be '1' for the dst_macaddr field to be + * configured. */ - #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4 UINT32_C(0x2) + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR \ + UINT32_C(0x10) /* - * When this bit is '1', the RSS hash shall be computed over - * source/destination IPv4 addresses and source/destination - * ports of UDP/IPv4 packets. + * This bit must be '1' for the ovlan_vid field to be + * configured. */ - #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4 UINT32_C(0x4) + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID \ + UINT32_C(0x20) /* - * When this bit is '1', the RSS hash shall be computed over - * source and destination IPv4 addresses of IPv6 packets. + * This bit must be '1' for the ivlan_vid field to be + * configured. */ - #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6 UINT32_C(0x8) + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID \ + UINT32_C(0x40) /* - * When this bit is '1', the RSS hash shall be computed over - * source/destination IPv6 addresses and source/destination - * ports of TCP/IPv6 packets. + * This bit must be '1' for the ethertype field to be + * configured. */ - #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6 UINT32_C(0x10) + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE \ + UINT32_C(0x80) /* - * When this bit is '1', the RSS hash shall be computed over - * source/destination IPv6 addresses and source/destination - * ports of UDP/IPv6 packets. + * This bit must be '1' for the src_ipaddr field to be + * configured. */ - #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6 UINT32_C(0x20) - uint32_t unused_0; - uint64_t ring_grp_tbl_addr; - /* This is the address for rss ring group table */ - uint64_t hash_key_tbl_addr; - /* This is the address for rss hash key table */ - uint16_t rss_ctx_idx; - /* Index to the rss indirection table. */ - uint16_t unused_1[3]; -} __attribute__((packed)); - -/* Output (16 bytes) */ -struct hwrm_vnic_rss_cfg_output { - uint16_t error_code; + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR \ + UINT32_C(0x100) /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * This bit must be '1' for the dst_ipaddr field to be + * configured. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR \ + UINT32_C(0x200) /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * This bit must be '1' for the ipaddr_type field to be + * configured. */ - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE \ + UINT32_C(0x400) /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * This bit must be '1' for the ip_protocol field to be + * configured. 
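+ * For example (an illustrative sketch only, not a requirement of
+ * the interface), an IPv4/TCP 5-tuple filter would set this bit
+ * together with the corresponding address and port bits in this
+ * structure's enables field:
+ *   enables = rte_cpu_to_le_32(
+ *           HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL |
+ *           HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE |
+ *           HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR |
+ *           HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR |
+ *           HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT |
+ *           HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT);
+ * with rte_cpu_to_le_32() shown only because multi-byte HWRM
+ * fields are little-endian on the wire.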
*/ -} __attribute__((packed)); - -/* hwrm_vnic_plcmodes_cfg */ -/* - * Description: This function can be used to set placement mode configuration of - * the VNIC. - */ -/* Input (40 bytes) */ -struct hwrm_vnic_plcmodes_cfg_input { - uint16_t req_type; + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL \ + UINT32_C(0x800) /* - * This value indicates what type of request this is. The format for the - * rest of the command is determined by this field. + * This bit must be '1' for the src_port field to be + * configured. */ - uint16_t cmpl_ring; + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT \ + UINT32_C(0x1000) /* - * This value indicates the what completion ring the request will be - * optionally completed on. If the value is -1, then no CR completion - * will be generated. Any other value must be a valid CR ring_id value - * for this function. + * This bit must be '1' for the dst_port field to be + * configured. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT \ + UINT32_C(0x2000) /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids - * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM + * This bit must be '1' for the dst_id field to be + * configured. */ - uint64_t resp_addr; + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID \ + UINT32_C(0x4000) /* - * This is the host address where the response will be written when the - * request is complete. This area must be 16B aligned and must be - * cleared to zero before the request is made. + * This bit must be '1' for the mirror_vnic_id field to be + * configured. */ - uint32_t flags; + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID \ + UINT32_C(0x8000) /* - * When this bit is '1', the VNIC shall be configured to use regular - * placement algorithm. By default, the regular placement algorithm - * shall be enabled on the VNIC. + * This bit must be '1' for the encap_record_id field to be + * configured. */ - #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_REGULAR_PLACEMENT \ - UINT32_C(0x1) + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ENCAP_RECORD_ID \ + UINT32_C(0x10000) /* - * When this bit is '1', the VNIC shall be configured use the jumbo - * placement algorithm. + * This bit must be '1' for the meter_instance_id field to be + * configured. */ - #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT \ - UINT32_C(0x2) + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_METER_INSTANCE_ID \ + UINT32_C(0x20000) /* - * When this bit is '1', the VNIC shall be configured to enable Header- - * Data split for IPv4 packets according to the following rules: # If - * the packet is identified as TCP/IPv4, then the packet is split at the - * beginning of the TCP payload. # If the packet is identified as - * UDP/IPv4, then the packet is split at the beginning of UDP payload. # - * If the packet is identified as non-TCP and non-UDP IPv4 packet, then - * the packet is split at the beginning of the upper layer protocol - * header carried in the IPv4 packet. + * This value identifies a set of CFA data structures used for an L2 + * context. */ - #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_IPV4 UINT32_C(0x4) + uint64_t l2_filter_id; + /* Tunnel Type. 
*/ + uint8_t tunnel_type; + /* Non-tunnel */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_NONTUNNEL \ + UINT32_C(0x0) + /* Virtual eXtensible Local Area Network (VXLAN) */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_VXLAN \ + UINT32_C(0x1) + /* Network Virtualization Generic Routing Encapsulation (NVGRE) */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_NVGRE \ + UINT32_C(0x2) + /* Generic Routing Encapsulation (GRE) inside Ethernet payload */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_L2GRE \ + UINT32_C(0x3) + /* IP in IP */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_IPIP \ + UINT32_C(0x4) + /* Generic Network Virtualization Encapsulation (Geneve) */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_GENEVE \ + UINT32_C(0x5) + /* Multi-Protocol Label Switching (MPLS) */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_MPLS \ + UINT32_C(0x6) + /* Stateless Transport Tunnel (STT) */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_STT \ + UINT32_C(0x7) + /* Generic Routing Encapsulation (GRE) inside IP datagram payload */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_IPGRE \ + UINT32_C(0x8) + /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_V4 \ + UINT32_C(0x9) + /* Any tunneled traffic */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL \ + UINT32_C(0xff) + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_LAST \ + HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL + uint8_t unused_0[3]; /* - * When this bit is '1', the VNIC shall be configured to enable Header- - * Data split for IPv6 packets according to the following rules: # If - * the packet is identified as TCP/IPv6, then the packet is split at the - * beginning of the TCP payload. # If the packet is identified as - * UDP/IPv6, then the packet is split at the beginning of UDP payload. # - * If the packet is identified as non-TCP and non-UDP IPv6 packet, then - * the packet is split at the beginning of the upper layer protocol - * header carried in the IPv6 packet. + * Tunnel identifier. + * Virtual Network Identifier (VNI). Only valid with + * tunnel_types VXLAN, NVGRE, and Geneve. + * Only lower 24-bits of VNI field are used + * in setting up the filter. */ - #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_IPV6 UINT32_C(0x8) + uint32_t tunnel_id; /* - * When this bit is '1', the VNIC shall be configured to enable Header- - * Data split for FCoE packets at the beginning of FC payload. + * This value indicates the source MAC address in + * the Ethernet header. */ - #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_FCOE UINT32_C(0x10) + uint8_t src_macaddr[6]; + /* The meter instance to attach to the flow. */ + uint16_t meter_instance_id; /* - * When this bit is '1', the VNIC shall be configured to enable Header- - * Data split for RoCE packets at the beginning of RoCE payload (after - * BTH/GRH headers). + * A value of 0xffff is considered invalid and implies the + * instance is not configured. */ - #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_ROCE UINT32_C(0x20) - uint32_t enables; + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_METER_INSTANCE_ID_INVALID \ + UINT32_C(0xffff) + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_METER_INSTANCE_ID_LAST \ + HWRM_CFA_EM_FLOW_ALLOC_INPUT_METER_INSTANCE_ID_INVALID /* - * This bit must be '1' for the jumbo_thresh_valid field to be - * configured. + * This value indicates the destination MAC address in + * the Ethernet header.
*/ - #define HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID \ - UINT32_C(0x1) + uint8_t dst_macaddr[6]; /* - * This bit must be '1' for the hds_offset_valid field to be configured. + * This value indicates the VLAN ID of the outer VLAN tag + * in the Ethernet header. */ - #define HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID \ - UINT32_C(0x2) + uint16_t ovlan_vid; /* - * This bit must be '1' for the hds_threshold_valid field to be - * configured. + * This value indicates the VLAN ID of the inner VLAN tag + * in the Ethernet header. */ - #define HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID \ - UINT32_C(0x4) - uint32_t vnic_id; - /* Logical vnic ID */ - uint16_t jumbo_thresh; + uint16_t ivlan_vid; + /* This value indicates the ethertype in the Ethernet header. */ + uint16_t ethertype; /* - * When jumbo placement algorithm is enabled, this value is used to - * determine the threshold for jumbo placement. Packets with length - * larger than this value will be placed according to the jumbo - * placement algorithm. + * This value indicates the type of IP address. + * 4 - IPv4 + * 6 - IPv6 + * All others are invalid. */ - uint16_t hds_offset; + uint8_t ip_addr_type; + /* invalid */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_UNKNOWN UINT32_C(0x0) + /* IPv4 */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 UINT32_C(0x4) + /* IPv6 */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 UINT32_C(0x6) + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_LAST \ + HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 /* - * This value is used to determine the offset into packet buffer where - * the split data (payload) will be placed according to one of of HDS - * placement algorithm. The lengths of packet buffers provided for split - * data shall be larger than this value. + * The value of protocol field in IP header. + * Applies to UDP and TCP traffic. + * 6 - TCP + * 17 - UDP */ - uint16_t hds_threshold; + uint8_t ip_protocol; + /* invalid */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_PROTOCOL_UNKNOWN UINT32_C(0x0) + /* TCP */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_PROTOCOL_TCP UINT32_C(0x6) + /* UDP */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_PROTOCOL_UDP UINT32_C(0x11) + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_PROTOCOL_LAST \ + HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_PROTOCOL_UDP + uint8_t unused_1[2]; /* - * When one of the HDS placement algorithm is enabled, this value is - * used to determine the threshold for HDS placement. Packets with - * length larger than this value will be placed according to the HDS - * placement algorithm. This value shall be in multiple of 4 bytes. + * The value of source IP address to be used in filtering. + * For IPv4, first four bytes represent the IP address. */ - uint16_t unused_0[3]; -} __attribute__((packed)); - -/* Output (16 bytes) */ -struct hwrm_vnic_plcmodes_cfg_output { - uint16_t error_code; + uint32_t src_ipaddr[4]; /* - * Pass/Fail or error type Note: receiver to verify the in parameters, - * and fail the call with an error when appropriate + * Big endian. + * The value of destination IP address to be used in filtering. + * For IPv4, first four bytes represent the IP address. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + uint32_t dst_ipaddr[4]; /* - * This field is the length of the response in bytes.
The last byte of - * the response is a valid flag that will read as '1' when the command - * has been completely written to memory. + * The value of source port to be used in filtering. + * Applies to UDP and TCP traffic. */ - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; + uint16_t src_port; /* - * This field is used in Output records to indicate that the output is - * completely written to RAM. This field should be read as '1' to - * indicate that the output has been completely written. When writing a - * command completion or response to an internal processor, the order of - * writes has to be such that this field is written last. + * The value of destination port to be used in filtering. + * Applies to UDP and TCP traffic. */ -} __attribute__((packed)); - -/* hwrm_vnic_plcmodes_qcfg */ -/* - * Description: This function can be used to query placement mode configuration - * of the VNIC. - */ -/* Input (24 bytes) */ -struct hwrm_vnic_plcmodes_qcfg_input { - uint16_t req_type; + uint16_t dst_port; /* - * This value indicates what type of request this is. The format for the - * rest of the command is determined by this field. + * If set, this value shall represent the + * Logical VNIC ID of the destination VNIC for the RX + * path and network port id of the destination port for + * the TX path. */ - uint16_t cmpl_ring; + uint16_t dst_id; /* - * This value indicates the what completion ring the request will be - * optionally completed on. If the value is -1, then no CR completion - * will be generated. Any other value must be a valid CR ring_id value - * for this function. + * Logical VNIC ID of the VNIC where traffic is + * mirrored. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + uint16_t mirror_vnic_id; + /* Logical ID of the encapsulation record. */ + uint32_t encap_record_id; + uint8_t unused_2[4]; +} __attribute__((packed)); + +/* hwrm_cfa_em_flow_alloc_output (size:192b/24B) */ +struct hwrm_cfa_em_flow_alloc_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* This value is an opaque id into CFA data structures. */ + uint64_t em_filter_id; /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids - * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM + * This is the ID of the flow associated with this + * filter. + * This value shall be used to match and associate the + * flow identifier returned in completion records. + * A value of 0xFFFFFFFF shall indicate no flow id. */ - uint64_t resp_addr; + uint32_t flow_id; + uint8_t unused_0[3]; /* - * This is the host address where the response will be written when the - * request is complete. This area must be 16B aligned and must be - * cleared to zero before the request is made. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. 
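+ * As an illustrative sketch only (with `resp` a hypothetical
+ * caller-mapped pointer to this response structure), a consumer
+ * would typically poll this byte before reading any other field:
+ *   while (!resp->valid)
+ *           rte_pause();
+ *   rte_rmb();
+ * where the read barrier keeps the payload reads from being
+ * reordered before the valid check.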
*/ - uint32_t vnic_id; - /* Logical vnic ID */ - uint32_t unused_0; + uint8_t valid; } __attribute__((packed)); -/* Output (24 bytes) */ -struct hwrm_vnic_plcmodes_qcfg_output { - uint16_t error_code; +/************************* + * hwrm_cfa_em_flow_free * + *************************/ + + +/* hwrm_cfa_em_flow_free_input (size:192b/24B) */ +struct hwrm_cfa_em_flow_free_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * Pass/Fail or error type Note: receiver to verify the in parameters, - * and fail the call with an error when appropriate + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + uint16_t cmpl_ring; /* - * This field is the length of the response in bytes. The last byte of - * the response is a valid flag that will read as '1' when the command - * has been completely written to memory. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint32_t flags; + uint16_t seq_id; /* - * When this bit is '1', the VNIC is configured to use regular placement - * algorithm. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_REGULAR_PLACEMENT \ - UINT32_C(0x1) + uint16_t target_id; /* - * When this bit is '1', the VNIC is configured to use the jumbo - * placement algorithm. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_JUMBO_PLACEMENT \ - UINT32_C(0x2) + uint64_t resp_addr; + /* This value is an opaque id into CFA data structures. */ + uint64_t em_filter_id; +} __attribute__((packed)); + +/* hwrm_cfa_em_flow_free_output (size:128b/16B) */ +struct hwrm_cfa_em_flow_free_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * When this bit is '1', the VNIC is configured to enable Header-Data - * split for IPv4 packets. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_HDS_IPV4 UINT32_C(0x4) + uint8_t valid; +} __attribute__((packed)); + +/************************ + * hwrm_cfa_em_flow_cfg * + ************************/ + + +/* hwrm_cfa_em_flow_cfg_input (size:384b/48B) */ +struct hwrm_cfa_em_flow_cfg_input { + /* The HWRM command request type. 
*/ + uint16_t req_type; /* - * When this bit is '1', the VNIC is configured to enable Header-Data - * split for IPv6 packets. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_HDS_IPV6 UINT32_C(0x8) + uint16_t cmpl_ring; /* - * When this bit is '1', the VNIC is configured to enable Header-Data - * split for FCoE packets. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_HDS_FCOE UINT32_C(0x10) + uint16_t seq_id; /* - * When this bit is '1', the VNIC is configured to enable Header-Data - * split for RoCE packets. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_HDS_ROCE UINT32_C(0x20) + uint16_t target_id; /* - * When this bit is '1', the VNIC is configured to be the default VNIC - * of the requesting function. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC UINT32_C(0x40) - uint16_t jumbo_thresh; + uint64_t resp_addr; + uint32_t enables; /* - * When jumbo placement algorithm is enabled, this value is used to - * determine the threshold for jumbo placement. Packets with length - * larger than this value will be placed according to the jumbo - * placement algorithm. + * This bit must be '1' for the new_dst_id field to be + * configured. */ - uint16_t hds_offset; + #define HWRM_CFA_EM_FLOW_CFG_INPUT_ENABLES_NEW_DST_ID \ + UINT32_C(0x1) /* - * This value is used to determine the offset into packet buffer where - * the split data (payload) will be placed according to one of of HDS - * placement algorithm. The lengths of packet buffers provided for split - * data shall be larger than this value. + * This bit must be '1' for the new_mirror_vnic_id field to be + * configured. */ - uint16_t hds_threshold; + #define HWRM_CFA_EM_FLOW_CFG_INPUT_ENABLES_NEW_MIRROR_VNIC_ID \ + UINT32_C(0x2) /* - * When one of the HDS placement algorithm is enabled, this value is - * used to determine the threshold for HDS placement. Packets with - * length larger than this value will be placed according to the HDS - * placement algorithm. This value shall be in multiple of 4 bytes. + * This bit must be '1' for the new_meter_instance_id field to be + * configured. */ - uint8_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t unused_4; - uint8_t valid; + #define HWRM_CFA_EM_FLOW_CFG_INPUT_ENABLES_NEW_METER_INSTANCE_ID \ + UINT32_C(0x4) + uint8_t unused_0[4]; + /* This value is an opaque id into CFA data structures. */ + uint64_t em_filter_id; /* - * This field is used in Output records to indicate that the output is - * completely written to RAM. This field should be read as '1' to - * indicate that the output has been completely written. When writing a - * command completion or response to an internal processor, the order of - * writes has to be such that this field is written last. 
+ * If set, this value shall represent the new + * Logical VNIC ID of the destination VNIC for the RX + * path and network port id of the destination port for + * the TX path. */ -} __attribute__((packed)); - -/* hwrm_vnic_rss_cos_lb_ctx_alloc */ -/* Description: This function is used to allocate COS/Load Balance context. */ -/* Input (16 bytes) */ -struct hwrm_vnic_rss_cos_lb_ctx_alloc_input { - uint16_t req_type; + uint32_t new_dst_id; /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * New Logical VNIC ID of the VNIC where traffic is + * mirrored. */ - uint16_t cmpl_ring; + uint32_t new_mirror_vnic_id; /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * New meter to attach to the flow. Specifying the + * invalid instance ID is used to remove any existing + * meter from the flow. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + uint16_t new_meter_instance_id; /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * A value of 0xffff is considered invalid and implies the + * instance is not configured. */ - uint64_t resp_addr; + #define HWRM_CFA_EM_FLOW_CFG_INPUT_NEW_METER_INSTANCE_ID_INVALID \ + UINT32_C(0xffff) + #define HWRM_CFA_EM_FLOW_CFG_INPUT_NEW_METER_INSTANCE_ID_LAST \ + HWRM_CFA_EM_FLOW_CFG_INPUT_NEW_METER_INSTANCE_ID_INVALID + uint8_t unused_1[6]; +} __attribute__((packed)); + +/* hwrm_cfa_em_flow_cfg_output (size:128b/16B) */ +struct hwrm_cfa_em_flow_cfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ + uint8_t valid; } __attribute__((packed)); -/* Output (16 bytes) */ -struct hwrm_vnic_rss_cos_lb_ctx_alloc_output { - uint16_t error_code; +/******************************** + * hwrm_cfa_meter_profile_alloc * + ********************************/ + + +/* hwrm_cfa_meter_profile_alloc_input (size:320b/40B) */ +struct hwrm_cfa_meter_profile_alloc_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command.
*/ - uint16_t resp_len; + uint16_t cmpl_ring; /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint16_t rss_cos_lb_ctx_id; - /* rss_cos_lb_ctx_id is 16 b */ - uint8_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t unused_4; - uint8_t valid; + uint16_t seq_id; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ -} __attribute__((packed)); - -/* hwrm_vnic_rss_cos_lb_ctx_free */ -/* Description: This function can be used to free COS/Load Balance context. */ -/* Input (24 bytes) */ -struct hwrm_vnic_rss_cos_lb_ctx_free_input { - uint16_t req_type; + uint16_t target_id; /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint16_t cmpl_ring; + uint64_t resp_addr; + uint8_t flags; /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * Enumeration denoting the RX, TX type of the resource. + * This enumeration is used for resources that are similar for both + * TX and RX paths of the chip. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_FLAGS_PATH UINT32_C(0x1) + /* tx path */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_FLAGS_PATH_TX \ + UINT32_C(0x0) + /* rx path */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_FLAGS_PATH_RX \ + UINT32_C(0x1) + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_FLAGS_PATH_LAST \ + HWRM_CFA_METER_PROFILE_ALLOC_INPUT_FLAGS_PATH_RX + /* The meter algorithm type. */ + uint8_t meter_type; + /* RFC 2697 (srTCM) */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_METER_TYPE_RFC2697 \ + UINT32_C(0x0) + /* RFC 2698 (trTCM) */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_METER_TYPE_RFC2698 \ + UINT32_C(0x1) + /* RFC 4115 (trTCM) */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_METER_TYPE_RFC4115 \ + UINT32_C(0x2) + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_METER_TYPE_LAST \ + HWRM_CFA_METER_PROFILE_ALLOC_INPUT_METER_TYPE_RFC4115 /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * This field is reserved for the future use. + * It shall be set to 0. 
*/ - uint64_t resp_addr; + uint16_t reserved1; /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * This field is reserved for the future use. + * It shall be set to 0. */ - uint16_t rss_cos_lb_ctx_id; - /* rss_cos_lb_ctx_id is 16 b */ - uint16_t unused_0[3]; + uint32_t reserved2; + /* A meter rate specified in bytes-per-second. */ + uint32_t commit_rate; + /* The bandwidth value. */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_SCALE_LAST \ + HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_LAST \ + HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_INVALID + /* A meter burst size specified in bytes. */ + uint32_t commit_burst; + /* The bandwidth value. */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_SCALE_LAST \ + HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). 
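+ * Taken together, a bandwidth word under this encoding is composed
+ * as (value & BW_VALUE_MASK) | SCALE | BW_VALUE_UNIT; for example
+ * (illustrative only), a value of 100 combined with SCALE_BITS and
+ * BW_VALUE_UNIT_MEGA denotes 100 Mbit.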
*/ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_LAST \ + HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_INVALID + /* A meter rate specified in bytes-per-second. */ + uint32_t excess_peak_rate; + /* The bandwidth value. */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_SCALE_LAST \ + HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_LAST \ + HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_INVALID + /* A meter burst size specified in bytes. */ + uint32_t excess_peak_burst; + /* The bandwidth value. */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. 
*/ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_SCALE_LAST \ + HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_LAST \ + HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_INVALID } __attribute__((packed)); -/* Output (16 bytes) */ -struct hwrm_vnic_rss_cos_lb_ctx_free_output { - uint16_t error_code; - /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate - */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; +/* hwrm_cfa_meter_profile_alloc_output (size:128b/16B) */ +struct hwrm_cfa_meter_profile_alloc_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* This value identifies a meter profile in CFA. */ + uint16_t meter_profile_id; /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * A value of 0xffff is considered invalid and implies the + * profile is not configured. */ - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; + #define HWRM_CFA_METER_PROFILE_ALLOC_OUTPUT_METER_PROFILE_ID_INVALID \ + UINT32_C(0xffff) + #define HWRM_CFA_METER_PROFILE_ALLOC_OUTPUT_METER_PROFILE_ID_LAST \ + HWRM_CFA_METER_PROFILE_ALLOC_OUTPUT_METER_PROFILE_ID_INVALID + uint8_t unused_0[5]; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written.
When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ + uint8_t valid; } __attribute__((packed)); -/* hwrm_ring_alloc */ -/* - * Description: This command allocates and does basic preparation for a ring. - */ -/* Input (80 bytes) */ -struct hwrm_ring_alloc_input { - uint16_t req_type; - /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. - */ - uint16_t cmpl_ring; +/******************************* + * hwrm_cfa_meter_profile_free * + *******************************/ + + +/* hwrm_cfa_meter_profile_free_input (size:192b/24B) */ +struct hwrm_cfa_meter_profile_free_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + uint16_t cmpl_ring; /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint64_t resp_addr; + uint16_t seq_id; /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint32_t enables; - /* This bit must be '1' for the Reserved1 field to be configured. */ - #define HWRM_RING_ALLOC_INPUT_ENABLES_RESERVED1 UINT32_C(0x1) - /* This bit must be '1' for the ring_arb_cfg field to be configured. */ - #define HWRM_RING_ALLOC_INPUT_ENABLES_RING_ARB_CFG UINT32_C(0x2) - /* This bit must be '1' for the Reserved3 field to be configured. */ - #define HWRM_RING_ALLOC_INPUT_ENABLES_RESERVED3 UINT32_C(0x4) + uint16_t target_id; /* - * This bit must be '1' for the stat_ctx_id_valid field to be - * configured. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - #define HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID UINT32_C(0x8) - /* This bit must be '1' for the Reserved4 field to be configured. */ - #define HWRM_RING_ALLOC_INPUT_ENABLES_RESERVED4 UINT32_C(0x10) - /* This bit must be '1' for the max_bw_valid field to be configured. 
*/ - #define HWRM_RING_ALLOC_INPUT_ENABLES_MAX_BW_VALID UINT32_C(0x20) - uint8_t ring_type; - /* Ring Type. */ - /* L2 Completion Ring (CR) */ - #define HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL UINT32_C(0x0) - /* TX Ring (TR) */ - #define HWRM_RING_ALLOC_INPUT_RING_TYPE_TX UINT32_C(0x1) - /* RX Ring (RR) */ - #define HWRM_RING_ALLOC_INPUT_RING_TYPE_RX UINT32_C(0x2) - /* RoCE Notification Completion Ring (ROCE_CR) */ - #define HWRM_RING_ALLOC_INPUT_RING_TYPE_ROCE_CMPL UINT32_C(0x3) - uint8_t unused_0; - uint16_t unused_1; - uint64_t page_tbl_addr; - /* This value is a pointer to the page table for the Ring. */ - uint32_t fbo; - /* First Byte Offset of the first entry in the first page. */ - uint8_t page_size; + uint64_t resp_addr; + uint8_t flags; /* - * Actual page size in 2^page_size. The supported range is - * increments in powers of 2 from 16 bytes to 1GB. - 4 = 16 B - * Page size is 16 B. - 12 = 4 KB Page size is 4 KB. - 13 = 8 KB - * Page size is 8 KB. - 16 = 64 KB Page size is 64 KB. - 21 = 2 - * MB Page size is 2 MB. - 22 = 4 MB Page size is 4 MB. - 30 = 1 - * GB Page size is 1 GB. + * Enumeration denoting the RX, TX type of the resource. + * This enumeration is used for resources that are similar for both + * TX and RX paths of the chip. */ - uint8_t page_tbl_depth; + #define HWRM_CFA_METER_PROFILE_FREE_INPUT_FLAGS_PATH UINT32_C(0x1) + /* tx path */ + #define HWRM_CFA_METER_PROFILE_FREE_INPUT_FLAGS_PATH_TX \ + UINT32_C(0x0) + /* rx path */ + #define HWRM_CFA_METER_PROFILE_FREE_INPUT_FLAGS_PATH_RX \ + UINT32_C(0x1) + #define HWRM_CFA_METER_PROFILE_FREE_INPUT_FLAGS_PATH_LAST \ + HWRM_CFA_METER_PROFILE_FREE_INPUT_FLAGS_PATH_RX + uint8_t unused_0; + /* This value identifies a meter profile in CFA. */ + uint16_t meter_profile_id; /* - * This value indicates the depth of page table. For this - * version of the specification, value other than 0 or 1 shall - * be considered as an invalid value. When the page_tbl_depth = - * 0, then it is treated as a special case with the following. - * 1. FBO and page size fields are not valid. 2. page_tbl_addr - * is the physical address of the first element of the ring. + * A value of 0xffff is considered invalid and implies the + * profile is not configured. */ - uint8_t unused_2; - uint8_t unused_3; - uint32_t length; + #define HWRM_CFA_METER_PROFILE_FREE_INPUT_METER_PROFILE_ID_INVALID \ + UINT32_C(0xffff) + #define HWRM_CFA_METER_PROFILE_FREE_INPUT_METER_PROFILE_ID_LAST \ + HWRM_CFA_METER_PROFILE_FREE_INPUT_METER_PROFILE_ID_INVALID + uint8_t unused_1[4]; +} __attribute__((packed)); + +/* hwrm_cfa_meter_profile_free_output (size:128b/16B) */ +struct hwrm_cfa_meter_profile_free_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * Number of 16B units in the ring. Minimum size for a ring is - * 16 16B entries. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last.
*/ - uint16_t logical_id; + uint8_t valid; +} __attribute__((packed)); + +/****************************** + * hwrm_cfa_meter_profile_cfg * + ******************************/ + + +/* hwrm_cfa_meter_profile_cfg_input (size:320b/40B) */ +struct hwrm_cfa_meter_profile_cfg_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * Logical ring number for the ring to be allocated. This value - * determines the position in the doorbell area where the update - * to the ring will be made. For completion rings, this value is - * also the MSI-X vector number for the function the completion - * ring is associated with. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint16_t cmpl_ring_id; + uint16_t cmpl_ring; /* - * This field is used only when ring_type is a TX ring. This - * value indicates what completion ring the TX ring is - * associated with. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint16_t queue_id; + uint16_t seq_id; /* - * This field is used only when ring_type is a TX ring. This - * value indicates what CoS queue the TX ring is associated - * with. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint8_t unused_4; - uint8_t unused_5; - uint32_t reserved1; - /* This field is reserved for the future use. It shall be set to 0. */ - uint16_t ring_arb_cfg; + uint16_t target_id; /* - * This field is used only when ring_type is a TX ring. This - * field is used to configure arbitration related parameters for - * a TX ring. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - /* Arbitration policy used for the ring. */ - #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_MASK UINT32_C(0xf) - #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_SFT 0 + uint64_t resp_addr; + uint8_t flags; /* - * Use strict priority for the TX ring. Priority - * value is specified in arb_policy_param + * Enumeration denoting the RX, TX type of the resource. + * This enumeration is used for resources that are similar for both + * TX and RX paths of the chip. */ - #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_SP \ - (UINT32_C(0x1) << 0) + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_FLAGS_PATH UINT32_C(0x1) + /* tx path */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_FLAGS_PATH_TX UINT32_C(0x0) + /* rx path */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_FLAGS_PATH_RX UINT32_C(0x1) + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_FLAGS_PATH_LAST \ + HWRM_CFA_METER_PROFILE_CFG_INPUT_FLAGS_PATH_RX + /* The meter algorithm type. */ + uint8_t meter_type; + /* RFC 2697 (srTCM) */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_METER_TYPE_RFC2697 \ + UINT32_C(0x0) + /* RFC 2698 (trTCM) */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_METER_TYPE_RFC2698 \ + UINT32_C(0x1) + /* RFC 4115 (trTCM) */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_METER_TYPE_RFC4115 \ + UINT32_C(0x2) + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_METER_TYPE_LAST \ + HWRM_CFA_METER_PROFILE_CFG_INPUT_METER_TYPE_RFC4115 + /* This value identifies a meter profile in CFA. 
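+ * This would normally be the id returned earlier in the
+ * meter_profile_id field of the hwrm_cfa_meter_profile_alloc
+ * response.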
*/ + uint16_t meter_profile_id; /* - * Use weighted fair queue arbitration for the - * TX ring. Weight is specified in - * arb_policy_param - */ - #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_WFQ \ - (UINT32_C(0x2) << 0) - #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_LAST \ - RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_WFQ - /* Reserved field. */ - #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_RSVD_MASK UINT32_C(0xf0) - #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_RSVD_SFT 4 - /* - * Arbitration policy specific parameter. # For strict priority - * arbitration policy, this field represents a priority value. - * If set to 0, then the priority is not specified and the HWRM - * is allowed to select any priority for this TX ring. # For - * weighted fair queue arbitration policy, this field represents - * a weight value. If set to 0, then the weight is not specified - * and the HWRM is allowed to select any weight for this TX - * ring. - */ - #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_PARAM_MASK \ - UINT32_C(0xff00) - #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_PARAM_SFT 8 - uint8_t unused_6; - uint8_t unused_7; - uint32_t reserved3; - /* This field is reserved for the future use. It shall be set to 0. */ - uint32_t stat_ctx_id; - /* - * This field is used only when ring_type is a TX ring. This - * input indicates what statistics context this ring should be - * associated with. + * A value of 0xffff is considered invalid and implies the + * profile is not configured. */ - uint32_t reserved4; - /* This field is reserved for the future use. It shall be set to 0. */ - uint32_t max_bw; + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_METER_PROFILE_ID_INVALID \ + UINT32_C(0xffff) + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_METER_PROFILE_ID_LAST \ + HWRM_CFA_METER_PROFILE_CFG_INPUT_METER_PROFILE_ID_INVALID /* - * This field is used only when ring_type is a TX ring to - * specify maximum BW allocated to the TX ring. The HWRM will - * translate this value into byte counter and time interval used - * for this ring inside the device. + * This field is reserved for the future use. + * It shall be set to 0. */ + uint32_t reserved; + /* A meter rate specified in bytes-per-second. */ + uint32_t commit_rate; /* The bandwidth value. */ - #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_MASK UINT32_C(0xfffffff) - #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_SFT 0 - /* The granularity of the value (bits or bytes). */ - #define HWRM_RING_ALLOC_INPUT_MAX_BW_SCALE UINT32_C(0x10000000) + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_SCALE \ + UINT32_C(0x10000000) /* Value is in bits. */ - #define HWRM_RING_ALLOC_INPUT_MAX_BW_SCALE_BITS (UINT32_C(0x0) << 28) + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_SCALE_BITS \ + (UINT32_C(0x0) << 28) /* Value is in bytes.
*/ - #define HWRM_RING_ALLOC_INPUT_MAX_BW_SCALE_BYTES (UINT32_C(0x1) << 28) - #define HWRM_RING_ALLOC_INPUT_MAX_BW_SCALE_LAST \ - RING_ALLOC_INPUT_MAX_BW_SCALE_BYTES + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_SCALE_LAST \ + HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_SCALE_BYTES /* bw_value_unit is 3 b */ - #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_MASK \ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_MASK \ UINT32_C(0xe0000000) - #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_SFT 29 - /* Value is in Mb or MB (base 10). */ - #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_MEGA \ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_MEGA \ (UINT32_C(0x0) << 29) - /* Value is in Kb or KB (base 10). */ - #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_KILO \ + /* Value is in Kb or KB (base 10). */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_KILO \ (UINT32_C(0x2) << 29) /* Value is in bits or bytes. */ - #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_BASE \ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_BASE \ (UINT32_C(0x4) << 29) - /* Value is in Gb or GB (base 10). */ - #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_GIGA \ + /* Value is in Gb or GB (base 10). */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_GIGA \ (UINT32_C(0x6) << 29) /* Value is in 1/100th of a percentage of total bandwidth. */ - #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_PERCENT1_100 \ (UINT32_C(0x1) << 29) /* Invalid unit */ - #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_INVALID \ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_INVALID \ (UINT32_C(0x7) << 29) - #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_LAST \ - RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_INVALID - uint8_t int_mode; - /* - * This field is used only when ring_type is a Completion ring. - * This value indicates what interrupt mode should be used on - * this completion ring. Note: In the legacy interrupt mode, no - * more than 16 completion rings are allowed. - */ - /* Legacy INTA */ - #define HWRM_RING_ALLOC_INPUT_INT_MODE_LEGACY UINT32_C(0x0) - /* Reserved */ - #define HWRM_RING_ALLOC_INPUT_INT_MODE_RSVD UINT32_C(0x1) - /* MSI-X */ - #define HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX UINT32_C(0x2) - /* No Interrupt - Polled mode */ - #define HWRM_RING_ALLOC_INPUT_INT_MODE_POLL UINT32_C(0x3) - uint8_t unused_8[3]; + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_LAST \ + HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_INVALID + /* A meter burst size specified in bytes. */ + uint32_t commit_burst; + /* The bandwidth value. */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. 
*/ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_SCALE_LAST \ + HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_LAST \ + HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_INVALID + /* A meter rate specified in bytes-per-second. */ + uint32_t excess_peak_rate; + /* The bandwidth value. */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_SCALE_LAST \ + HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_LAST \ + HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_INVALID + /* A meter burst size specified in bytes. 
*/ + uint32_t excess_peak_burst; + /* The bandwidth value. */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_MASK \ + UINT32_C(0xfffffff) + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_SFT \ + 0 + /* The granularity of the value (bits or bytes). */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_SCALE \ + UINT32_C(0x10000000) + /* Value is in bits. */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_SCALE_BITS \ + (UINT32_C(0x0) << 28) + /* Value is in bytes. */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_SCALE_BYTES \ + (UINT32_C(0x1) << 28) + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_SCALE_LAST \ + HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_SCALE_BYTES + /* bw_value_unit is 3 b */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_MASK \ + UINT32_C(0xe0000000) + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_SFT \ + 29 + /* Value is in Mb or MB (base 10). */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_MEGA \ + (UINT32_C(0x0) << 29) + /* Value is in Kb or KB (base 10). */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_KILO \ + (UINT32_C(0x2) << 29) + /* Value is in bits or bytes. */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_BASE \ + (UINT32_C(0x4) << 29) + /* Value is in Gb or GB (base 10). */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_GIGA \ + (UINT32_C(0x6) << 29) + /* Value is in 1/100th of a percentage of total bandwidth. */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_PERCENT1_100 \ + (UINT32_C(0x1) << 29) + /* Invalid unit */ + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_INVALID \ + (UINT32_C(0x7) << 29) + #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_LAST \ + HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_INVALID } __attribute__((packed)); -/* Output (16 bytes) */ -struct hwrm_ring_alloc_output { - uint16_t error_code; - /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate - */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; +/* hwrm_cfa_meter_profile_cfg_output (size:128b/16B) */ +struct hwrm_cfa_meter_profile_cfg_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. 
*/ - uint16_t ring_id; + uint8_t valid; +} __attribute__((packed)); + +/********************************* + * hwrm_cfa_meter_instance_alloc * + *********************************/ + + +/* hwrm_cfa_meter_instance_alloc_input (size:192b/24B) */ +struct hwrm_cfa_meter_instance_alloc_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * Physical number of ring allocated. This value shall be unique - * for a ring type. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint16_t logical_ring_id; - /* Logical number of ring allocated. */ - uint8_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t valid; + uint16_t cmpl_ring; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ -} __attribute__((packed)); - -/* hwrm_ring_free */ -/* - * Description: This command is used to free a ring and associated resources. - * With QoS and DCBx agents, it is possible the traffic classes will be moved - * from one CoS queue to another. When this occurs, the driver shall call - * 'hwrm_ring_free' to free the allocated rings and then call 'hwrm_ring_alloc' - * to re-allocate each ring and assign it to a new CoS queue. hwrm_ring_free - * shall be called on a ring only after it has been idle for 500ms or more and - * no frames have been posted to the ring during this time. All frames queued - * for transmission shall be completed and at least 500ms time elapsed from the - * last completion before calling this command. - */ -/* Input (24 bytes) */ -struct hwrm_ring_free_input { - uint16_t req_type; + uint16_t seq_id; /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint16_t cmpl_ring; + uint16_t target_id; /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + uint64_t resp_addr; + uint8_t flags; /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * Enumeration denoting the RX, TX type of the resource. + * This enumeration is used for resources that are similar for both + * TX and RX paths of the chip. 
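All four rate/burst words in hwrm_cfa_meter_profile_cfg_input (commit_rate, commit_burst, excess_peak_rate, excess_peak_burst) share the same encoding: a 28-bit magnitude in bits 0-27, a scale flag in bit 28 selecting bits or bytes, and a 3-bit unit in bits 29-31. A minimal sketch of composing such a word with the defines above (illustrative only, not part of the patch):

	/* Encode e.g. 500 Mbit/s into a commit_rate word. */
	static uint32_t encode_commit_rate_mbps(uint32_t mbps)
	{
		uint32_t word;

		/* 28-bit magnitude in bits 0..27 */
		word = (mbps << HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_SFT) &
		       HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_MASK;
		/* bit 28 = 0: magnitude is expressed in bits, not bytes */
		word |= HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_SCALE_BITS;
		/* bits 29..31: base-10 mega units */
		word |= HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_MEGA;
		return word;
	}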
+
+/*********************************
+ * hwrm_cfa_meter_instance_alloc *
+ *********************************/
+
+
+/* hwrm_cfa_meter_instance_alloc_input (size:192b/24B) */
+struct hwrm_cfa_meter_instance_alloc_input {
+	/* The HWRM command request type. */
+	uint16_t req_type;
 	/*
-	 * Physical number of ring allocated. This value shall be unique
-	 * for a ring type.
+	 * The completion ring to send the completion event on. This should
+	 * be the NQ ID returned from the `nq_alloc` HWRM command.
 	 */
-	uint16_t logical_ring_id;
-	/* Logical number of ring allocated. */
-	uint8_t unused_0;
-	uint8_t unused_1;
-	uint8_t unused_2;
-	uint8_t valid;
+	uint16_t cmpl_ring;
 	/*
-	 * This field is used in Output records to indicate that the
-	 * output is completely written to RAM. This field should be
-	 * read as '1' to indicate that the output has been completely
-	 * written. When writing a command completion or response to an
-	 * internal processor, the order of writes has to be such that
-	 * this field is written last.
+	 * The sequence ID is used by the driver for tracking multiple
+	 * commands. This ID is treated as opaque data by the firmware and
+	 * the value is returned in the `hwrm_resp_hdr` upon completion.
 	 */
-} __attribute__((packed));
-
-/* hwrm_ring_free */
-/*
- * Description: This command is used to free a ring and associated resources.
- * With QoS and DCBx agents, it is possible the traffic classes will be moved
- * from one CoS queue to another. When this occurs, the driver shall call
- * 'hwrm_ring_free' to free the allocated rings and then call 'hwrm_ring_alloc'
- * to re-allocate each ring and assign it to a new CoS queue. hwrm_ring_free
- * shall be called on a ring only after it has been idle for 500ms or more and
- * no frames have been posted to the ring during this time. All frames queued
- * for transmission shall be completed and at least 500ms time elapsed from the
- * last completion before calling this command.
- */
-/* Input (24 bytes) */
-struct hwrm_ring_free_input {
-	uint16_t req_type;
+	uint16_t seq_id;
 	/*
-	 * This value indicates what type of request this is. The format
-	 * for the rest of the command is determined by this field.
+	 * The target ID of the command:
+	 * * 0x0-0xFFF8 - The function ID
+	 * * 0xFFF8-0xFFFE - Reserved for internal processors
+	 * * 0xFFFF - HWRM
 	 */
-	uint16_t cmpl_ring;
+	uint16_t target_id;
 	/*
-	 * This value indicates the what completion ring the request
-	 * will be optionally completed on. If the value is -1, then no
-	 * CR completion will be generated. Any other value must be a
-	 * valid CR ring_id value for this function.
+	 * A physical address pointer pointing to a host buffer that the
+	 * command's response data will be written. This can be either a host
+	 * physical address (HPA) or a guest physical address (GPA) and must
+	 * point to a physically contiguous block of memory.
 	 */
-	uint16_t seq_id;
-	/* This value indicates the command sequence number. */
-	uint16_t target_id;
+	uint64_t resp_addr;
+	uint8_t flags;
 	/*
-	 * Target ID of this command. 0x0 - 0xFFF8 - Used for function
-	 * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
-	 * - HWRM
+	 * Enumeration denoting the RX, TX type of the resource.
+	 * This enumeration is used for resources that are similar for both
+	 * TX and RX paths of the chip.
 	 */
-	uint64_t resp_addr;
+	#define HWRM_CFA_METER_INSTANCE_ALLOC_INPUT_FLAGS_PATH \
+		UINT32_C(0x1)
+	/* tx path */
+	#define HWRM_CFA_METER_INSTANCE_ALLOC_INPUT_FLAGS_PATH_TX \
+		UINT32_C(0x0)
+	/* rx path */
+	#define HWRM_CFA_METER_INSTANCE_ALLOC_INPUT_FLAGS_PATH_RX \
+		UINT32_C(0x1)
+	#define HWRM_CFA_METER_INSTANCE_ALLOC_INPUT_FLAGS_PATH_LAST \
+		HWRM_CFA_METER_INSTANCE_ALLOC_INPUT_FLAGS_PATH_RX
+	uint8_t unused_0;
+	/* This value identifies a meter profile in CFA. */
+	uint16_t meter_profile_id;
 	/*
-	 * This is the host address where the response will be written
-	 * when the request is complete. This area must be 16B aligned
-	 * and must be cleared to zero before the request is made.
+	 * A value of 0xffff is considered invalid and implies the
+	 * profile is not configured.
 	 */
-	uint8_t ring_type;
-	/* Ring Type. */
-	/* L2 Completion Ring (CR) */
-	#define HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL UINT32_C(0x0)
-	/* TX Ring (TR) */
-	#define HWRM_RING_FREE_INPUT_RING_TYPE_TX UINT32_C(0x1)
-	/* RX Ring (RR) */
-	#define HWRM_RING_FREE_INPUT_RING_TYPE_RX UINT32_C(0x2)
-	/* RoCE Notification Completion Ring (ROCE_CR) */
-	#define HWRM_RING_FREE_INPUT_RING_TYPE_ROCE_CMPL UINT32_C(0x3)
-	uint8_t unused_0;
-	uint16_t ring_id;
-	/* Physical number of ring allocated. */
-	uint32_t unused_1;
+	#define HWRM_CFA_METER_INSTANCE_ALLOC_INPUT_METER_PROFILE_ID_INVALID \
+		UINT32_C(0xffff)
+	#define HWRM_CFA_METER_INSTANCE_ALLOC_INPUT_METER_PROFILE_ID_LAST \
+		HWRM_CFA_METER_INSTANCE_ALLOC_INPUT_METER_PROFILE_ID_INVALID
+	uint8_t unused_1[4];
 } __attribute__((packed));
-/* Output (16 bytes) */
-struct hwrm_ring_free_output {
-	uint16_t error_code;
-	/*
-	 * Pass/Fail or error type Note: receiver to verify the in
-	 * parameters, and fail the call with an error when appropriate
-	 */
-	uint16_t req_type;
-	/* This field returns the type of original request. */
-	uint16_t seq_id;
-	/* This field provides original sequence number of the command. */
-	uint16_t resp_len;
+/* hwrm_cfa_meter_instance_alloc_output (size:128b/16B) */
+struct hwrm_cfa_meter_instance_alloc_output {
+	/* The specific error status for the command. */
+	uint16_t error_code;
+	/* The HWRM command request type. */
+	uint16_t req_type;
+	/* The sequence ID from the original command. */
+	uint16_t seq_id;
+	/* The length of the response data in number of bytes. */
+	uint16_t resp_len;
+	/* This value identifies a meter instance in CFA. */
+	uint16_t meter_instance_id;
 	/*
-	 * This field is the length of the response in bytes. The last
-	 * byte of the response is a valid flag that will read as '1'
-	 * when the command has been completely written to memory.
+	 * A value of 0xffff is considered invalid and implies the
+	 * instance is not configured.
 	 */
-	uint32_t unused_0;
-	uint8_t unused_1;
-	uint8_t unused_2;
-	uint8_t unused_3;
-	uint8_t valid;
+	#define HWRM_CFA_METER_INSTANCE_ALLOC_OUTPUT_METER_INSTANCE_ID_INVALID \
+		UINT32_C(0xffff)
+	#define HWRM_CFA_METER_INSTANCE_ALLOC_OUTPUT_METER_INSTANCE_ID_LAST \
+		HWRM_CFA_METER_INSTANCE_ALLOC_OUTPUT_METER_INSTANCE_ID_INVALID
+	uint8_t unused_0[5];
 	/*
-	 * This field is used in Output records to indicate that the
-	 * output is completely written to RAM. This field should be
-	 * read as '1' to indicate that the output has been completely
-	 * written. When writing a command completion or response to an
-	 * internal processor, the order of writes has to be such that
-	 * this field is written last.
+	 * This field is used in Output records to indicate that the output
+	 * is completely written to RAM. This field should be read as '1'
+	 * to indicate that the output has been completely written.
+	 * When writing a command completion or response to an internal processor,
+	 * the order of writes has to be such that this field is written last.
 	 */
+	uint8_t valid;
 } __attribute__((packed));
-/* hwrm_ring_grp_alloc */
-/*
- * Description: This API allocates and does basic preparation for a ring group.
- */
-/* Input (24 bytes) */
-struct hwrm_ring_grp_alloc_input {
-	uint16_t req_type;
+/********************************
+ * hwrm_cfa_meter_instance_free *
+ ********************************/
+
+
+/* hwrm_cfa_meter_instance_free_input (size:192b/24B) */
+struct hwrm_cfa_meter_instance_free_input {
+	/* The HWRM command request type. */
+	uint16_t req_type;
 	/*
-	 * This value indicates what type of request this is. The format
-	 * for the rest of the command is determined by this field.
+	 * The completion ring to send the completion event on. This should
+	 * be the NQ ID returned from the `nq_alloc` HWRM command.
 	 */
-	uint16_t cmpl_ring;
+	uint16_t cmpl_ring;
 	/*
-	 * This value indicates the what completion ring the request
-	 * will be optionally completed on. If the value is -1, then no
-	 * CR completion will be generated. Any other value must be a
-	 * valid CR ring_id value for this function.
+	 * The sequence ID is used by the driver for tracking multiple
+	 * commands. This ID is treated as opaque data by the firmware and
+	 * the value is returned in the `hwrm_resp_hdr` upon completion.
 	 */
-	uint16_t seq_id;
-	/* This value indicates the command sequence number. */
-	uint16_t target_id;
+	uint16_t seq_id;
 	/*
-	 * Target ID of this command. 0x0 - 0xFFF8 - Used for function
-	 * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
-	 * - HWRM
+	 * The target ID of the command:
+	 * * 0x0-0xFFF8 - The function ID
+	 * * 0xFFF8-0xFFFE - Reserved for internal processors
+	 * * 0xFFFF - HWRM
 	 */
-	uint64_t resp_addr;
+	uint16_t target_id;
 	/*
-	 * This is the host address where the response will be written
-	 * when the request is complete. This area must be 16B aligned
-	 * and must be cleared to zero before the request is made.
+	 * A physical address pointer pointing to a host buffer that the
+	 * command's response data will be written. This can be either a host
+	 * physical address (HPA) or a guest physical address (GPA) and must
+	 * point to a physically contiguous block of memory.
 	 */
-	uint16_t cr;
-	/* This value identifies the CR associated with the ring group. */
-	uint16_t rr;
-	/* This value identifies the main RR associated with the ring group. */
-	uint16_t ar;
+	uint64_t resp_addr;
+	uint8_t flags;
 	/*
-	 * This value identifies the aggregation RR associated with the
-	 * ring group. If this value is 0xFF... (All Fs), then no
-	 * Aggregation ring will be set.
+	 * Enumeration denoting the RX, TX type of the resource.
+	 * This enumeration is used for resources that are similar for both
+	 * TX and RX paths of the chip.
 	 */
-	uint16_t sc;
+	#define HWRM_CFA_METER_INSTANCE_FREE_INPUT_FLAGS_PATH UINT32_C(0x1)
+	/* tx path */
+	#define HWRM_CFA_METER_INSTANCE_FREE_INPUT_FLAGS_PATH_TX \
+		UINT32_C(0x0)
+	/* rx path */
+	#define HWRM_CFA_METER_INSTANCE_FREE_INPUT_FLAGS_PATH_RX \
+		UINT32_C(0x1)
+	#define HWRM_CFA_METER_INSTANCE_FREE_INPUT_FLAGS_PATH_LAST \
+		HWRM_CFA_METER_INSTANCE_FREE_INPUT_FLAGS_PATH_RX
+	uint8_t unused_0;
+	/* This value identifies a meter instance in CFA. */
+	uint16_t meter_instance_id;
 	/*
-	 * This value identifies the statistics context associated with
-	 * the ring group.
+	 * A value of 0xffff is considered invalid and implies the
+	 * instance is not configured.
 	 */
+	#define HWRM_CFA_METER_INSTANCE_FREE_INPUT_METER_INSTANCE_ID_INVALID \
+		UINT32_C(0xffff)
+	#define HWRM_CFA_METER_INSTANCE_FREE_INPUT_METER_INSTANCE_ID_LAST \
+		HWRM_CFA_METER_INSTANCE_FREE_INPUT_METER_INSTANCE_ID_INVALID
+	uint8_t unused_1[4];
 } __attribute__((packed));
-/* Output (16 bytes) */
-struct hwrm_ring_grp_alloc_output {
-	uint16_t error_code;
-	/*
-	 * Pass/Fail or error type Note: receiver to verify the in
-	 * parameters, and fail the call with an error when appropriate
-	 */
-	uint16_t req_type;
-	/* This field returns the type of original request. */
-	uint16_t seq_id;
-	/* This field provides original sequence number of the command. */
-	uint16_t resp_len;
-	/*
-	 * This field is the length of the response in bytes. The last
-	 * byte of the response is a valid flag that will read as '1'
-	 * when the command has been completely written to memory.
-	 */
-	uint32_t ring_group_id;
-	/*
-	 * This is the ring group ID value. Use this value to program
-	 * the default ring group for the VNIC or as table entries in an
-	 * RSS/COS context.
-	 */
-	uint8_t unused_0;
-	uint8_t unused_1;
-	uint8_t unused_2;
-	uint8_t valid;
+/* hwrm_cfa_meter_instance_free_output (size:128b/16B) */
+struct hwrm_cfa_meter_instance_free_output {
+	/* The specific error status for the command. */
+	uint16_t error_code;
+	/* The HWRM command request type. */
+	uint16_t req_type;
+	/* The sequence ID from the original command. */
+	uint16_t seq_id;
+	/* The length of the response data in number of bytes. */
+	uint16_t resp_len;
+	uint8_t unused_0[7];
 	/*
-	 * This field is used in Output records to indicate that the
-	 * output is completely written to RAM. This field should be
-	 * read as '1' to indicate that the output has been completely
-	 * written. When writing a command completion or response to an
-	 * internal processor, the order of writes has to be such that
-	 * this field is written last.
+	 * This field is used in Output records to indicate that the output
+	 * is completely written to RAM. This field should be read as '1'
+	 * to indicate that the output has been completely written.
+	 * When writing a command completion or response to an internal processor,
+	 * the order of writes has to be such that this field is written last.
 	 */
+	uint8_t valid;
 } __attribute__((packed));
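The three meter commands above form a lifecycle: hwrm_cfa_meter_profile_cfg describes the policing parameters, hwrm_cfa_meter_instance_alloc binds an instance to that profile, and hwrm_cfa_meter_instance_free releases it. A hedged sketch of the alloc step, assuming a hypothetical send_hwrm() transport helper (the real driver wraps its mailbox differently) and omitting the little-endian conversions a real driver performs:

	struct hwrm_cfa_meter_instance_alloc_input req = { 0 };
	struct hwrm_cfa_meter_instance_alloc_output resp = { 0 };

	req.flags = HWRM_CFA_METER_INSTANCE_ALLOC_INPUT_FLAGS_PATH_RX; /* RX path */
	req.meter_profile_id = 5; /* a previously configured profile, not ..._INVALID */
	send_hwrm(&req, sizeof(req), &resp, sizeof(resp)); /* hypothetical helper */
	/* resp.meter_instance_id is later handed to hwrm_cfa_meter_instance_free */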
-/* hwrm_ring_grp_free */
-/*
- * Description: This API frees a ring group and associated resources. # If a
- * ring in the ring group is reset or free, then the associated rings in the
- * ring group shall also be reset/free using hwrm_ring_free. # A function driver
- * shall always use hwrm_ring_grp_free after freeing all rings in a group. # As
- * a part of executing this command, the HWRM shall reset all associated ring
- * group resources.
- */
-/* Input (24 bytes) */
-struct hwrm_ring_grp_free_input {
-	uint16_t req_type;
+/*******************************
+ * hwrm_cfa_decap_filter_alloc *
+ *******************************/
+
+
+/* hwrm_cfa_decap_filter_alloc_input (size:832b/104B) */
+struct hwrm_cfa_decap_filter_alloc_input {
+	/* The HWRM command request type. */
+	uint16_t req_type;
 	/*
-	 * This value indicates what type of request this is. The format
-	 * for the rest of the command is determined by this field.
+	 * The completion ring to send the completion event on. This should
+	 * be the NQ ID returned from the `nq_alloc` HWRM command.
 	 */
-	uint16_t cmpl_ring;
+	uint16_t cmpl_ring;
 	/*
-	 * This value indicates the what completion ring the request
-	 * will be optionally completed on. If the value is -1, then no
-	 * CR completion will be generated. Any other value must be a
-	 * valid CR ring_id value for this function.
+	 * The sequence ID is used by the driver for tracking multiple
+	 * commands. This ID is treated as opaque data by the firmware and
+	 * the value is returned in the `hwrm_resp_hdr` upon completion.
 	 */
-	uint16_t seq_id;
-	/* This value indicates the command sequence number. */
-	uint16_t target_id;
+	uint16_t seq_id;
 	/*
-	 * Target ID of this command. 0x0 - 0xFFF8 - Used for function
-	 * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
-	 * - HWRM
+	 * The target ID of the command:
+	 * * 0x0-0xFFF8 - The function ID
+	 * * 0xFFF8-0xFFFE - Reserved for internal processors
+	 * * 0xFFFF - HWRM
 	 */
-	uint64_t resp_addr;
+	uint16_t target_id;
 	/*
-	 * This is the host address where the response will be written
-	 * when the request is complete. This area must be 16B aligned
-	 * and must be cleared to zero before the request is made.
+	 * A physical address pointer pointing to a host buffer that the
+	 * command's response data will be written. This can be either a host
+	 * physical address (HPA) or a guest physical address (GPA) and must
+	 * point to a physically contiguous block of memory.
 	 */
-	uint32_t ring_group_id;
-	/* This is the ring group ID value. */
-	uint32_t unused_0;
-} __attribute__((packed));
-
-/* Output (16 bytes) */
-struct hwrm_ring_grp_free_output {
-	uint16_t error_code;
+	uint64_t resp_addr;
+	uint32_t flags;
+	/* ovs_tunnel is 1 b */
+	#define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_FLAGS_OVS_TUNNEL \
+		UINT32_C(0x1)
+	uint32_t enables;
 	/*
-	 * Pass/Fail or error type Note: receiver to verify the in
-	 * parameters, and fail the call with an error when appropriate
+	 * This bit must be '1' for the tunnel_type field to be
+	 * configured.
 	 */
-	uint16_t req_type;
-	/* This field returns the type of original request. */
-	uint16_t seq_id;
-	/* This field provides original sequence number of the command. */
-	uint16_t resp_len;
+	#define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE \
+		UINT32_C(0x1)
 	/*
-	 * This field is the length of the response in bytes. The last
-	 * byte of the response is a valid flag that will read as '1'
-	 * when the command has been completely written to memory.
+	 * This bit must be '1' for the tunnel_id field to be
+	 * configured.
 	 */
-	uint32_t unused_0;
-	uint8_t unused_1;
-	uint8_t unused_2;
-	uint8_t unused_3;
-	uint8_t valid;
+	#define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_ID \
+		UINT32_C(0x2)
 	/*
-	 * This field is used in Output records to indicate that the
-	 * output is completely written to RAM. This field should be
-	 * read as '1' to indicate that the output has been completely
-	 * written. When writing a command completion or response to an
-	 * internal processor, the order of writes has to be such that
-	 * this field is written last.
+	 * This bit must be '1' for the src_macaddr field to be
+	 * configured.
 	 */
-} __attribute__((packed));
-
-/* hwrm_cfa_l2_filter_alloc */
-/*
- * Description: An L2 filter is a filter resource that is used to identify a
- * vnic or ring for a packet based on layer 2 fields. Layer 2 fields for
- * encapsulated packets include both outer L2 header and/or inner l2 header of
- * encapsulated packet. The L2 filter resource covers the following OS specific
- * L2 filters. Linux/FreeBSD (per function): # Broadcast enable/disable # List
- * of individual multicast filters # All multicast enable/disable filter #
- * Unicast filters # Promiscuous mode VMware: # Broadcast enable/disable (per
- * physical function) # All multicast enable/disable (per function) # Unicast
- * filters per ring or vnic # Promiscuous mode per PF Windows: # Broadcast
- * enable/disable (per physical function) # List of individual multicast filters
- * (Driver needs to advertise the maximum number of filters supported) # All
- * multicast enable/disable per physical function # Unicast filters per vnic #
- * Promiscuous mode per PF Implementation notes on the use of VNIC in this
- * command: # By default, these filters belong to default vnic for the function.
- * # Once these filters are set up, only destination VNIC can be modified. # If
- * the destination VNIC is not specified in this command, then the HWRM shall
- * only create an l2 context id. HWRM Implementation notes for multicast
- * filters: # The hwrm_filter_alloc command can be used to set up multicast
- * filters (perfect match or partial match). Each individual function driver can
- * set up multicast filters independently. # The HWRM needs to keep track of
- * multicast filters set up by function drivers and maintain multicast group
- * replication records to enable a subset of functions to receive traffic for a
- * specific multicast address. # When a specific multicast filter cannot be set,
- * the HWRM shall return an error. In this error case, the driver should fall
- * back to using one general filter (rather than specific) for all multicast
- * traffic. # When the SR-IOV is enabled, the HWRM needs to additionally track
- * source knockout per multicast group record. Examples of setting unicast
- * filters: For a unicast MAC based filter, one can use a combination of the
- * fields and masks provided in this command to set up the filter. Below are
- * some examples: # MAC + no VLAN filter: This filter is used to identify
- * traffic that does not contain any VLAN tags and matches destination (or
- * source) MAC address. This filter can be set up by setting only l2_addr field
- * to be a valid field. All other fields are not valid. The following value is
- * set for l2_addr. l2_addr = MAC # MAC + Any VLAN filter: This filter is used
- * to identify traffic that carries single VLAN tag and matches (destination or
- * source) MAC address. This filter can be set up by setting only l2_addr and
- * l2_ovlan_mask fields to be valid fields. All other fields are not valid. The
- * following values are set for those two valid fields. l2_addr = MAC,
- * l2_ovlan_mask = 0xFFFF # MAC + no VLAN or VLAN ID=0: This filter is used to
- * identify untagged traffic that does not contain any VLAN tags or a VLAN tag
- * with VLAN ID = 0 and matches destination (or source) MAC address. This filter
- * can be set up by setting only l2_addr and l2_ovlan fields to be valid fields.
- * All other fields are not valid. The following value are set for l2_addr and
- * l2_ovlan. l2_addr = MAC, l2_ovlan = 0x0 # MAC + no VLAN or any VLAN: This
- * filter is used to identify traffic that contains zero or 1 VLAN tag and
- * matches destination (or source) MAC address. This filter can be set up by
- * setting only l2_addr, l2_ovlan, and l2_mask fields to be valid fields. All
- * other fields are not valid. The following value are set for l2_addr,
- * l2_ovlan, and l2_mask fields. l2_addr = MAC, l2_ovlan = 0x0, l2_ovlan_mask =
- * 0xFFFF # MAC + VLAN ID filter: This filter can be set up by setting only
- * l2_addr, l2_ovlan, and l2_ovlan_mask fields to be valid fields. All other
- * fields are not valid. The following values are set for those three valid
- * fields. l2_addr = MAC, l2_ovlan = VLAN ID, l2_ovlan_mask = 0xF000
- */
-/* Input (96 bytes) */
-struct hwrm_cfa_l2_filter_alloc_input {
-	uint16_t req_type;
+	#define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR \
+		UINT32_C(0x4)
 	/*
-	 * This value indicates what type of request this is. The format
-	 * for the rest of the command is determined by this field.
+	 * This bit must be '1' for the dst_macaddr field to be
+	 * configured.
 	 */
-	uint16_t cmpl_ring;
+	#define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR \
+		UINT32_C(0x8)
 	/*
-	 * This value indicates the what completion ring the request
-	 * will be optionally completed on. If the value is -1, then no
-	 * CR completion will be generated. Any other value must be a
-	 * valid CR ring_id value for this function.
+	 * This bit must be '1' for the ovlan_vid field to be
+	 * configured.
 	 */
-	uint16_t seq_id;
-	/* This value indicates the command sequence number. */
-	uint16_t target_id;
+	#define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_OVLAN_VID \
+		UINT32_C(0x10)
 	/*
-	 * Target ID of this command. 0x0 - 0xFFF8 - Used for function
-	 * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
-	 * - HWRM
+	 * This bit must be '1' for the ivlan_vid field to be
+	 * configured.
 	 */
-	uint64_t resp_addr;
+	#define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_IVLAN_VID \
+		UINT32_C(0x20)
 	/*
-	 * This is the host address where the response will be written
-	 * when the request is complete. This area must be 16B aligned
-	 * and must be cleared to zero before the request is made.
+	 * This bit must be '1' for the t_ovlan_vid field to be
+	 * configured.
 	 */
-	uint32_t flags;
+	#define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_T_OVLAN_VID \
+		UINT32_C(0x40)
 	/*
-	 * Enumeration denoting the RX, TX type of the resource. This
-	 * enumeration is used for resources that are similar for both
-	 * TX and RX paths of the chip.
+	 * This bit must be '1' for the t_ivlan_vid field to be
+	 * configured.
 	 */
-	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH UINT32_C(0x1)
-	/* tx path */
-	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_TX \
-		(UINT32_C(0x0) << 0)
-	/* rx path */
-	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX \
-		(UINT32_C(0x1) << 0)
-	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_LAST \
-		CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX
+	#define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_T_IVLAN_VID \
+		UINT32_C(0x80)
 	/*
-	 * Setting of this flag indicates the applicability to the
-	 * loopback path.
+	 * This bit must be '1' for the ethertype field to be
+	 * configured.
 	 */
-	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_LOOPBACK UINT32_C(0x2)
+	#define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE \
+		UINT32_C(0x100)
 	/*
-	 * Setting of this flag indicates drop action. If this flag is
-	 * not set, then it should be considered accept action.
+	 * This bit must be '1' for the src_ipaddr field to be
+	 * configured.
 	 */
-	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_DROP UINT32_C(0x4)
+	#define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR \
+		UINT32_C(0x200)
 	/*
-	 * If this flag is set, all t_l2_* fields are invalid and they
-	 * should not be specified. If this flag is set, then l2_*
-	 * fields refer to fields of outermost L2 header.
+	 * This bit must be '1' for the dst_ipaddr field to be
+	 * configured.
 	 */
-	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST UINT32_C(0x8)
-	uint32_t enables;
-	/* This bit must be '1' for the l2_addr field to be configured. */
-	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR UINT32_C(0x1)
-	/* This bit must be '1' for the l2_addr_mask field to be configured. */
-	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK \
-		UINT32_C(0x2)
-	/* This bit must be '1' for the l2_ovlan field to be configured. */
-	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN UINT32_C(0x4)
+	#define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR \
+		UINT32_C(0x400)
 	/*
-	 * This bit must be '1' for the l2_ovlan_mask field to be
+	 * This bit must be '1' for the ipaddr_type field to be
 	 * configured.
 	 */
-	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK \
-		UINT32_C(0x8)
-	/* This bit must be '1' for the l2_ivlan field to be configured. */
-	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN UINT32_C(0x10)
+	#define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE \
+		UINT32_C(0x800)
 	/*
-	 * This bit must be '1' for the l2_ivlan_mask field to be
+	 * This bit must be '1' for the ip_protocol field to be
 	 * configured.
 	 */
-	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK \
-		UINT32_C(0x20)
-	/* This bit must be '1' for the t_l2_addr field to be configured. */
-	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_ADDR UINT32_C(0x40)
+	#define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL \
+		UINT32_C(0x1000)
 	/*
-	 * This bit must be '1' for the t_l2_addr_mask field to be
+	 * This bit must be '1' for the src_port field to be
 	 * configured.
 	 */
-	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_ADDR_MASK \
-		UINT32_C(0x80)
-	/* This bit must be '1' for the t_l2_ovlan field to be configured. */
-	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_OVLAN \
-		UINT32_C(0x100)
+	#define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT \
+		UINT32_C(0x2000)
 	/*
-	 * This bit must be '1' for the t_l2_ovlan_mask field to be
+	 * This bit must be '1' for the dst_port field to be
 	 * configured.
 	 */
-	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_OVLAN_MASK \
-		UINT32_C(0x200)
-	/* This bit must be '1' for the t_l2_ivlan field to be configured. */
-	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_IVLAN \
-		UINT32_C(0x400)
+	#define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_DST_PORT \
+		UINT32_C(0x4000)
 	/*
-	 * This bit must be '1' for the t_l2_ivlan_mask field to be
+	 * This bit must be '1' for the dst_id field to be
 	 * configured.
 	 */
-	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_IVLAN_MASK \
-		UINT32_C(0x800)
-	/* This bit must be '1' for the src_type field to be configured. */
-	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE UINT32_C(0x1000)
-	/* This bit must be '1' for the src_id field to be configured. */
-	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID UINT32_C(0x2000)
-	/* This bit must be '1' for the tunnel_type field to be configured. */
-	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE \
-		UINT32_C(0x4000)
-	/* This bit must be '1' for the dst_id field to be configured. */
-	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID UINT32_C(0x8000)
+	#define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_DST_ID \
+		UINT32_C(0x8000)
 	/*
 	 * This bit must be '1' for the mirror_vnic_id field to be
 	 * configured.
 	 */
-	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID \
+	#define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID \
 		UINT32_C(0x10000)
-	uint8_t l2_addr[6];
 	/*
-	 * This value sets the match value for the L2 MAC address.
-	 * Destination MAC address for RX path. Source MAC address for
-	 * TX path.
+	 * Tunnel identifier.
+	 * Virtual Network Identifier (VNI). Only valid with
+	 * tunnel_types VXLAN, NVGRE, and Geneve.
+	 * Only lower 24-bits of VNI field are used
+	 * in setting up the filter.
 	 */
-	uint8_t unused_0;
-	uint8_t unused_1;
-	uint8_t l2_addr_mask[6];
+	uint32_t tunnel_id;
+	/* Tunnel Type. */
+	uint8_t tunnel_type;
+	/* Non-tunnel */
+	#define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NONTUNNEL \
+		UINT32_C(0x0)
+	/* Virtual eXtensible Local Area Network (VXLAN) */
+	#define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN \
+		UINT32_C(0x1)
+	/* Network Virtualization Generic Routing Encapsulation (NVGRE) */
+	#define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NVGRE \
+		UINT32_C(0x2)
+	/* Generic Routing Encapsulation (GRE) inside Ethernet payload */
+	#define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_L2GRE \
+		UINT32_C(0x3)
+	/* IP in IP */
+	#define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPIP \
+		UINT32_C(0x4)
+	/* Generic Network Virtualization Encapsulation (Geneve) */
+	#define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_GENEVE \
+		UINT32_C(0x5)
+	/* Multi-Protocol Label Switching (MPLS) */
+	#define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_MPLS \
+		UINT32_C(0x6)
+	/* Stateless Transport Tunnel (STT) */
+	#define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_STT \
+		UINT32_C(0x7)
+	/* Generic Routing Encapsulation (GRE) inside IP datagram payload */
+	#define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPGRE \
+		UINT32_C(0x8)
+	/* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */
+	#define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_V4 \
+		UINT32_C(0x9)
+	/* Any tunneled traffic */
+	#define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL \
+		UINT32_C(0xff)
+	#define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_LAST \
+		HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL
+	uint8_t unused_0;
+	uint16_t unused_1;
 	/*
-	 * This value sets the mask value for the L2 address. A value of
-	 * 0 will mask the corresponding bit from compare.
+	 * This value indicates the source MAC address in
+	 * the Ethernet header.
 	 */
-	uint16_t l2_ovlan;
-	/* This value sets VLAN ID value for outer VLAN. */
-	uint16_t l2_ovlan_mask;
+	uint8_t src_macaddr[6];
+	uint8_t unused_2[2];
 	/*
-	 * This value sets the mask value for the ovlan id. A value of 0
-	 * will mask the corresponding bit from compare.
+	 * This value indicates the destination MAC address in
+	 * the Ethernet header.
 	 */
-	uint16_t l2_ivlan;
-	/* This value sets VLAN ID value for inner VLAN. */
-	uint16_t l2_ivlan_mask;
+	uint8_t dst_macaddr[6];
 	/*
-	 * This value sets the mask value for the ivlan id. A value of 0
-	 * will mask the corresponding bit from compare.
+	 * This value indicates the VLAN ID of the outer VLAN tag
+	 * in the Ethernet header.
 	 */
-	uint8_t unused_2;
-	uint8_t unused_3;
-	uint8_t t_l2_addr[6];
+	uint16_t ovlan_vid;
 	/*
-	 * This value sets the match value for the tunnel L2 MAC
-	 * address. Destination MAC address for RX path. Source MAC
-	 * address for TX path.
+	 * This value indicates the VLAN ID of the inner VLAN tag
+	 * in the Ethernet header.
 	 */
-	uint8_t unused_4;
-	uint8_t unused_5;
-	uint8_t t_l2_addr_mask[6];
+	uint16_t ivlan_vid;
 	/*
-	 * This value sets the mask value for the tunnel L2 address. A
-	 * value of 0 will mask the corresponding bit from compare.
+	 * This value indicates the VLAN ID of the outer VLAN tag
+	 * in the tunnel Ethernet header.
 	 */
-	uint16_t t_l2_ovlan;
-	/* This value sets VLAN ID value for tunnel outer VLAN. */
-	uint16_t t_l2_ovlan_mask;
+	uint16_t t_ovlan_vid;
 	/*
-	 * This value sets the mask value for the tunnel ovlan id. A
-	 * value of 0 will mask the corresponding bit from compare.
+	 * This value indicates the VLAN ID of the inner VLAN tag
+	 * in the tunnel Ethernet header.
 	 */
-	uint16_t t_l2_ivlan;
-	/* This value sets VLAN ID value for tunnel inner VLAN. */
-	uint16_t t_l2_ivlan_mask;
+	uint16_t t_ivlan_vid;
+	/* This value indicates the ethertype in the Ethernet header. */
+	uint16_t ethertype;
 	/*
-	 * This value sets the mask value for the tunnel ivlan id. A
-	 * value of 0 will mask the corresponding bit from compare.
+	 * This value indicates the type of IP address.
+	 * 4 - IPv4
+	 * 6 - IPv6
+	 * All others are invalid.
 	 */
-	uint8_t src_type;
-	/* This value identifies the type of source of the packet. */
-	/* Network port */
-	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_NPORT UINT32_C(0x0)
-	/* Physical function */
-	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_PF UINT32_C(0x1)
-	/* Virtual function */
-	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_VF UINT32_C(0x2)
-	/* Virtual NIC of a function */
-	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_VNIC UINT32_C(0x3)
-	/* Embedded processor for CFA management */
-	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_KONG UINT32_C(0x4)
-	/* Embedded processor for OOB management */
-	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_APE UINT32_C(0x5)
-	/* Embedded processor for RoCE */
-	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_BONO UINT32_C(0x6)
-	/* Embedded processor for network proxy functions */
-	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_TANG UINT32_C(0x7)
-	uint8_t unused_6;
-	uint32_t src_id;
-	/*
-	 * This value is the id of the source. For a network port, it
-	 * represents port_id. For a physical function, it represents
-	 * fid. For a virtual function, it represents vf_id. For a vnic,
-	 * it represents vnic_id. For embedded processors, this id is
-	 * not valid. Notes: 1. The function ID is implied if it src_id
-	 * is not provided for a src_type that is either
-	 */
-	uint8_t tunnel_type;
-	/* Tunnel Type. */
-	/* Non-tunnel */
-	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NONTUNNEL \
+	uint8_t ip_addr_type;
+	/* invalid */
+	#define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_UNKNOWN \
 		UINT32_C(0x0)
-	/* Virtual eXtensible Local Area Network (VXLAN) */
-	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN \
-		UINT32_C(0x1)
+	/* IPv4 */
+	#define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 \
+		UINT32_C(0x4)
+	/* IPv6 */
+	#define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 \
+		UINT32_C(0x6)
+	#define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_LAST \
+		HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV6
 	/*
-	 * Network Virtualization Generic Routing
-	 * Encapsulation (NVGRE)
+	 * The value of protocol field in IP header.
+	 * Applies to UDP and TCP traffic.
+	 * 6 - TCP
+	 * 17 - UDP
 	 */
-	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NVGRE \
-		UINT32_C(0x2)
+	uint8_t ip_protocol;
+	/* invalid */
+	#define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_IP_PROTOCOL_UNKNOWN \
+		UINT32_C(0x0)
+	/* TCP */
+	#define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_IP_PROTOCOL_TCP \
+		UINT32_C(0x6)
+	/* UDP */
+	#define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_IP_PROTOCOL_UDP \
+		UINT32_C(0x11)
+	#define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_IP_PROTOCOL_LAST \
+		HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_IP_PROTOCOL_UDP
+	uint16_t unused_3;
+	uint32_t unused_4;
 	/*
-	 * Generic Routing Encapsulation (GRE) inside
-	 * Ethernet payload
+	 * The value of source IP address to be used in filtering.
+	 * For IPv4, first four bytes represent the IP address.
 	 */
-	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_L2GRE UINT32_C(0x3)
-	/* IP in IP */
-	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPIP UINT32_C(0x4)
-	/* Generic Network Virtualization Encapsulation (Geneve) */
-	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_GENEVE UINT32_C(0x5)
-	/* Multi-Protocol Lable Switching (MPLS) */
-	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_MPLS UINT32_C(0x6)
-	/* Stateless Transport Tunnel (STT) */
-	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_STT UINT32_C(0x7)
+	uint32_t src_ipaddr[4];
 	/*
-	 * Generic Routing Encapsulation (GRE) inside IP
-	 * datagram payload
+	 * The value of destination IP address to be used in filtering.
+	 * For IPv4, first four bytes represent the IP address.
 	 */
-	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPGRE UINT32_C(0x8)
+	uint32_t dst_ipaddr[4];
 	/*
-	 * IPV4 over virtual eXtensible Local Area
-	 * Network (IPV4oVXLAN)
+	 * The value of source port to be used in filtering.
+	 * Applies to UDP and TCP traffic.
 	 */
-	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_V4 \
-		UINT32_C(0x9)
-	/* Any tunneled traffic */
-	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL \
-		UINT32_C(0xff)
-	uint8_t unused_7;
-	uint16_t dst_id;
+	uint16_t src_port;
 	/*
-	 * If set, this value shall represent the Logical VNIC ID of the
-	 * destination VNIC for the RX path and network port id of the
-	 * destination port for the TX path.
+	 * The value of destination port to be used in filtering.
+	 * Applies to UDP and TCP traffic.
 	 */
-	uint16_t mirror_vnic_id;
-	/* Logical VNIC ID of the VNIC where traffic is mirrored. */
-	uint8_t pri_hint;
+	uint16_t dst_port;
 	/*
-	 * This hint is provided to help in placing the filter in the
-	 * filter table.
+	 * If set, this value shall represent the
+	 * Logical VNIC ID of the destination VNIC for the RX
+	 * path.
 	 */
-	/* No preference */
-	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_NO_PREFER \
-		UINT32_C(0x0)
-	/* Above the given filter */
-	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_ABOVE_FILTER \
-		UINT32_C(0x1)
-	/* Below the given filter */
-	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_BELOW_FILTER \
-		UINT32_C(0x2)
-	/* As high as possible */
-	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_MAX UINT32_C(0x3)
-	/* As low as possible */
-	#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_MIN UINT32_C(0x4)
-	uint8_t unused_8;
-	uint32_t unused_9;
-	uint64_t l2_filter_id_hint;
+	uint16_t dst_id;
 	/*
-	 * This is the ID of the filter that goes along with the
-	 * pri_hint. This field is valid only for the following values.
-	 * 1 - Above the given filter 2 - Below the given filter
+	 * If set, this value shall represent the L2 context that matches the L2
+	 * information of the decap filter.
 	 */
+	uint16_t l2_ctxt_ref_id;
 } __attribute__((packed));
-/* Output (24 bytes) */
-struct hwrm_cfa_l2_filter_alloc_output {
-	uint16_t error_code;
-	/*
-	 * Pass/Fail or error type Note: receiver to verify the in
-	 * parameters, and fail the call with an error when appropriate
-	 */
-	uint16_t req_type;
-	/* This field returns the type of original request. */
-	uint16_t seq_id;
-	/* This field provides original sequence number of the command. */
-	uint16_t resp_len;
-	/*
-	 * This field is the length of the response in bytes. The last
-	 * byte of the response is a valid flag that will read as '1'
-	 * when the command has been completely written to memory.
-	 */
-	uint64_t l2_filter_id;
-	/*
-	 * This value identifies a set of CFA data structures used for
-	 * an L2 context.
-	 */
-	uint32_t flow_id;
-	/*
-	 * This is the ID of the flow associated with this filter. This
-	 * value shall be used to match and associate the flow
-	 * identifier returned in completion records. A value of
-	 * 0xFFFFFFFF shall indicate no flow id.
-	 */
-	uint8_t unused_0;
-	uint8_t unused_1;
-	uint8_t unused_2;
-	uint8_t valid;
+/* hwrm_cfa_decap_filter_alloc_output (size:128b/16B) */
+struct hwrm_cfa_decap_filter_alloc_output {
+	/* The specific error status for the command. */
+	uint16_t error_code;
+	/* The HWRM command request type. */
+	uint16_t req_type;
+	/* The sequence ID from the original command. */
+	uint16_t seq_id;
+	/* The length of the response data in number of bytes. */
+	uint16_t resp_len;
+	/* This value is an opaque id into CFA data structures. */
+	uint32_t decap_filter_id;
+	uint8_t unused_0[3];
 	/*
-	 * This field is used in Output records to indicate that the
-	 * output is completely written to RAM. This field should be
-	 * read as '1' to indicate that the output has been completely
-	 * written. When writing a command completion or response to an
-	 * internal processor, the order of writes has to be such that
-	 * this field is written last.
+	 * This field is used in Output records to indicate that the output
+	 * is completely written to RAM. This field should be read as '1'
+	 * to indicate that the output has been completely written.
+	 * When writing a command completion or response to an internal processor,
+	 * the order of writes has to be such that this field is written last.
 	 */
+	uint8_t valid;
 } __attribute__((packed));
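Each optional match field in hwrm_cfa_decap_filter_alloc_input is consulted only when its enables bit is set. As a worked illustration (again not part of the patch, and omitting the endianness conversions a real driver performs), matching VXLAN traffic by VNI and inner destination MAC might be filled in like this:

	struct hwrm_cfa_decap_filter_alloc_input req = { 0 };
	uint8_t mac[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 }; /* example address */

	/* Only fields whose enables bit is set are looked at by the firmware. */
	req.enables = HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE |
		      HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_ID |
		      HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR;
	req.tunnel_type = HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN;
	req.tunnel_id = 100; /* VNI; only the low 24 bits are used */
	memcpy(req.dst_macaddr, mac, sizeof(mac)); /* needs <string.h> */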
-/* hwrm_cfa_l2_filter_free */
-/*
- * Description: Free a L2 filter. The HWRM shall free all associated filter
- * resources with the L2 filter.
- */
-/* Input (24 bytes) */
-struct hwrm_cfa_l2_filter_free_input {
-	uint16_t req_type;
+/******************************
+ * hwrm_cfa_decap_filter_free *
+ ******************************/
+
+
+/* hwrm_cfa_decap_filter_free_input (size:192b/24B) */
+struct hwrm_cfa_decap_filter_free_input {
+	/* The HWRM command request type. */
+	uint16_t req_type;
 	/*
-	 * This value indicates what type of request this is. The format
-	 * for the rest of the command is determined by this field.
+	 * The completion ring to send the completion event on. This should
+	 * be the NQ ID returned from the `nq_alloc` HWRM command.
 	 */
-	uint16_t cmpl_ring;
+	uint16_t cmpl_ring;
 	/*
-	 * This value indicates the what completion ring the request
-	 * will be optionally completed on. If the value is -1, then no
-	 * CR completion will be generated. Any other value must be a
-	 * valid CR ring_id value for this function.
+	 * The sequence ID is used by the driver for tracking multiple
+	 * commands. This ID is treated as opaque data by the firmware and
+	 * the value is returned in the `hwrm_resp_hdr` upon completion.
 	 */
-	uint16_t seq_id;
-	/* This value indicates the command sequence number. */
-	uint16_t target_id;
+	uint16_t seq_id;
 	/*
-	 * Target ID of this command. 0x0 - 0xFFF8 - Used for function
-	 * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
-	 * - HWRM
+	 * The target ID of the command:
+	 * * 0x0-0xFFF8 - The function ID
+	 * * 0xFFF8-0xFFFE - Reserved for internal processors
+	 * * 0xFFFF - HWRM
 	 */
-	uint64_t resp_addr;
+	uint16_t target_id;
 	/*
-	 * This is the host address where the response will be written
-	 * when the request is complete. This area must be 16B aligned
-	 * and must be cleared to zero before the request is made.
+	 * A physical address pointer pointing to a host buffer that the
+	 * command's response data will be written. This can be either a host
+	 * physical address (HPA) or a guest physical address (GPA) and must
+	 * point to a physically contiguous block of memory.
 	 */
-	uint64_t l2_filter_id;
+	uint64_t resp_addr;
+	/* This value is an opaque id into CFA data structures. */
+	uint32_t decap_filter_id;
+	uint8_t unused_0[4];
+} __attribute__((packed));
+
+/* hwrm_cfa_decap_filter_free_output (size:128b/16B) */
+struct hwrm_cfa_decap_filter_free_output {
+	/* The specific error status for the command. */
+	uint16_t error_code;
+	/* The HWRM command request type. */
+	uint16_t req_type;
+	/* The sequence ID from the original command. */
+	uint16_t seq_id;
+	/* The length of the response data in number of bytes. */
+	uint16_t resp_len;
+	uint8_t unused_0[7];
 	/*
-	 * This value identifies a set of CFA data structures used for
-	 * an L2 context.
+	 * This field is used in Output records to indicate that the output
+	 * is completely written to RAM. This field should be read as '1'
+	 * to indicate that the output has been completely written.
+	 * When writing a command completion or response to an internal processor,
+	 * the order of writes has to be such that this field is written last.
 	 */
+	uint8_t valid;
 } __attribute__((packed));
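The free side is symmetric: the opaque decap_filter_id from the alloc response is all the firmware needs back. A minimal sketch, under the same assumptions as the alloc example above:

	struct hwrm_cfa_decap_filter_free_input req = { 0 };

	req.decap_filter_id = alloc_resp.decap_filter_id; /* id returned by the alloc */
	/* send over the usual HWRM channel; the response carries only status and the valid byte */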
This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ -} __attribute__((packed)); - -/* hwrm_cfa_l2_filter_cfg */ -/* Description: Change the configuration of an existing L2 filter */ -/* Input (40 bytes) */ -struct hwrm_cfa_l2_filter_cfg_input { - uint16_t req_type; + uint16_t target_id; /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint16_t cmpl_ring; + uint64_t resp_addr; + uint16_t flags; + /* tunnel is 1 b */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_TUNNEL UINT32_C(0x1) + /* num_vlan is 2 b */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_NUM_VLAN_MASK UINT32_C(0x6) + #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_NUM_VLAN_SFT 1 + /* no tags */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_NUM_VLAN_NONE \ + (UINT32_C(0x0) << 1) + /* 1 tag */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_NUM_VLAN_ONE \ + (UINT32_C(0x1) << 1) + /* 2 tags */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_NUM_VLAN_TWO \ + (UINT32_C(0x2) << 1) + #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_NUM_VLAN_LAST \ + HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_NUM_VLAN_TWO + /* Enumeration denoting the Flow Type. */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_FLOWTYPE_MASK UINT32_C(0x38) + #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_FLOWTYPE_SFT 3 + /* L2 flow */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_FLOWTYPE_L2 \ + (UINT32_C(0x0) << 3) + /* IPV4 flow */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_FLOWTYPE_IPV4 \ + (UINT32_C(0x1) << 3) + /* IPV6 flow */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_FLOWTYPE_IPV6 \ + (UINT32_C(0x2) << 3) + #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_FLOWTYPE_LAST \ + HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_FLOWTYPE_IPV6 + /* + * Tx Flow: vf fid. + * Rx Flow: pf fid. + */ + uint16_t src_fid; + /* Tunnel handle valid when tunnel flag is set. */ + uint32_t tunnel_handle; + uint16_t action_flags; + /* + * Setting of this flag indicates drop action. If this flag is not set, + * then it should be considered accept action. + */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_FWD \ + UINT32_C(0x1) + /* recycle is 1 b */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_RECYCLE \ + UINT32_C(0x2) /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * Setting of this flag indicates drop action. If this flag is not set, + * then it should be considered accept action. */ - uint16_t seq_id; - /* This value indicates the command sequence number. 
*/ - uint16_t target_id; + #define HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_DROP \ + UINT32_C(0x4) + /* meter is 1 b */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_METER \ + UINT32_C(0x8) + /* tunnel is 1 b */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_TUNNEL \ + UINT32_C(0x10) + /* nat_src is 1 b */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_NAT_SRC \ + UINT32_C(0x20) + /* nat_dest is 1 b */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_NAT_DEST \ + UINT32_C(0x40) + /* nat_ipv4_address is 1 b */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_NAT_IPV4_ADDRESS \ + UINT32_C(0x80) + /* l2_header_rewrite is 1 b */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_L2_HEADER_REWRITE \ + UINT32_C(0x100) + /* ttl_decrement is 1 b */ + #define HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_TTL_DECREMENT \ + UINT32_C(0x200) /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * Tx Flow: pf or vf fid. + * Rx Flow: vf fid. + */ + uint16_t dst_fid; + /* VLAN tpid, valid when push_vlan flag is set. */ + uint16_t l2_rewrite_vlan_tpid; + /* VLAN tci, valid when push_vlan flag is set. */ + uint16_t l2_rewrite_vlan_tci; + /* Meter id, valid when meter flag is set. */ + uint16_t act_meter_id; + /* Flow with the same l2 context tcam key. */ + uint16_t ref_flow_handle; + /* This value sets the match value for the ethertype. */ + uint16_t ethertype; + /* valid when num tags is 1 or 2. */ + uint16_t outer_vlan_tci; + /* This value sets the match value for the Destination MAC address. */ + uint16_t dmac[3]; + /* valid when num tags is 2. */ + uint16_t inner_vlan_tci; + /* This value sets the match value for the Source MAC address. */ + uint16_t smac[3]; + /* The bit length of destination IP address mask. */ + uint8_t ip_dst_mask_len; + /* The bit length of source IP address mask. */ + uint8_t ip_src_mask_len; + /* The value of destination IPv4/IPv6 address. */ + uint32_t ip_dst[4]; + /* The source IPv4/IPv6 address. */ + uint32_t ip_src[4]; + /* + * The value of source port. + * Applies to UDP and TCP traffic. */ - uint64_t resp_addr; + uint16_t l4_src_port; /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * The value of source port mask. + * Applies to UDP and TCP traffic. */ - uint32_t flags; + uint16_t l4_src_port_mask; /* - * Enumeration denoting the RX, TX type of the resource. This - * enumeration is used for resources that are similar for both - * TX and RX paths of the chip. + * The value of destination port. + * Applies to UDP and TCP traffic. */ - #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH UINT32_C(0x1) - /* tx path */ - #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_TX \ - (UINT32_C(0x0) << 0) - /* rx path */ - #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX \ - (UINT32_C(0x1) << 0) - #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_LAST \ - CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX + uint16_t l4_dst_port; /* - * Setting of this flag indicates drop action. If this flag is - * not set, then it should be considered accept action. + * The value of destination port mask. + * Applies to UDP and TCP traffic. */ - #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_DROP UINT32_C(0x2) - uint32_t enables; - /* This bit must be '1' for the dst_id field to be configured. 
*/ - #define HWRM_CFA_L2_FILTER_CFG_INPUT_ENABLES_DST_ID UINT32_C(0x1) + uint16_t l4_dst_port_mask; /* - * This bit must be '1' for the new_mirror_vnic_id field to be - * configured. + * NAT IPv4/6 address based on address type flag. + * 0 values are ignored. */ - #define HWRM_CFA_L2_FILTER_CFG_INPUT_ENABLES_NEW_MIRROR_VNIC_ID \ - UINT32_C(0x2) - uint64_t l2_filter_id; + uint32_t nat_ip_address[4]; + /* L2 header re-write Destination MAC address. */ + uint16_t l2_rewrite_dmac[3]; /* - * This value identifies a set of CFA data structures used for - * an L2 context. - */ - uint32_t dst_id; + * The NAT source/destination port based on direction flag. + * Applies to UDP and TCP traffic. + * 0 values are ignored. + */ + uint16_t nat_port; + /* L2 header re-write Source MAC address. */ + uint16_t l2_rewrite_smac[3]; + /* The value of ip protocol. */ + uint8_t ip_proto; + uint8_t unused_0; +} __attribute__((packed)); + +/* hwrm_cfa_flow_alloc_output (size:128b/16B) */ +struct hwrm_cfa_flow_alloc_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* Flow record index. */ + uint16_t flow_handle; + uint8_t unused_0[5]; /* - * If set, this value shall represent the Logical VNIC ID of the - * destination VNIC for the RX path and network port id of the - * destination port for the TX path. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - uint32_t new_mirror_vnic_id; - /* New Logical VNIC ID of the VNIC where traffic is mirrored. */ + uint8_t valid; } __attribute__((packed)); -/* Output (16 bytes) */ -struct hwrm_cfa_l2_filter_cfg_output { - uint16_t error_code; +/********************** + * hwrm_cfa_flow_free * + **********************/ + + +/* hwrm_cfa_flow_free_input (size:192b/24B) */ +struct hwrm_cfa_flow_free_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. 
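
A minimal usage sketch for hwrm_cfa_flow_alloc above (illustrative only, not part of the generated header): it assumes HWRM_CFA_FLOW_ALLOC is the request-type constant defined elsewhere in this file, and that requests are encoded little-endian as throughout the bnxt driver.

#include <string.h>
#include <rte_byteorder.h>

/* Illustrative helper: encode a flow-allocation request for an IPv4
 * flow carrying one VLAN tag, forwarded from src_fid to dst_fid. */
static void
bnxt_fill_flow_alloc_req(struct hwrm_cfa_flow_alloc_input *req,
			 uint16_t src_fid, uint16_t dst_fid)
{
	memset(req, 0, sizeof(*req));
	req->req_type = rte_cpu_to_le_16(HWRM_CFA_FLOW_ALLOC);
	/* NUM_VLAN and FLOWTYPE are shifted sub-fields of the same
	 * 16-bit flags word, so the pre-shifted values OR together. */
	req->flags = rte_cpu_to_le_16(
		HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_NUM_VLAN_ONE |
		HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_FLOWTYPE_IPV4);
	/* Forward action; with neither FWD nor DROP set, the flow
	 * would be treated as plain accept. */
	req->action_flags = rte_cpu_to_le_16(
		HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_FWD);
	req->src_fid = rte_cpu_to_le_16(src_fid);
	req->dst_fid = rte_cpu_to_le_16(dst_fid);
}
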
+ * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; + uint16_t target_id; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ + uint64_t resp_addr; + /* Flow record index. */ + uint16_t flow_handle; + uint8_t unused_0[6]; } __attribute__((packed)); -/* hwrm_cfa_l2_set_rx_mask */ -/* Description: This command will set rx mask of the function. */ -/* Input (56 bytes) */ -struct hwrm_cfa_l2_set_rx_mask_input { - uint16_t req_type; +/* hwrm_cfa_flow_free_output (size:256b/32B) */ +struct hwrm_cfa_flow_free_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* packet is 64 b */ + uint64_t packet; + /* byte is 64 b */ + uint64_t byte; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/********************** + * hwrm_cfa_flow_info * + **********************/ + + +/* hwrm_cfa_flow_info_input (size:192b/24B) */ +struct hwrm_cfa_flow_info_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint16_t cmpl_ring; + uint16_t cmpl_ring; /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + uint16_t seq_id; /* - * Target ID of this command. 
0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint64_t resp_addr; + uint16_t target_id; /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint32_t vnic_id; - /* VNIC ID */ - uint32_t mask; - /* Reserved for future use. */ - #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_RESERVED UINT32_C(0x1) + uint64_t resp_addr; + /* Flow record index. */ + uint16_t flow_handle; + /* Max flow handle */ + #define HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_MAX_MASK \ + UINT32_C(0xfff) + #define HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_MAX_SFT 0 + /* CNP flow handle */ + #define HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_CNP_CNT \ + UINT32_C(0x1000) + /* Direction rx = 1 */ + #define HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_DIR_RX \ + UINT32_C(0x8000) + uint8_t unused_0[6]; +} __attribute__((packed)); + +/* hwrm_cfa_flow_info_output (size:448b/56B) */ +struct hwrm_cfa_flow_info_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* flags is 8 b */ + uint8_t flags; + /* profile is 8 b */ + uint8_t profile; + /* src_fid is 16 b */ + uint16_t src_fid; + /* dst_fid is 16 b */ + uint16_t dst_fid; + /* l2_ctxt_id is 16 b */ + uint16_t l2_ctxt_id; + /* em_info is 64 b */ + uint64_t em_info; + /* tcam_info is 64 b */ + uint64_t tcam_info; + /* vfp_tcam_info is 64 b */ + uint64_t vfp_tcam_info; + /* ar_id is 16 b */ + uint16_t ar_id; + /* flow_handle is 16 b */ + uint16_t flow_handle; + /* tunnel_handle is 32 b */ + uint32_t tunnel_handle; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/*********************** + * hwrm_cfa_flow_flush * + ***********************/ + + +/* hwrm_cfa_flow_flush_input (size:192b/24B) */ +struct hwrm_cfa_flow_flush_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * When this bit is '1', the function is requested to accept - * multi-cast packets specified by the multicast addr table. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST UINT32_C(0x2) + uint16_t cmpl_ring; /* - * When this bit is '1', the function is requested to accept all - * multi-cast packets. + * The sequence ID is used by the driver for tracking multiple + * commands. 
This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST UINT32_C(0x4) + uint16_t seq_id; /* - * When this bit is '1', the function is requested to accept - * broadcast packets. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST UINT32_C(0x8) - /* - * When this bit is '1', the function is requested to be put in - * the promiscuous mode. The HWRM should accept any function to - * set up promiscuous mode. The HWRM shall follow the semantics - * below for the promiscuous mode support. # When partitioning - * is not enabled on a port (i.e. single PF on the port), then - * the PF shall be allowed to be in the promiscuous mode. When - * the PF is in the promiscuous mode, then it shall receive all - * host bound traffic on that port. # When partitioning is - * enabled on a port (i.e. multiple PFs per port) and a PF on - * that port is in the promiscuous mode, then the PF receives - * all traffic within that partition as identified by a unique - * identifier for the PF (e.g. S-Tag). If a unique outer VLAN - * for the PF is specified, then the setting of promiscuous mode - * on that PF shall result in the PF receiving all host bound - * traffic with matching outer VLAN. # A VF shall can be set in - * the promiscuous mode. In the promiscuous mode, the VF does - * not receive any traffic unless a unique outer VLAN for the VF - * is specified. If a unique outer VLAN for the VF is specified, - * then the setting of promiscuous mode on that VF shall result - * in the VF receiving all host bound traffic with the matching - * outer VLAN. # The HWRM shall allow the setting of promiscuous - * mode on a function independently from the promiscuous mode - * settings on other functions. - */ - #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS UINT32_C(0x10) - /* - * If this flag is set, the corresponding RX filters shall be - * set up to cover multicast/broadcast filters for the outermost - * Layer 2 destination MAC address field. - */ - #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_OUTERMOST UINT32_C(0x20) - /* - * If this flag is set, the corresponding RX filters shall be - * set up to cover multicast/broadcast filters for the VLAN- - * tagged packets that match the TPID and VID fields of VLAN - * tags in the VLAN tag table specified in this command. + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY UINT32_C(0x40) + uint64_t resp_addr; + uint32_t flags; + uint8_t unused_0[4]; +} __attribute__((packed)); + +/* hwrm_cfa_flow_flush_output (size:128b/16B) */ +struct hwrm_cfa_flow_flush_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. 
 */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
 /*
- * If this flag is set, the corresponding RX filters shall be
- * set up to cover multicast/broadcast filters for non-VLAN
- * tagged packets and VLAN-tagged packets that match the TPID
- * and VID fields of VLAN tags in the VLAN tag table specified
- * in this command.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
 */
- #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN UINT32_C(0x80)
+ uint8_t valid;
+} __attribute__((packed));
+
+/***********************
+ * hwrm_cfa_flow_stats *
+ ***********************/
+
+
+/* hwrm_cfa_flow_stats_input (size:320b/40B) */
+struct hwrm_cfa_flow_stats_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
 /*
- * If this flag is set, the corresponding RX filters shall be
- * set up to cover multicast/broadcast filters for non-VLAN
- * tagged packets and VLAN-tagged packets matching any VLAN tag.
- * If this flag is set, then the HWRM shall ignore VLAN tags
- * specified in vlan_tag_tbl. If none of vlanonly, vlan_nonvlan,
- * and anyvlan_nonvlan flags is set, then the HWRM shall ignore
- * VLAN tags specified in vlan_tag_tbl. The HWRM client shall
- * set at most one flag out of vlanonly, vlan_nonvlan, and
- * anyvlan_nonvlan.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
 */
- #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN \
- UINT32_C(0x100)
- uint64_t mc_tbl_addr;
- /* This is the address for mcast address tbl. */
- uint32_t num_mc_entries;
+ uint16_t cmpl_ring;
 /*
- * This value indicates how many entries in mc_tbl are valid.
- * Each entry is 6 bytes.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
 */
- uint32_t unused_0;
- uint64_t vlan_tag_tbl_addr;
+ uint16_t seq_id;
 /*
- * This is the address for VLAN tag table. Each VLAN entry in
- * the table is 4 bytes of a VLAN tag including TPID, PCP, DEI,
- * and VID fields in network byte order.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
 */
- uint32_t num_vlan_tags;
+ uint16_t target_id;
 /*
- * This value indicates how many entries in vlan_tag_tbl are
- * valid. Each entry is 4 bytes.
+ * A physical address pointer pointing to a host buffer into which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
 */
- uint32_t unused_1;
+ uint64_t resp_addr;
+ /* Number of valid flow handles in this command. */
+ uint16_t num_flows;
+ /* Flow handle. */
+ uint16_t flow_handle_0;
+ /* Flow handle. */
+ uint16_t flow_handle_1;
+ /* Flow handle. */
+ uint16_t flow_handle_2;
+ /* Flow handle. */
+ uint16_t flow_handle_3;
+ /* Flow handle. */
+ uint16_t flow_handle_4;
+ /* Flow handle. */
+ uint16_t flow_handle_5;
+ /* Flow handle. */
+ uint16_t flow_handle_6;
+ /* Flow handle. */
+ uint16_t flow_handle_7;
+ /* Flow handle. */
+ uint16_t flow_handle_8;
+ /* Flow handle.
*/ + uint16_t flow_handle_9; + uint8_t unused_0[2]; } __attribute__((packed)); -/* Output (16 bytes) */ -struct hwrm_cfa_l2_set_rx_mask_output { - uint16_t error_code; +/* hwrm_cfa_flow_stats_output (size:1408b/176B) */ +struct hwrm_cfa_flow_stats_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* packet_0 is 64 b */ + uint64_t packet_0; + /* packet_1 is 64 b */ + uint64_t packet_1; + /* packet_2 is 64 b */ + uint64_t packet_2; + /* packet_3 is 64 b */ + uint64_t packet_3; + /* packet_4 is 64 b */ + uint64_t packet_4; + /* packet_5 is 64 b */ + uint64_t packet_5; + /* packet_6 is 64 b */ + uint64_t packet_6; + /* packet_7 is 64 b */ + uint64_t packet_7; + /* packet_8 is 64 b */ + uint64_t packet_8; + /* packet_9 is 64 b */ + uint64_t packet_9; + /* byte_0 is 64 b */ + uint64_t byte_0; + /* byte_1 is 64 b */ + uint64_t byte_1; + /* byte_2 is 64 b */ + uint64_t byte_2; + /* byte_3 is 64 b */ + uint64_t byte_3; + /* byte_4 is 64 b */ + uint64_t byte_4; + /* byte_5 is 64 b */ + uint64_t byte_5; + /* byte_6 is 64 b */ + uint64_t byte_6; + /* byte_7 is 64 b */ + uint64_t byte_7; + /* byte_8 is 64 b */ + uint64_t byte_8; + /* byte_9 is 64 b */ + uint64_t byte_9; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/************************** + * hwrm_cfa_vf_pair_alloc * + **************************/ + + +/* hwrm_cfa_vf_pair_alloc_input (size:448b/56B) */ +struct hwrm_cfa_vf_pair_alloc_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + uint16_t cmpl_ring; /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; + uint16_t seq_id; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. 
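
A usage sketch for the batched flow-counter read above (illustrative only; HWRM_CFA_FLOW_STATS is assumed to be the request-type constant defined elsewhere in this file). Because flow_handle_0 through flow_handle_9 are consecutive uint16_t fields of a packed structure, a caller can fill them as an array; the packet_0/byte_0 through packet_9/byte_9 pairs in the response line up with the same slots.

#include <string.h>
#include <rte_byteorder.h>

/* Illustrative helper: request counters for up to 10 flow handles. */
static void
bnxt_fill_flow_stats_req(struct hwrm_cfa_flow_stats_input *req,
			 const uint16_t *handles, uint16_t n)
{
	/* Relies on the packed, consecutive layout declared above. */
	uint16_t *slot = &req->flow_handle_0;
	uint16_t i;

	memset(req, 0, sizeof(*req));
	req->req_type = rte_cpu_to_le_16(HWRM_CFA_FLOW_STATS);
	if (n > 10)
		n = 10;
	req->num_flows = rte_cpu_to_le_16(n);
	for (i = 0; i < n; i++)
		slot[i] = rte_cpu_to_le_16(handles[i]);
}
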
+ * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ -} __attribute__((packed)); - -/* Command specific Error Codes (8 bytes) */ -struct hwrm_cfa_l2_set_rx_mask_cmd_err { - uint8_t code; + uint16_t target_id; /* - * command specific error codes that goes to the cmd_err field - * in Common HWRM Error Response. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - /* Unknown error */ - #define HWRM_CFA_L2_SET_RX_MASK_CMD_ERR_CODE_UNKNOWN UINT32_C(0x0) + uint64_t resp_addr; + /* Logical VF number (range: 0 -> MAX_VFS -1). */ + uint16_t vf_a_id; + /* Logical VF number (range: 0 -> MAX_VFS -1). */ + uint16_t vf_b_id; + uint8_t unused_0[4]; + /* VF Pair name (32 byte string). */ + char pair_name[32]; +} __attribute__((packed)); + +/* hwrm_cfa_vf_pair_alloc_output (size:128b/16B) */ +struct hwrm_cfa_vf_pair_alloc_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * Unable to complete operation due to conflict - * with Ntuple Filter + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - #define \ - HWRM_CFA_L2_SET_RX_MASK_CMD_ERR_CODE_NTUPLE_FILTER_CONFLICT_ERR \ - UINT32_C(0x1) - uint8_t unused_0[7]; + uint8_t valid; } __attribute__((packed)); -/* hwrm_cfa_vlan_antispoof_cfg */ -/* Description: Configures vlan anti-spoof filters for VF. */ -/* Input (32 bytes) */ -struct hwrm_cfa_vlan_antispoof_cfg_input { - uint16_t req_type; +/************************* + * hwrm_cfa_vf_pair_free * + *************************/ + + +/* hwrm_cfa_vf_pair_free_input (size:384b/48B) */ +struct hwrm_cfa_vf_pair_free_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * This value indicates what type of request this is. The format for the - * rest of the command is determined by this field. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint16_t cmpl_ring; + uint16_t cmpl_ring; /* - * This value indicates the what completion ring the request will be - * optionally completed on. If the value is -1, then no CR completion - * will be generated. Any other value must be a valid CR ring_id value - * for this function. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + uint16_t seq_id; /* - * Target ID of this command. 
0x0 - 0xFFF8 - Used for function ids - * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint64_t resp_addr; + uint16_t target_id; /* - * This is the host address where the response will be written when the - * request is complete. This area must be 16B aligned and must be - * cleared to zero before the request is made. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint16_t fid; + uint64_t resp_addr; + /* VF Pair name (32 byte string). */ + char pair_name[32]; +} __attribute__((packed)); + +/* hwrm_cfa_vf_pair_free_output (size:128b/16B) */ +struct hwrm_cfa_vf_pair_free_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * Function ID of the function that is being configured. Only valid for - * a VF FID configured by the PF. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - uint8_t unused_0; - uint8_t unused_1; - uint32_t num_vlan_entries; - /* Number of VLAN entries in the vlan_tag_mask_tbl. */ - uint64_t vlan_tag_mask_tbl_addr; + uint8_t valid; +} __attribute__((packed)); + +/************************* + * hwrm_cfa_vf_pair_info * + *************************/ + + +/* hwrm_cfa_vf_pair_info_input (size:448b/56B) */ +struct hwrm_cfa_vf_pair_info_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * The vlan_tag_mask_tbl_addr is the DMA address of the VLAN antispoof - * table. Each table entry contains the 16-bit TPID (0x8100 or 0x88a8 - * only), 16-bit VLAN ID, and a 16-bit mask, all in network order to - * match hwrm_cfa_l2_set_rx_mask. For an individual VLAN entry, the mask - * value should be 0xfff for the 12-bit VLAN ID. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ -}; - -/* Output (16 bytes) */ -struct hwrm_cfa_vlan_antispoof_cfg_output { - uint16_t error_code; + uint16_t cmpl_ring; /* - * Pass/Fail or error type Note: receiver to verify the in parameters, - * and fail the call with an error when appropriate + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + uint16_t seq_id; /* - * This field is the length of the response in bytes. The last byte of - * the response is a valid flag that will read as '1' when the command - * has been completely written to memory. 
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
 */
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ uint16_t target_id;
 /*
- * This field is used in Output records to indicate that the output is
- * completely written to RAM. This field should be read as '1' to
- * indicate that the output has been completely written. When writing a
- * command completion or response to an internal processor, the order of
- * writes has to be such that this field is written last.
+ * A physical address pointer pointing to a host buffer into which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
 */
-};
+ uint64_t resp_addr;
+ uint32_t flags;
+ /* If this flag is set, lookup by name else lookup by index. */
+ #define HWRM_CFA_VF_PAIR_INFO_INPUT_FLAGS_LOOKUP_TYPE UINT32_C(0x1)
+ /* vf pair table index. */
+ uint16_t vf_pair_index;
+ uint8_t unused_0[2];
+ /* VF Pair name (32 byte string). */
+ char vf_pair_name[32];
+} __attribute__((packed));
-/* hwrm_cfa_ntuple_filter_alloc */
-/*
- * Description: This is a ntuple filter that uses fields from L4/L3 header and
- * optionally fields from L2. The ntuple filters apply to receive traffic only.
- * All L2/L3/L4 header fields are specified in network byte order. These filters
- * can be used for Receive Flow Steering (RFS). # For ethertype value, only
- * 0x0800 (IPv4) and 0x86dd (IPv6) shall be supported for ntuple filters. # If a
- * field specified in this command is not enabled as a valid field, then that
- * field shall not be used in matching packet header fields against this filter.
- */
-/* Input (128 bytes) */
-struct hwrm_cfa_ntuple_filter_alloc_input {
- uint16_t req_type;
+/* hwrm_cfa_vf_pair_info_output (size:512b/64B) */
+struct hwrm_cfa_vf_pair_info_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* vf pair table index. */
+ uint16_t next_vf_pair_index;
+ /* vf pair member a's vf_fid. */
+ uint16_t vf_a_fid;
+ /* vf pair member a's Linux logical VF number. */
+ uint16_t vf_a_index;
+ /* vf pair member b's vf_fid. */
+ uint16_t vf_b_fid;
+ /* vf pair member b's Linux logical VF number. */
+ uint16_t vf_b_index;
+ /* vf pair state. */
+ uint8_t pair_state;
+ /* Pair has been allocated */
+ #define HWRM_CFA_VF_PAIR_INFO_OUTPUT_PAIR_STATE_ALLOCATED UINT32_C(0x1)
+ /* Both pair members are active */
+ #define HWRM_CFA_VF_PAIR_INFO_OUTPUT_PAIR_STATE_ACTIVE UINT32_C(0x2)
+ #define HWRM_CFA_VF_PAIR_INFO_OUTPUT_PAIR_STATE_LAST \
+ HWRM_CFA_VF_PAIR_INFO_OUTPUT_PAIR_STATE_ACTIVE
+ uint8_t unused_0[5];
+ /* VF Pair name (32 byte string). */
+ char pair_name[32];
+ uint8_t unused_1[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
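
The next_vf_pair_index field above is what allows a caller to walk the whole VF-pair table. A hypothetical iteration sketch (hwrm_send() stands in for the driver's real request/response transport, HWRM_CFA_VF_PAIR_INFO for the request-type constant defined elsewhere in this file, and the loop assumes the firmware reports 0 once the walk is complete):

#include <stdio.h>
#include <string.h>
#include <rte_byteorder.h>

/* Hypothetical transport helper, not part of this header. */
int hwrm_send(void *bp, void *req, size_t req_len,
	      void *resp, size_t resp_len);

static void
dump_vf_pairs(void *bp)
{
	struct hwrm_cfa_vf_pair_info_input req;
	struct hwrm_cfa_vf_pair_info_output resp;
	uint16_t idx = 0;

	do {
		memset(&req, 0, sizeof(req));
		req.req_type = rte_cpu_to_le_16(HWRM_CFA_VF_PAIR_INFO);
		/* LOOKUP_TYPE left clear: look up by index, not name. */
		req.vf_pair_index = rte_cpu_to_le_16(idx);
		if (hwrm_send(bp, &req, sizeof(req), &resp, sizeof(resp)))
			break;
		/* pair_name is a fixed 32-byte field and is not
		 * guaranteed to be NUL-terminated, hence %.32s. */
		printf("pair %.32s: vf_a_fid=%u vf_b_fid=%u state=%u\n",
		       resp.pair_name,
		       rte_le_to_cpu_16(resp.vf_a_fid),
		       rte_le_to_cpu_16(resp.vf_b_fid),
		       resp.pair_state);
		idx = rte_le_to_cpu_16(resp.next_vf_pair_index);
	} while (idx != 0);
}
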
+
+/***********************
+ * hwrm_cfa_pair_alloc *
+ ***********************/
+
+
+/* hwrm_cfa_pair_alloc_input (size:576b/72B) */
+struct hwrm_cfa_pair_alloc_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
 /*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
 */
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
 /*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
 */
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
 /*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
 */
- uint64_t resp_addr;
+ uint16_t target_id;
 /*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * A physical address pointer pointing to a host buffer into which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
 */
- uint32_t flags;
+ uint64_t resp_addr;
+ /* Pair mode (0-vf2fn, 1-rep2fn, 2-rep2rep, 3-proxy, 4-pfpair, 5-rep2fn_mod). */
+ uint8_t pair_mode;
+ /* Pair between VF on local host with PF or VF on specified host. */
+ #define HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_VF2FN UINT32_C(0x0)
+ /* Pair between REP on local host with PF or VF on specified host. */
+ #define HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_REP2FN UINT32_C(0x1)
+ /* Pair between REP on local host with REP on specified host. */
+ #define HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_REP2REP UINT32_C(0x2)
+ /* Pair for the proxy interface. */
+ #define HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_PROXY UINT32_C(0x3)
+ /* Pair for the PF interface. */
+ #define HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_PFPAIR UINT32_C(0x4)
+ /* Modify existing rep2fn pair and move pair to new PF. */
+ #define HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_REP2FN_MOD UINT32_C(0x5)
+ /* Modify existing rep2fn pairs paired with same PF and move pairs to new PF. */
+ #define HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_REP2FN_MODALL UINT32_C(0x6)
+ #define HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_LAST \
+ HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_REP2FN_MODALL
+ uint8_t unused_0;
+ /* Logical VF number (range: 0 -> MAX_VFS -1). */
+ uint16_t vf_a_id;
+ /* Logical Host (0xff-local host). */
+ uint8_t host_b_id;
+ /* Logical PF (0xff-PF for command channel). */
+ uint8_t pf_b_id;
+ /* Logical VF number (range: 0 -> MAX_VFS -1). */
+ uint16_t vf_b_id;
+ /* Loopback port (0xff-internal loopback), valid for mode-3. */
+ uint8_t port_id;
+ /* Priority used for encap of loopback packets, valid for mode-3.
*/ + uint8_t pri; + /* New PF for rep2fn modify, valid for mode 5. */ + uint16_t new_pf_fid; + uint32_t enables; /* - * Setting of this flag indicates the applicability to the - * loopback path. + * This bit must be '1' for the q_ab field to be + * configured. */ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_LOOPBACK \ - UINT32_C(0x1) + #define HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_Q_AB_VALID UINT32_C(0x1) /* - * Setting of this flag indicates drop action. If this flag is - * not set, then it should be considered accept action. + * This bit must be '1' for the q_ba field to be + * configured. */ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP UINT32_C(0x2) + #define HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_Q_BA_VALID UINT32_C(0x2) /* - * Setting of this flag indicates that a meter is expected to be - * attached to this flow. This hint can be used when choosing - * the action record format required for the flow. + * This bit must be '1' for the fc_ab field to be + * configured. */ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER UINT32_C(0x4) - uint32_t enables; - /* This bit must be '1' for the l2_filter_id field to be configured. */ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID \ - UINT32_C(0x1) - /* This bit must be '1' for the ethertype field to be configured. */ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE \ - UINT32_C(0x2) - /* This bit must be '1' for the tunnel_type field to be configured. */ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE \ - UINT32_C(0x4) - /* This bit must be '1' for the src_macaddr field to be configured. */ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR \ - UINT32_C(0x8) - /* This bit must be '1' for the ipaddr_type field to be configured. */ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE \ - UINT32_C(0x10) - /* This bit must be '1' for the src_ipaddr field to be configured. */ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR \ - UINT32_C(0x20) + #define HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_FC_AB_VALID UINT32_C(0x4) /* - * This bit must be '1' for the src_ipaddr_mask field to be + * This bit must be '1' for the fc_ba field to be * configured. */ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK \ - UINT32_C(0x40) - /* This bit must be '1' for the dst_ipaddr field to be configured. */ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR \ - UINT32_C(0x80) + #define HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_FC_BA_VALID UINT32_C(0x8) + /* VF Pair name (32 byte string). */ + char pair_name[32]; /* - * This bit must be '1' for the dst_ipaddr_mask field to be - * configured. + * The q_ab value specifies the logical index of the TX/RX CoS + * queue to be assigned for traffic in the A to B direction of + * the interface pair. The default value is 0. */ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK \ - UINT32_C(0x100) - /* This bit must be '1' for the ip_protocol field to be configured. */ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL \ - UINT32_C(0x200) - /* This bit must be '1' for the src_port field to be configured. */ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT \ - UINT32_C(0x400) + uint8_t q_ab; /* - * This bit must be '1' for the src_port_mask field to be - * configured. + * The q_ba value specifies the logical index of the TX/RX CoS + * queue to be assigned for traffic in the B to A direction of + * the interface pair. The default value is 1. 
*/ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK \ - UINT32_C(0x800) - /* This bit must be '1' for the dst_port field to be configured. */ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT \ - UINT32_C(0x1000) + uint8_t q_ba; /* - * This bit must be '1' for the dst_port_mask field to be - * configured. + * Specifies whether RX ring flow control is disabled (0) or enabled + * (1) in the A to B direction. The default value is 0, meaning that + * packets will be dropped when the B-side RX rings are full. */ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK \ - UINT32_C(0x2000) - /* This bit must be '1' for the pri_hint field to be configured. */ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_PRI_HINT \ - UINT32_C(0x4000) + uint8_t fc_ab; /* - * This bit must be '1' for the ntuple_filter_id field to be - * configured. + * Specifies whether RX ring flow control is disabled (0) or enabled + * (1) in the B to A direction. The default value is 1, meaning that + * the RX CoS queue will be flow controlled when the A-side RX rings + * are full. */ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_NTUPLE_FILTER_ID \ - UINT32_C(0x8000) - /* This bit must be '1' for the dst_id field to be configured. */ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID \ - UINT32_C(0x10000) + uint8_t fc_ba; + uint8_t unused_1[4]; +} __attribute__((packed)); + +/* hwrm_cfa_pair_alloc_output (size:192b/24B) */ +struct hwrm_cfa_pair_alloc_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* Only valid for modes 1 and 2. */ + uint16_t rx_cfa_code_a; + /* Only valid for modes 1 and 2. */ + uint16_t tx_cfa_action_a; + /* Only valid for mode 2. */ + uint16_t rx_cfa_code_b; + /* Only valid for mode 2. */ + uint16_t tx_cfa_action_b; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/********************** + * hwrm_cfa_pair_free * + **********************/ + + +/* hwrm_cfa_pair_free_input (size:384b/48B) */ +struct hwrm_cfa_pair_free_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * This bit must be '1' for the mirror_vnic_id field to be - * configured. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID \ - UINT32_C(0x20000) - /* This bit must be '1' for the dst_macaddr field to be configured. */ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR \ - UINT32_C(0x40000) - uint64_t l2_filter_id; + uint16_t cmpl_ring; /* - * This value identifies a set of CFA data structures used for - * an L2 context. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. 
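
A configuration sketch for hwrm_cfa_pair_alloc above (illustrative only; HWRM_CFA_PAIR_ALLOC is assumed to be the request-type constant defined elsewhere in this file). The enables word is what makes the q_ab/q_ba/fc_ab/fc_ba overrides optional: any field whose VALID bit stays clear keeps its documented default.

#include <string.h>
#include <rte_byteorder.h>

/* Illustrative helper: pair a local representor with VF vf_id,
 * overriding only the A-to-B CoS queue. */
static void
bnxt_fill_rep2fn_pair_req(struct hwrm_cfa_pair_alloc_input *req,
			  uint16_t vf_id, const char *name)
{
	memset(req, 0, sizeof(*req));
	req->req_type = rte_cpu_to_le_16(HWRM_CFA_PAIR_ALLOC);
	req->pair_mode = HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_REP2FN;
	req->vf_a_id = rte_cpu_to_le_16(vf_id);
	/* q_ba, fc_ab and fc_ba keep their defaults because their
	 * VALID bits remain clear. */
	req->enables = rte_cpu_to_le_32(
		HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_Q_AB_VALID);
	req->q_ab = 2;
	/* pair_name is a fixed 32-byte field; keep it NUL-padded. */
	strncpy(req->pair_name, name, sizeof(req->pair_name) - 1);
}
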
*/ - uint8_t src_macaddr[6]; + uint16_t seq_id; /* - * This value indicates the source MAC address in the Ethernet - * header. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint16_t ethertype; - /* This value indicates the ethertype in the Ethernet header. */ - uint8_t ip_addr_type; + uint16_t target_id; /* - * This value indicates the type of IP address. 4 - IPv4 6 - - * IPv6 All others are invalid. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - /* invalid */ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_UNKNOWN \ - UINT32_C(0x0) - /* IPv4 */ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 \ - UINT32_C(0x4) - /* IPv6 */ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 \ - UINT32_C(0x6) - uint8_t ip_protocol; + uint64_t resp_addr; + /* VF Pair name (32 byte string). */ + char pair_name[32]; +} __attribute__((packed)); + +/* hwrm_cfa_pair_free_output (size:128b/16B) */ +struct hwrm_cfa_pair_free_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * The value of protocol filed in IP header. Applies to UDP and - * TCP traffic. 6 - TCP 17 - UDP + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - /* invalid */ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_PROTOCOL_UNKNOWN \ - UINT32_C(0x0) - /* TCP */ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_PROTOCOL_TCP \ - UINT32_C(0x6) - /* UDP */ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_PROTOCOL_UDP \ - UINT32_C(0x11) - uint16_t dst_id; + uint8_t valid; +} __attribute__((packed)); + +/********************** + * hwrm_cfa_pair_info * + **********************/ + + +/* hwrm_cfa_pair_info_input (size:448b/56B) */ +struct hwrm_cfa_pair_info_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * If set, this value shall represent the Logical VNIC ID of the - * destination VNIC for the RX path and network port id of the - * destination port for the TX path. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint16_t mirror_vnic_id; - /* Logical VNIC ID of the VNIC where traffic is mirrored. */ - uint8_t tunnel_type; + uint16_t cmpl_ring; /* - * This value indicates the tunnel type for this filter. If this - * field is not specified, then the filter shall apply to both - * non-tunneled and tunneled packets. If this field conflicts - * with the tunnel_type specified in the l2_filter_id, then the - * HWRM shall return an error for this command. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. 
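
Every response structure in this family ends with the same valid byte, and its comment always states the same ordering contract: the firmware writes valid last, so the host must poll it before trusting any other response field. A sketch of that convention (illustrative only; rte_delay_us() is DPDK's microsecond busy-wait helper):

#include <rte_cycles.h>

/* Illustrative helper: wait for the firmware to mark a response
 * complete by polling the last byte of the response buffer. */
static int
bnxt_wait_resp_valid(volatile struct hwrm_cfa_pair_free_output *resp,
		     int timeout_us)
{
	while (timeout_us-- > 0) {
		/* valid is written last, so once it reads 1 every
		 * other field of the response is stable. */
		if (resp->valid == 1)
			return 0;
		rte_delay_us(1);
	}
	return -1; /* timed out */
}
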
 */
- /* Non-tunnel */
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NONTUNNEL \
- UINT32_C(0x0)
- /* Virtual eXtensible Local Area Network (VXLAN) */
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN \
- UINT32_C(0x1)
+ uint16_t seq_id;
 /*
- * Network Virtualization Generic Routing
- * Encapsulation (NVGRE)
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
 */
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NVGRE \
- UINT32_C(0x2)
+ uint16_t target_id;
 /*
- * Generic Routing Encapsulation (GRE) inside
- * Ethernet payload
+ * A physical address pointer pointing to a host buffer into which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
 */
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_L2GRE \
- UINT32_C(0x3)
- /* IP in IP */
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPIP \
- UINT32_C(0x4)
- /* Generic Network Virtualization Encapsulation (Geneve) */
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_GENEVE \
- UINT32_C(0x5)
- /* Multi-Protocol Lable Switching (MPLS) */
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_MPLS \
- UINT32_C(0x6)
- /* Stateless Transport Tunnel (STT) */
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_STT UINT32_C(0x7)
+ uint64_t resp_addr;
+ uint32_t flags;
+ /* If this flag is set, lookup by name else lookup by index. */
+ #define HWRM_CFA_PAIR_INFO_INPUT_FLAGS_LOOKUP_TYPE UINT32_C(0x1)
+ /* If this flag is set, lookup by PF id and VF id. */
+ #define HWRM_CFA_PAIR_INFO_INPUT_FLAGS_LOOKUP_REPRE UINT32_C(0x2)
+ /* Pair table index. */
+ uint16_t pair_index;
+ /* Pair pf index. */
+ uint8_t pair_pfid;
+ /* Pair vf index. */
+ uint8_t pair_vfid;
+ /* Pair name (32 byte string). */
+ char pair_name[32];
+} __attribute__((packed));
+
+/* hwrm_cfa_pair_info_output (size:576b/72B) */
+struct hwrm_cfa_pair_info_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Pair table index. */
+ uint16_t next_pair_index;
+ /* Pair member a's fid. */
+ uint16_t a_fid;
+ /* Logical host number. */
+ uint8_t host_a_index;
+ /* Logical PF number. */
+ uint8_t pf_a_index;
+ /* Pair member a's Linux logical VF number. */
+ uint16_t vf_a_index;
+ /* Rx CFA code. */
+ uint16_t rx_cfa_code_a;
+ /* Tx CFA action. */
+ uint16_t tx_cfa_action_a;
+ /* Pair member b's fid. */
+ uint16_t b_fid;
+ /* Logical host number. */
+ uint8_t host_b_index;
+ /* Logical PF number. */
+ uint8_t pf_b_index;
+ /* Pair member b's Linux logical VF number. */
+ uint16_t vf_b_index;
+ /* Rx CFA code. */
+ uint16_t rx_cfa_code_b;
+ /* Tx CFA action. */
+ uint16_t tx_cfa_action_b;
+ /* Pair mode (0-vf2fn, 1-rep2fn, 2-rep2rep, 3-proxy, 4-pfpair). */
+ uint8_t pair_mode;
+ /* Pair between VF on local host with PF or VF on specified host. */
+ #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_MODE_VF2FN UINT32_C(0x0)
+ /* Pair between REP on local host with PF or VF on specified host. */
+ #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_MODE_REP2FN UINT32_C(0x1)
+ /* Pair between REP on local host with REP on specified host.
*/ + #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_MODE_REP2REP UINT32_C(0x2) + /* Pair for the proxy interface. */ + #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_MODE_PROXY UINT32_C(0x3) + /* Pair for the PF interface. */ + #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_MODE_PFPAIR UINT32_C(0x4) + #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_MODE_LAST \ + HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_MODE_PFPAIR + /* Pair state. */ + uint8_t pair_state; + /* Pair has been allocated */ + #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_STATE_ALLOCATED UINT32_C(0x1) + /* Both pair members are active */ + #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_STATE_ACTIVE UINT32_C(0x2) + #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_STATE_LAST \ + HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_STATE_ACTIVE + /* Pair name (32 byte string). */ + char pair_name[32]; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/********************** + * hwrm_cfa_vfr_alloc * + **********************/ + + +/* hwrm_cfa_vfr_alloc_input (size:448b/56B) */ +struct hwrm_cfa_vfr_alloc_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * Generic Routing Encapsulation (GRE) inside IP - * datagram payload + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPGRE \ - UINT32_C(0x8) - /* Any tunneled traffic */ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL \ - UINT32_C(0xff) - uint8_t pri_hint; + uint16_t cmpl_ring; /* - * This hint is provided to help in placing the filter in the - * filter table. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - /* No preference */ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_PRI_HINT_NO_PREFER \ - UINT32_C(0x0) - /* Above the given filter */ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_PRI_HINT_ABOVE UINT32_C(0x1) - /* Below the given filter */ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_PRI_HINT_BELOW UINT32_C(0x2) - /* As high as possible */ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_PRI_HINT_HIGHEST \ - UINT32_C(0x3) - /* As low as possible */ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_PRI_HINT_LOWEST UINT32_C(0x4) - uint32_t src_ipaddr[4]; + uint16_t seq_id; /* - * The value of source IP address to be used in filtering. For - * IPv4, first four bytes represent the IP address. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint32_t src_ipaddr_mask[4]; + uint16_t target_id; /* - * The value of source IP address mask to be used in filtering. - * For IPv4, first four bytes represent the IP address mask. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint32_t dst_ipaddr[4]; + uint64_t resp_addr; + /* Logical VF number (range: 0 -> MAX_VFS -1). 
 */
+ uint16_t vf_id;
 /*
- * The value of destination IP address to be used in filtering.
- * For IPv4, first four bytes represent the IP address.
+ * This field is reserved for future use.
+ * It shall be set to 0.
 */
- uint32_t dst_ipaddr_mask[4];
+ uint16_t reserved;
+ uint8_t unused_0[4];
+ /* VF Representor name (32 byte string). */
+ char vfr_name[32];
+} __attribute__((packed));
+
+/* hwrm_cfa_vfr_alloc_output (size:128b/16B) */
+struct hwrm_cfa_vfr_alloc_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Rx CFA code. */
+ uint16_t rx_cfa_code;
+ /* Tx CFA action. */
+ uint16_t tx_cfa_action;
+ uint8_t unused_0[3];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/*********************
+ * hwrm_cfa_vfr_free *
+ *********************/
+
+
+/* hwrm_cfa_vfr_free_input (size:384b/48B) */
+struct hwrm_cfa_vfr_free_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
 /*
- * The value of destination IP address mask to be used in
- * filtering. For IPv4, first four bytes represent the IP
- * address mask.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
 */
- uint16_t src_port;
+ uint16_t cmpl_ring;
 /*
- * The value of source port to be used in filtering. Applies to
- * UDP and TCP traffic.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
 */
- uint16_t src_port_mask;
+ uint16_t seq_id;
 /*
- * The value of source port mask to be used in filtering.
- * Applies to UDP and TCP traffic.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
 */
- uint16_t dst_port;
+ uint16_t target_id;
 /*
- * The value of destination port to be used in filtering.
- * Applies to UDP and TCP traffic.
+ * A physical address pointer pointing to a host buffer into which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
 */
- uint16_t dst_port_mask;
+ uint64_t resp_addr;
+ /* VF Representor name (32 byte string). */
+ char vfr_name[32];
+} __attribute__((packed));
+
+/* hwrm_cfa_vfr_free_output (size:128b/16B) */
+struct hwrm_cfa_vfr_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
 /*
- * The value of destination port mask to be used in filtering.
- * Applies to UDP and TCP traffic.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM.
This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - uint64_t ntuple_filter_id_hint; - /* This is the ID of the filter that goes along with the pri_hint. */ + uint8_t valid; } __attribute__((packed)); -/* Output (24 bytes) */ -struct hwrm_cfa_ntuple_filter_alloc_output { - uint16_t error_code; +/****************************** + * hwrm_tunnel_dst_port_query * + ******************************/ + + +/* hwrm_tunnel_dst_port_query_input (size:192b/24B) */ +struct hwrm_tunnel_dst_port_query_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + uint16_t cmpl_ring; /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint64_t ntuple_filter_id; - /* This value is an opaque id into CFA data structures. */ - uint32_t flow_id; + uint16_t seq_id; /* - * This is the ID of the flow associated with this filter. This - * value shall be used to match and associate the flow - * identifier returned in completion records. A value of - * 0xFFFFFFFF shall indicate no flow id. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint8_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t valid; + uint16_t target_id; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ + uint64_t resp_addr; + /* Tunnel Type. 
*/ + uint8_t tunnel_type; + /* Virtual eXtensible Local Area Network (VXLAN) */ + #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_VXLAN \ + UINT32_C(0x1) + /* Generic Network Virtualization Encapsulation (Geneve) */ + #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_GENEVE \ + UINT32_C(0x5) + /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */ + #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_VXLAN_V4 \ + UINT32_C(0x9) + /* Enhanced Generic Routing Encapsulation (GRE version 1) inside IP datagram payload */ + #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_IPGRE_V1 \ + UINT32_C(0xa) + #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_LAST \ + HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_IPGRE_V1 + uint8_t unused_0[7]; } __attribute__((packed)); -/* Command specific Error Codes (8 bytes) */ -struct hwrm_cfa_ntuple_filter_alloc_cmd_err { - uint8_t code; +/* hwrm_tunnel_dst_port_query_output (size:128b/16B) */ +struct hwrm_tunnel_dst_port_query_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; /* - * command specific error codes that goes to the cmd_err field - * in Common HWRM Error Response. + * This field represents the identifier of L4 destination port + * used for the given tunnel type. This field is valid for + * specific tunnel types that use layer 4 (e.g. UDP) + * transports for tunneling. */ - /* Unknown error */ - #define HWRM_CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_UNKNOWN UINT32_C(0x0) + uint16_t tunnel_dst_port_id; + /* + * This field represents the value of L4 destination port + * identified by tunnel_dst_port_id. This field is valid for + * specific tunnel types that use layer 4 (e.g. UDP) + * transports for tunneling. + * This field is in network byte order. + * + * A value of 0 means that the destination port is not + * configured. + */ + uint16_t tunnel_dst_port_val; + uint8_t unused_0[3]; /* - * Unable to complete operation due to conflict - * with Rx Mask VLAN + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - #define \ - HWRM_CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_RX_MASK_VLAN_CONFLICT_ERR \ - UINT32_C(0x1) - uint8_t unused_0[7]; + uint8_t valid; } __attribute__((packed)); -/* hwrm_cfa_ntuple_filter_free */ -/* Description: Free an ntuple filter */ -/* Input (24 bytes) */ -struct hwrm_cfa_ntuple_filter_free_input { - uint16_t req_type; +/****************************** + * hwrm_tunnel_dst_port_alloc * + ******************************/ + + +/* hwrm_tunnel_dst_port_alloc_input (size:192b/24B) */ +struct hwrm_tunnel_dst_port_alloc_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint16_t cmpl_ring; + uint16_t cmpl_ring; /* - * This value indicates the what completion ring the request - * will be optionally completed on.
If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + uint16_t seq_id; /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint64_t resp_addr; + uint16_t target_id; /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint64_t ntuple_filter_id; - /* This value is an opaque id into CFA data structures. */ + uint64_t resp_addr; + /* Tunnel Type. */ + uint8_t tunnel_type; + /* Virtual eXtensible Local Area Network (VXLAN) */ + #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN \ + UINT32_C(0x1) + /* Generic Network Virtualization Encapsulation (Geneve) */ + #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE \ + UINT32_C(0x5) + /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */ + #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_V4 \ + UINT32_C(0x9) + /* Enhanced Generic Routing Encapsulation (GRE version 1) inside IP datagram payload */ + #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_IPGRE_V1 \ + UINT32_C(0xa) + #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_LAST \ + HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_IPGRE_V1 + uint8_t unused_0; + /* + * This field represents the value of L4 destination port used + * for the given tunnel type. This field is valid for + * specific tunnel types that use layer 4 (e.g. UDP) + * transports for tunneling. + * + * This field is in network byte order. + * + * A value of 0 shall fail the command. + */ + uint16_t tunnel_dst_port_val; + uint8_t unused_1[4]; } __attribute__((packed)); -/* Output (16 bytes) */ -struct hwrm_cfa_ntuple_filter_free_output { - uint16_t error_code; - /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate - */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; +/* hwrm_tunnel_dst_port_alloc_output (size:128b/16B) */ +struct hwrm_tunnel_dst_port_alloc_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory.
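Taken together, the input fields above reduce tunnel-port allocation to a fill-and-send routine: zero the request, set the standard HWRM header, pick a tunnel_type enum, and supply the UDP port in network byte order (zero fails the command). A minimal sketch follows, assuming this header is included, a hypothetical hwrm_send_message() transport helper, and an illustrative HWRM_TUNNEL_DST_PORT_ALLOC request-type value (neither is defined in this excerpt); little-endian conversion of the header fields is omitted for brevity.

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>	/* htons() */

#define HWRM_TUNNEL_DST_PORT_ALLOC UINT32_C(0xa7)	/* illustrative value only */
int hwrm_send_message(void *bp, void *req, uint32_t req_len);	/* assumed transport */

static int vxlan_dst_port_alloc(void *bp, uint64_t resp_dma_addr)
{
	struct hwrm_tunnel_dst_port_alloc_input req;

	memset(&req, 0, sizeof(req));
	req.req_type = HWRM_TUNNEL_DST_PORT_ALLOC;
	req.cmpl_ring = 0xffff;		/* no completion ring used here */
	req.target_id = 0xffff;		/* 0xFFFF addresses the HWRM itself */
	req.resp_addr = resp_dma_addr;	/* physically contiguous response buffer */
	req.tunnel_type = HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN;
	/* The port travels in network byte order; zero would fail the command. */
	req.tunnel_dst_port_val = htons(4789);
	return hwrm_send_message(bp, &req, sizeof(req));
}

On success, the tunnel_dst_port_id in the output is the opaque handle that hwrm_tunnel_dst_port_free_input later takes back; only tunnel_dst_port_val is byte-order sensitive.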
+ * Identifier of a tunnel L4 destination port value. Only applies to tunnel + * types that have L4 destination port parameters. */ - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; + uint16_t tunnel_dst_port_id; + uint8_t unused_0[5]; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ + uint8_t valid; } __attribute__((packed)); -/* hwrm_cfa_ntuple_filter_cfg */ -/* - * Description: Configure an ntuple filter with a new destination VNIC and/or - * meter. - */ -/* Input (48 bytes) */ -struct hwrm_cfa_ntuple_filter_cfg_input { - uint16_t req_type; +/***************************** + * hwrm_tunnel_dst_port_free * + *****************************/ + + +/* hwrm_tunnel_dst_port_free_input (size:192b/24B) */ +struct hwrm_tunnel_dst_port_free_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint16_t cmpl_ring; + uint16_t cmpl_ring; /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + uint16_t seq_id; /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint64_t resp_addr; + uint16_t target_id; /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint32_t enables; - /* This bit must be '1' for the new_dst_id field to be configured. */ - #define HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_ENABLES_NEW_DST_ID \ + uint64_t resp_addr; + /* Tunnel Type.
*/ + uint8_t tunnel_type; + /* Virtual eXtensible Local Area Network (VXLAN) */ + #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN \ UINT32_C(0x1) + /* Generic Network Virtualization Encapsulation (Geneve) */ + #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE \ + UINT32_C(0x5) + /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */ + #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN_V4 \ + UINT32_C(0x9) + /* Enhanced Generic Routing Encapsulation (GRE version 1) inside IP datagram payload */ + #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_IPGRE_V1 \ + UINT32_C(0xa) + #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_LAST \ + HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_IPGRE_V1 + uint8_t unused_0; + /* + * Identifier of a tunnel L4 destination port value. Only applies to tunnel + * types that have L4 destination port parameters. + */ + uint16_t tunnel_dst_port_id; + uint8_t unused_1[4]; +} __attribute__((packed)); + +/* hwrm_tunnel_dst_port_free_output (size:128b/16B) */ +struct hwrm_tunnel_dst_port_free_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_1[7]; /* - * This bit must be '1' for the new_mirror_vnic_id field to be - * configured. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last.
+ */ + uint8_t valid; +} __attribute__((packed)); + +/* ctx_hw_stats (size:1280b/160B) */ +struct ctx_hw_stats { + /* Number of received unicast packets */ + uint64_t rx_ucast_pkts; + /* Number of received multicast packets */ + uint64_t rx_mcast_pkts; + /* Number of received broadcast packets */ + uint64_t rx_bcast_pkts; + /* Number of discarded packets on received path */ + uint64_t rx_discard_pkts; + /* Number of dropped packets on received path */ + uint64_t rx_drop_pkts; + /* Number of received bytes for unicast traffic */ + uint64_t rx_ucast_bytes; + /* Number of received bytes for multicast traffic */ + uint64_t rx_mcast_bytes; + /* Number of received bytes for broadcast traffic */ + uint64_t rx_bcast_bytes; + /* Number of transmitted unicast packets */ + uint64_t tx_ucast_pkts; + /* Number of transmitted multicast packets */ + uint64_t tx_mcast_pkts; + /* Number of transmitted broadcast packets */ + uint64_t tx_bcast_pkts; + /* Number of discarded packets on transmit path */ + uint64_t tx_discard_pkts; + /* Number of dropped packets on transmit path */ + uint64_t tx_drop_pkts; + /* Number of transmitted bytes for unicast traffic */ + uint64_t tx_ucast_bytes; + /* Number of transmitted bytes for multicast traffic */ + uint64_t tx_mcast_bytes; + /* Number of transmitted bytes for broadcast traffic */ + uint64_t tx_bcast_bytes; + /* Number of TPA packets */ + uint64_t tpa_pkts; + /* Number of TPA bytes */ + uint64_t tpa_bytes; + /* Number of TPA events */ + uint64_t tpa_events; + /* Number of TPA aborts */ + uint64_t tpa_aborts; +} __attribute__((packed)); + +/*********************** + * hwrm_stat_ctx_alloc * + ***********************/ + + +/* hwrm_stat_ctx_alloc_input (size:256b/32B) */ +struct hwrm_stat_ctx_alloc_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; + /* + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM + */ + uint16_t target_id; + /* + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - #define HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_ENABLES_NEW_MIRROR_VNIC_ID \ - UINT32_C(0x2) + uint64_t resp_addr; + /* This is the address for statistic block. */ + uint64_t stats_dma_addr; /* - * This bit must be '1' for the new_meter_instance_id field to - * be configured. + * The statistic block update period in ms. + * e.g. 250ms, 500ms, 750ms, 1000ms. + * If update_period_ms is 0, then the stats update + * shall be never done and the DMA address shall not be used. + * In this case, the stat block can only be read by + * hwrm_stat_ctx_query command. */ - #define HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_ENABLES_NEW_METER_INSTANCE_ID \ - UINT32_C(0x4) - uint32_t unused_0; - uint64_t ntuple_filter_id; - /* This value is an opaque id into CFA data structures. 
*/ - uint32_t new_dst_id; + uint32_t update_period_ms; /* - * If set, this value shall represent the new Logical VNIC ID of - * the destination VNIC for the RX path and new network port id - * of the destination port for the TX path. + * This field is used to specify statistics context specific + * configuration flags. */ - uint32_t new_mirror_vnic_id; - /* New Logical VNIC ID of the VNIC where traffic is mirrored. */ - uint16_t new_meter_instance_id; + uint8_t stat_ctx_flags; /* - * New meter to attach to the flow. Specifying the invalid - * instance ID is used to remove any existing meter from the - * flow. + * When this bit is set to '1', the statistics context shall be + * allocated for RoCE traffic only. In this case, traffic other + * than offloaded RoCE traffic shall not be included in this + * statistic context. + * When this bit is set to '0', the statistics context shall be + * used for the network traffic other than offloaded RoCE traffic. */ + #define HWRM_STAT_CTX_ALLOC_INPUT_STAT_CTX_FLAGS_ROCE UINT32_C(0x1) + uint8_t unused_0[3]; +} __attribute__((packed)); + +/* hwrm_stat_ctx_alloc_output (size:128b/16B) */ +struct hwrm_stat_ctx_alloc_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* This is the statistics context ID value. */ + uint32_t stat_ctx_id; + uint8_t unused_0[3]; /* - * A value of 0xfff is considered invalid and - * implies the instance is not configured. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - #define HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_NEW_METER_INSTANCE_ID_INVALID \ - UINT32_C(0xffff) - uint16_t unused_1[3]; + uint8_t valid; } __attribute__((packed)); -/* Output (16 bytes) */ -struct hwrm_cfa_ntuple_filter_cfg_output { - uint16_t error_code; +/********************** + * hwrm_stat_ctx_free * + **********************/ + + +/* hwrm_stat_ctx_free_input (size:192b/24B) */ +struct hwrm_stat_ctx_free_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + uint16_t cmpl_ring; /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. 
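The allocation flow implied by hwrm_stat_ctx_alloc_input is equally mechanical: hand the firmware a DMA-able ctx_hw_stats block plus a refresh period. A sketch under the same assumptions as the earlier one (this header included, hypothetical hwrm_send_message() helper, illustrative HWRM_STAT_CTX_ALLOC value, endianness conversions omitted):

#include <stdint.h>
#include <string.h>

#define HWRM_STAT_CTX_ALLOC UINT32_C(0xb0)	/* illustrative value only */
int hwrm_send_message(void *bp, void *req, uint32_t req_len);	/* assumed transport */

/*
 * Allocate a statistics context whose ctx_hw_stats block the firmware
 * refreshes by DMA once per second. A zero update_period_ms would
 * disable the DMA updates entirely, leaving hwrm_stat_ctx_query as
 * the only way to read the counters.
 */
static int stat_ctx_alloc(void *bp, uint64_t resp_dma_addr,
			  uint64_t stats_dma_addr)
{
	struct hwrm_stat_ctx_alloc_input req;

	memset(&req, 0, sizeof(req));
	req.req_type = HWRM_STAT_CTX_ALLOC;
	req.cmpl_ring = 0xffff;
	req.target_id = 0xffff;
	req.resp_addr = resp_dma_addr;
	req.stats_dma_addr = stats_dma_addr;	/* DMA target for ctx_hw_stats */
	req.update_period_ms = 1000;
	/* stat_ctx_flags left at 0: count regular, non-RoCE traffic. */
	return hwrm_send_message(bp, &req, sizeof(req));
}

The stat_ctx_id returned in the output is the handle that the free, query, and clr_stats commands below all key on.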
*/ - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; + uint16_t seq_id; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ -} __attribute__((packed)); - -/* hwrm_cfa_em_flow_alloc */ -/* - * Description: This is a generic Exact Match (EM) flow that uses fields from - * L4/L3/L2 headers. The EM flows apply to transmit and receive traffic. All - * L2/L3/L4 header fields are specified in network byte order. For each EM flow, - * there is an associated set of actions specified. For tunneled packets, all - * L2/L3/L4 fields specified are fields of inner headers unless otherwise - * specified. # If a field specified in this command is not enabled as a valid - * field, then that field shall not be used in matching packet header fields - * against this EM flow entry. - */ -/* Input (112 bytes) */ -struct hwrm_cfa_em_flow_alloc_input { - uint16_t req_type; + uint16_t target_id; /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint16_t cmpl_ring; + uint64_t resp_addr; + /* ID of the statistics context that is being freed. */ + uint32_t stat_ctx_id; + uint8_t unused_0[4]; +} __attribute__((packed)); + +/* hwrm_stat_ctx_free_output (size:128b/16B) */ +struct hwrm_stat_ctx_free_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* This is the statistics context ID value. */ + uint32_t stat_ctx_id; + uint8_t unused_0[3]; /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + uint8_t valid; +} __attribute__((packed)); + +/*********************** + * hwrm_stat_ctx_query * + ***********************/ + + +/* hwrm_stat_ctx_query_input (size:192b/24B) */ +struct hwrm_stat_ctx_query_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * Target ID of this command.
0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint64_t resp_addr; + uint16_t cmpl_ring; /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint32_t flags; + uint16_t seq_id; /* - * Enumeration denoting the RX, TX type of the resource. This - * enumeration is used for resources that are similar for both - * TX and RX paths of the chip. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH UINT32_C(0x1) - /* tx path */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_TX \ - (UINT32_C(0x0) << 0) - /* rx path */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX \ - (UINT32_C(0x1) << 0) - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_LAST \ - CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX - /* - * Setting of this flag indicates enabling of a byte counter for - * a given flow. - */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_BYTE_CTR UINT32_C(0x2) - /* - * Setting of this flag indicates enabling of a packet counter - * for a given flow. - */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PKT_CTR UINT32_C(0x4) - /* - * Setting of this flag indicates de-capsulation action for the - * given flow. - */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DECAP UINT32_C(0x8) - /* - * Setting of this flag indicates encapsulation action for the - * given flow. - */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_ENCAP UINT32_C(0x10) - /* - * Setting of this flag indicates drop action. If this flag is - * not set, then it should be considered accept action. - */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP UINT32_C(0x20) - /* - * Setting of this flag indicates that a meter is expected to be - * attached to this flow. This hint can be used when choosing - * the action record format required for the flow. - */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_METER UINT32_C(0x40) - uint32_t enables; - /* This bit must be '1' for the l2_filter_id field to be configured. */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID UINT32_C(0x1) - /* This bit must be '1' for the tunnel_type field to be configured. */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_TUNNEL_TYPE UINT32_C(0x2) - /* This bit must be '1' for the tunnel_id field to be configured. */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_TUNNEL_ID UINT32_C(0x4) - /* This bit must be '1' for the src_macaddr field to be configured. */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR UINT32_C(0x8) - /* This bit must be '1' for the dst_macaddr field to be configured. */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR UINT32_C(0x10) - /* This bit must be '1' for the ovlan_vid field to be configured. */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID UINT32_C(0x20) - /* This bit must be '1' for the ivlan_vid field to be configured. 
*/ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID UINT32_C(0x40) - /* This bit must be '1' for the ethertype field to be configured. */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE UINT32_C(0x80) - /* This bit must be '1' for the src_ipaddr field to be configured. */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR UINT32_C(0x100) - /* This bit must be '1' for the dst_ipaddr field to be configured. */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR UINT32_C(0x200) - /* This bit must be '1' for the ipaddr_type field to be configured. */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE UINT32_C(0x400) - /* This bit must be '1' for the ip_protocol field to be configured. */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL UINT32_C(0x800) - /* This bit must be '1' for the src_port field to be configured. */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT UINT32_C(0x1000) - /* This bit must be '1' for the dst_port field to be configured. */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT UINT32_C(0x2000) - /* This bit must be '1' for the dst_id field to be configured. */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID UINT32_C(0x4000) + uint16_t target_id; /* - * This bit must be '1' for the mirror_vnic_id field to be - * configured. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID \ - UINT32_C(0x8000) + uint64_t resp_addr; + /* ID of the statistics context that is being queried. */ + uint32_t stat_ctx_id; + uint8_t unused_0[4]; +} __attribute__((packed)); + +/* hwrm_stat_ctx_query_output (size:1408b/176B) */ +struct hwrm_stat_ctx_query_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. 
*/ + uint16_t resp_len; + /* Number of transmitted unicast packets */ + uint64_t tx_ucast_pkts; + /* Number of transmitted multicast packets */ + uint64_t tx_mcast_pkts; + /* Number of transmitted broadcast packets */ + uint64_t tx_bcast_pkts; + /* Number of transmitted packets with error */ + uint64_t tx_err_pkts; + /* Number of dropped packets on transmit path */ + uint64_t tx_drop_pkts; + /* Number of transmitted bytes for unicast traffic */ + uint64_t tx_ucast_bytes; + /* Number of transmitted bytes for multicast traffic */ + uint64_t tx_mcast_bytes; + /* Number of transmitted bytes for broadcast traffic */ + uint64_t tx_bcast_bytes; + /* Number of received unicast packets */ + uint64_t rx_ucast_pkts; + /* Number of received multicast packets */ + uint64_t rx_mcast_pkts; + /* Number of received broadcast packets */ + uint64_t rx_bcast_pkts; + /* Number of received packets with error */ + uint64_t rx_err_pkts; + /* Number of dropped packets on received path */ + uint64_t rx_drop_pkts; + /* Number of received bytes for unicast traffic */ + uint64_t rx_ucast_bytes; + /* Number of received bytes for multicast traffic */ + uint64_t rx_mcast_bytes; + /* Number of received bytes for broadcast traffic */ + uint64_t rx_bcast_bytes; + /* Number of aggregated unicast packets */ + uint64_t rx_agg_pkts; + /* Number of aggregated unicast bytes */ + uint64_t rx_agg_bytes; + /* Number of aggregation events */ + uint64_t rx_agg_events; + /* Number of aborted aggregations */ + uint64_t rx_agg_aborts; + uint8_t unused_0[7]; /* - * This bit must be '1' for the encap_record_id field to be - * configured. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ENCAP_RECORD_ID \ - UINT32_C(0x10000) + uint8_t valid; +} __attribute__((packed)); + +/*************************** + * hwrm_stat_ctx_clr_stats * + ***************************/ + + +/* hwrm_stat_ctx_clr_stats_input (size:192b/24B) */ +struct hwrm_stat_ctx_clr_stats_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * This bit must be '1' for the meter_instance_id field to be - * configured. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_METER_INSTANCE_ID \ - UINT32_C(0x20000) - uint64_t l2_filter_id; + uint16_t cmpl_ring; /* - * This value identifies a set of CFA data structures used for - * an L2 context. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint8_t tunnel_type; - /* Tunnel Type. 
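The counters in hwrm_stat_ctx_query_output are free-running totals, so consumers typically difference two snapshots rather than read absolute values; unsigned 64-bit subtraction absorbs any wraparound. A small helper along those lines, written against the output layout above (the rate calculation itself is not part of this header):

#include <stdint.h>

/*
 * Received packets per second between two hwrm_stat_ctx_query_output
 * snapshots taken interval_ms apart. uint64_t subtraction is modulo
 * 2^64, so a counter wrap between snapshots still yields the correct
 * delta.
 */
static uint64_t rx_pkts_per_sec(const struct hwrm_stat_ctx_query_output *prev,
				const struct hwrm_stat_ctx_query_output *cur,
				uint64_t interval_ms)
{
	uint64_t delta = (cur->rx_ucast_pkts - prev->rx_ucast_pkts) +
			 (cur->rx_mcast_pkts - prev->rx_mcast_pkts) +
			 (cur->rx_bcast_pkts - prev->rx_bcast_pkts);

	return interval_ms ? delta * 1000 / interval_ms : 0;
}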
*/ - /* Non-tunnel */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_NONTUNNEL \ - UINT32_C(0x0) - /* Virtual eXtensible Local Area Network (VXLAN) */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_VXLAN UINT32_C(0x1) + uint16_t seq_id; /* - * Network Virtualization Generic Routing - * Encapsulation (NVGRE) + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_NVGRE UINT32_C(0x2) + uint16_t target_id; /* - * Generic Routing Encapsulation (GRE) inside - * Ethernet payload + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_L2GRE UINT32_C(0x3) - /* IP in IP */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_IPIP UINT32_C(0x4) - /* Generic Network Virtualization Encapsulation (Geneve) */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_GENEVE UINT32_C(0x5) - /* Multi-Protocol Lable Switching (MPLS) */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_MPLS UINT32_C(0x6) - /* Stateless Transport Tunnel (STT) */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_STT UINT32_C(0x7) + uint64_t resp_addr; + /* ID of the statistics context that is being queried. */ + uint32_t stat_ctx_id; + uint8_t unused_0[4]; +} __attribute__((packed)); + +/* hwrm_stat_ctx_clr_stats_output (size:128b/16B) */ +struct hwrm_stat_ctx_clr_stats_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * Generic Routing Encapsulation (GRE) inside IP - * datagram payload + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_IPGRE UINT32_C(0x8) + uint8_t valid; +} __attribute__((packed)); + +/******************** + * hwrm_pcie_qstats * + ********************/ + + +/* hwrm_pcie_qstats_input (size:256b/32B) */ +struct hwrm_pcie_qstats_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * IPV4 over virtual eXtensible Local Area - * Network (IPV4oVXLAN) + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_V4 UINT32_C(0x9) - /* Any tunneled traffic */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL \ - UINT32_C(0xff) - uint8_t unused_0; - uint16_t unused_1; - uint32_t tunnel_id; + uint16_t cmpl_ring; /* - * Tunnel identifier. Virtual Network Identifier (VNI). Only - * valid with tunnel_types VXLAN, NVGRE, and Geneve. Only lower - * 24-bits of VNI field are used in setting up the filter. + * The sequence ID is used by the driver for tracking multiple + * commands. 
This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint8_t src_macaddr[6]; + uint16_t seq_id; /* - * This value indicates the source MAC address in the Ethernet - * header. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint16_t meter_instance_id; - /* The meter instance to attach to the flow. */ + uint16_t target_id; /* - * A value of 0xfff is considered invalid and - * implies the instance is not configured. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_METER_INSTANCE_ID_INVALID \ - UINT32_C(0xffff) - uint8_t dst_macaddr[6]; + uint64_t resp_addr; /* - * This value indicates the destination MAC address in the - * Ethernet header. + * The size of PCIe statistics block in bytes. + * Firmware will DMA the PCIe statistics to + * the host with this field size in the response. */ - uint16_t ovlan_vid; + uint16_t pcie_stat_size; + uint8_t unused_0[6]; /* - * This value indicates the VLAN ID of the outer VLAN tag in the - * Ethernet header. + * This is the host address where + * PCIe statistics will be stored */ - uint16_t ivlan_vid; + uint64_t pcie_stat_host_addr; +} __attribute__((packed)); + +/* hwrm_pcie_qstats_output (size:128b/16B) */ +struct hwrm_pcie_qstats_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* The size of PCIe statistics block in bytes. */ + uint16_t pcie_stat_size; + uint8_t unused_0[5]; /* - * This value indicates the VLAN ID of the inner VLAN tag in the - * Ethernet header. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - uint16_t ethertype; - /* This value indicates the ethertype in the Ethernet header. */ - uint8_t ip_addr_type; + uint8_t valid; +} __attribute__((packed)); + +/* Port Tx Statistics Formats */ +/* tx_port_stats (size:3264b/408B) */ +struct tx_port_stats { + /* Total Number of 64 Bytes frames transmitted */ + uint64_t tx_64b_frames; + /* Total Number of 65-127 Bytes frames transmitted */ + uint64_t tx_65b_127b_frames; + /* Total Number of 128-255 Bytes frames transmitted */ + uint64_t tx_128b_255b_frames; + /* Total Number of 256-511 Bytes frames transmitted */ + uint64_t tx_256b_511b_frames; + /* Total Number of 512-1023 Bytes frames transmitted */ + uint64_t tx_512b_1023b_frames; + /* Total Number of 1024-1518 Bytes frames transmitted */ + uint64_t tx_1024b_1518_frames; /* - * This value indicates the type of IP address. 4 - IPv4 6 - - * IPv6 All others are invalid. + * Total Number of each good VLAN (excludes FCS errors) + * frame transmitted which is 1519 to 1522 bytes in length + * inclusive (excluding framing bits but including FCS bytes).
*/ - /* invalid */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_UNKNOWN UINT32_C(0x0) - /* IPv4 */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 UINT32_C(0x4) - /* IPv6 */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 UINT32_C(0x6) - uint8_t ip_protocol; + uint64_t tx_good_vlan_frames; + /* Total Number of 1519-2047 Bytes frames transmitted */ + uint64_t tx_1519b_2047_frames; + /* Total Number of 2048-4095 Bytes frames transmitted */ + uint64_t tx_2048b_4095b_frames; + /* Total Number of 4096-9216 Bytes frames transmitted */ + uint64_t tx_4096b_9216b_frames; + /* Total Number of 9217-16383 Bytes frames transmitted */ + uint64_t tx_9217b_16383b_frames; + /* Total Number of good frames transmitted */ + uint64_t tx_good_frames; + /* Total Number of frames transmitted */ + uint64_t tx_total_frames; + /* Total number of unicast frames transmitted */ + uint64_t tx_ucast_frames; + /* Total number of multicast frames transmitted */ + uint64_t tx_mcast_frames; + /* Total number of broadcast frames transmitted */ + uint64_t tx_bcast_frames; + /* Total number of PAUSE control frames transmitted */ + uint64_t tx_pause_frames; /* - * The value of protocol filed in IP header. Applies to UDP and - * TCP traffic. 6 - TCP 17 - UDP + * Total number of PFC/per-priority PAUSE + * control frames transmitted */ - /* invalid */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_PROTOCOL_UNKNOWN UINT32_C(0x0) - /* TCP */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_PROTOCOL_TCP UINT32_C(0x6) - /* UDP */ - #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_PROTOCOL_UDP UINT32_C(0x11) - uint8_t unused_2; - uint8_t unused_3; - uint32_t src_ipaddr[4]; + uint64_t tx_pfc_frames; + /* Total number of jabber frames transmitted */ + uint64_t tx_jabber_frames; + /* Total number of frames transmitted with FCS error */ + uint64_t tx_fcs_err_frames; + /* Total number of control frames transmitted */ + uint64_t tx_control_frames; + /* Total number of over-sized frames transmitted */ + uint64_t tx_oversz_frames; + /* Total number of frames with single deferral */ + uint64_t tx_single_dfrl_frames; + /* Total number of frames with multiple deferrals */ + uint64_t tx_multi_dfrl_frames; + /* Total number of frames with single collision */ + uint64_t tx_single_coll_frames; + /* Total number of frames with multiple collisions */ + uint64_t tx_multi_coll_frames; + /* Total number of frames with late collisions */ + uint64_t tx_late_coll_frames; + /* Total number of frames with excessive collisions */ + uint64_t tx_excessive_coll_frames; + /* Total number of fragmented frames transmitted */ + uint64_t tx_frag_frames; + /* Total number of transmit errors */ + uint64_t tx_err; + /* Total number of single VLAN tagged frames transmitted */ + uint64_t tx_tagged_frames; + /* Total number of double VLAN tagged frames transmitted */ + uint64_t tx_dbl_tagged_frames; + /* Total number of runt frames transmitted */ + uint64_t tx_runt_frames; + /* Total number of TX FIFO under runs */ + uint64_t tx_fifo_underruns; /* - * The value of source IP address to be used in filtering. For - * IPv4, first four bytes represent the IP address. + * Total number of PFC frames with PFC enabled bit for + * Pri 0 transmitted */ - uint32_t dst_ipaddr[4]; + uint64_t tx_pfc_ena_frames_pri0; /* - * big_endian = True The value of destination IP address to be - * used in filtering. For IPv4, first four bytes represent the - * IP address. 
+ * Total number of PFC frames with PFC enabled bit for + * Pri 1 transmitted */ - uint16_t src_port; + uint64_t tx_pfc_ena_frames_pri1; /* - * The value of source port to be used in filtering. Applies to - * UDP and TCP traffic. + * Total number of PFC frames with PFC enabled bit for + * Pri 2 transmitted */ - uint16_t dst_port; + uint64_t tx_pfc_ena_frames_pri2; /* - * The value of destination port to be used in filtering. - * Applies to UDP and TCP traffic. + * Total number of PFC frames with PFC enabled bit for + * Pri 3 transmitted */ - uint16_t dst_id; + uint64_t tx_pfc_ena_frames_pri3; /* - * If set, this value shall represent the Logical VNIC ID of the - * destination VNIC for the RX path and network port id of the - * destination port for the TX path. + * Total number of PFC frames with PFC enabled bit for + * Pri 4 transmitted */ - uint16_t mirror_vnic_id; - /* Logical VNIC ID of the VNIC where traffic is mirrored. */ - uint32_t encap_record_id; - /* Logical ID of the encapsulation record. */ - uint32_t unused_4; -} __attribute__((packed)); - -/* Output (24 bytes) */ -struct hwrm_cfa_em_flow_alloc_output { - uint16_t error_code; + uint64_t tx_pfc_ena_frames_pri4; /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * Total number of PFC frames with PFC enabled bit for + * Pri 5 transmitted */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + uint64_t tx_pfc_ena_frames_pri5; /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * Total number of PFC frames with PFC enabled bit for + * Pri 6 transmitted */ - uint64_t em_filter_id; - /* This value is an opaque id into CFA data structures. */ - uint32_t flow_id; + uint64_t tx_pfc_ena_frames_pri6; /* - * This is the ID of the flow associated with this filter. This - * value shall be used to match and associate the flow - * identifier returned in completion records. A value of - * 0xFFFFFFFF shall indicate no flow id. + * Total number of PFC frames with PFC enabled bit for + * Pri 7 transmitted */ - uint8_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t valid; + uint64_t tx_pfc_ena_frames_pri7; + /* Total number of EEE LPI Events on TX */ + uint64_t tx_eee_lpi_events; + /* EEE LPI Duration Counter on TX */ + uint64_t tx_eee_lpi_duration; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. 
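That write-ordering guarantee — the valid byte lands last — is what makes it safe to poll a DMA'd response buffer for completion. A sketch of such a poll, assuming a hypothetical delay_us() busy-wait helper; a production driver would also issue the platform's read memory barrier before parsing the body.

#include <stdint.h>
#include <stdbool.h>

void delay_us(unsigned int us);	/* assumed busy-wait helper */

/*
 * Spin on the last byte of the response until the firmware sets it
 * to 1. Because that byte is written last, observing 1 means every
 * earlier byte of the response has already landed in host memory.
 */
static bool hwrm_resp_ready(const volatile uint8_t *resp, uint16_t resp_len,
			    unsigned int timeout_us)
{
	const volatile uint8_t *valid = resp + resp_len - 1;

	while (timeout_us--) {
		if (*valid == 1)
			return true;
		delay_us(1);
	}
	return false;
}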
+ * Total number of Link Level Flow Control (LLFC) messages + * transmitted */ + uint64_t tx_llfc_logical_msgs; + /* Total number of HCFC messages transmitted */ + uint64_t tx_hcfc_msgs; + /* Total number of TX collisions */ + uint64_t tx_total_collisions; + /* Total number of transmitted bytes */ + uint64_t tx_bytes; + /* Total number of end-to-end HOL frames */ + uint64_t tx_xthol_frames; + /* Total Tx Drops per Port reported by STATS block */ + uint64_t tx_stat_discard; + /* Total Tx Error Drops per Port reported by STATS block */ + uint64_t tx_stat_error; } __attribute__((packed)); -/* hwrm_cfa_em_flow_free */ -/* Description: Free an EM flow table entry */ -/* Input (24 bytes) */ -struct hwrm_cfa_em_flow_free_input { - uint16_t req_type; +/* Port Rx Statistics Formats */ +/* rx_port_stats (size:4224b/528B) */ +struct rx_port_stats { + /* Total Number of 64 Bytes frames received */ + uint64_t rx_64b_frames; + /* Total Number of 65-127 Bytes frames received */ + uint64_t rx_65b_127b_frames; + /* Total Number of 128-255 Bytes frames received */ + uint64_t rx_128b_255b_frames; + /* Total Number of 256-511 Bytes frames received */ + uint64_t rx_256b_511b_frames; + /* Total Number of 512-1023 Bytes frames received */ + uint64_t rx_512b_1023b_frames; + /* Total Number of 1024-1518 Bytes frames received */ + uint64_t rx_1024b_1518_frames; /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * Total Number of each good VLAN (excludes FCS errors) + * frame received which is 1519 to 1522 bytes in length + * inclusive (excluding framing bits but including FCS bytes). */ - uint16_t cmpl_ring; + uint64_t rx_good_vlan_frames; + /* Total Number of 1519-2047 Bytes frames received */ + uint64_t rx_1519b_2047b_frames; + /* Total Number of 2048-4095 Bytes frames received */ + uint64_t rx_2048b_4095b_frames; + /* Total Number of 4096-9216 Bytes frames received */ + uint64_t rx_4096b_9216b_frames; + /* Total Number of 9217-16383 Bytes frames received */ + uint64_t rx_9217b_16383b_frames; + /* Total number of frames received */ + uint64_t rx_total_frames; + /* Total number of unicast frames received */ + uint64_t rx_ucast_frames; + /* Total number of multicast frames received */ + uint64_t rx_mcast_frames; + /* Total number of broadcast frames received */ + uint64_t rx_bcast_frames; + /* Total number of received frames with FCS error */ + uint64_t rx_fcs_err_frames; + /* Total number of control frames received */ + uint64_t rx_ctrl_frames; + /* Total number of PAUSE frames received */ + uint64_t rx_pause_frames; + /* Total number of PFC frames received */ + uint64_t rx_pfc_frames; /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * Total number of frames received with an unsupported + * opcode */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + uint64_t rx_unsupported_opcode_frames; /* - * Target ID of this command.
0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * Total number of frames received with an unsupported + * DA for pause and PFC */ - uint64_t resp_addr; + uint64_t rx_unsupported_da_pausepfc_frames; + /* Total number of frames received with an unsupported SA */ + uint64_t rx_wrong_sa_frames; + /* Total number of received packets with alignment error */ + uint64_t rx_align_err_frames; + /* Total number of received frames with out-of-range length */ + uint64_t rx_oor_len_frames; + /* Total number of received frames with error termination */ + uint64_t rx_code_err_frames; /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. - */ - uint64_t em_filter_id; - /* This value is an opaque id into CFA data structures. */ -} __attribute__((packed)); - -/* Output (16 bytes) */ -struct hwrm_cfa_em_flow_free_output { - uint16_t error_code; + * Total number of received frames with a false carrier is + * detected during idle, as defined by RX_ER samples active + * and RXD is 0xE. The event is reported along with the + * statistics generated on the next received frame. Only + * one false carrier condition can be detected and logged + * between frames. + * + * Carrier event, valid for 10M/100M speed modes only. + */ + uint64_t rx_false_carrier_frames; + /* Total number of over-sized frames received */ + uint64_t rx_ovrsz_frames; + /* Total number of jabber packets received */ + uint64_t rx_jbr_frames; + /* Total number of received frames with MTU error */ + uint64_t rx_mtu_err_frames; + /* Total number of received frames with CRC match */ + uint64_t rx_match_crc_frames; + /* Total number of frames received promiscuously */ + uint64_t rx_promiscuous_frames; /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * Total number of received frames with one or two VLAN + * tags */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + uint64_t rx_tagged_frames; + /* Total number of received frames with two VLAN tags */ + uint64_t rx_double_tagged_frames; + /* Total number of truncated frames received */ + uint64_t rx_trunc_frames; + /* Total number of good frames (without errors) received */ + uint64_t rx_good_frames; /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * Total number of received PFC frames with transition from + * XON to XOFF on Pri 0 */ - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; + uint64_t rx_pfc_xon2xoff_frames_pri0; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * Total number of received PFC frames with transition from + * XON to XOFF on Pri 1 */ -} __attribute__((packed)); - -/* hwrm_cfa_em_flow_cfg */ -/* - * Description: Configure an EM flow with a new destination VNIC and/or meter. 
- */ -/* Input (48 bytes) */ -struct hwrm_cfa_em_flow_cfg_input { - uint16_t req_type; + uint64_t rx_pfc_xon2xoff_frames_pri1; /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * Total number of received PFC frames with transition from + * XON to XOFF on Pri 2 */ - uint16_t cmpl_ring; + uint64_t rx_pfc_xon2xoff_frames_pri2; /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * Total number of received PFC frames with transition from + * XON to XOFF on Pri 3 */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + uint64_t rx_pfc_xon2xoff_frames_pri3; /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * Total number of received PFC frames with transition from + * XON to XOFF on Pri 4 */ - uint64_t resp_addr; + uint64_t rx_pfc_xon2xoff_frames_pri4; /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * Total number of received PFC frames with transition from + * XON to XOFF on Pri 5 */ - uint32_t enables; - /* This bit must be '1' for the new_dst_id field to be configured. */ - #define HWRM_CFA_EM_FLOW_CFG_INPUT_ENABLES_NEW_DST_ID UINT32_C(0x1) + uint64_t rx_pfc_xon2xoff_frames_pri5; /* - * This bit must be '1' for the new_mirror_vnic_id field to be - * configured. + * Total number of received PFC frames with transition from + * XON to XOFF on Pri 6 */ - #define HWRM_CFA_EM_FLOW_CFG_INPUT_ENABLES_NEW_MIRROR_VNIC_ID \ - UINT32_C(0x2) + uint64_t rx_pfc_xon2xoff_frames_pri6; /* - * This bit must be '1' for the new_meter_instance_id field to - * be configured. + * Total number of received PFC frames with transition from + * XON to XOFF on Pri 7 */ - #define HWRM_CFA_EM_FLOW_CFG_INPUT_ENABLES_NEW_METER_INSTANCE_ID \ - UINT32_C(0x4) - uint32_t unused_0; - uint64_t em_filter_id; - /* This value is an opaque id into CFA data structures. */ - uint32_t new_dst_id; + uint64_t rx_pfc_xon2xoff_frames_pri7; /* - * If set, this value shall represent the new Logical VNIC ID of - * the destination VNIC for the RX path and network port id of - * the destination port for the TX path. + * Total number of received PFC frames with PFC enabled + * bit for Pri 0 */ - uint32_t new_mirror_vnic_id; - /* New Logical VNIC ID of the VNIC where traffic is mirrored. */ - uint16_t new_meter_instance_id; + uint64_t rx_pfc_ena_frames_pri0; /* - * New meter to attach to the flow. Specifying the invalid - * instance ID is used to remove any existing meter from the - * flow. + * Total number of received PFC frames with PFC enabled + * bit for Pri 1 */ + uint64_t rx_pfc_ena_frames_pri1; /* - * A value of 0xfff is considered invalid and - * implies the instance is not configured. 
+ * Total number of received PFC frames with PFC enabled + * bit for Pri 2 */ - #define HWRM_CFA_EM_FLOW_CFG_INPUT_NEW_METER_INSTANCE_ID_INVALID \ - UINT32_C(0xffff) - uint16_t unused_1[3]; -} __attribute__((packed)); - -/* Output (16 bytes) */ -struct hwrm_cfa_em_flow_cfg_output { - uint16_t error_code; + uint64_t rx_pfc_ena_frames_pri2; /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * Total number of received PFC frames with PFC enabled + * bit for Pri 3 */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + uint64_t rx_pfc_ena_frames_pri3; /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * Total number of received PFC frames with PFC enabled + * bit for Pri 4 */ - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; + uint64_t rx_pfc_ena_frames_pri4; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * Total number of received PFC frames with PFC enabled + * bit for Pri 5 */ -} __attribute__((packed)); - -/* hwrm_tunnel_dst_port_query */ -/* - * Description: This function is called by a driver to query tunnel type - * specific destination port configuration. - */ -/* Input (24 bytes) */ -struct hwrm_tunnel_dst_port_query_input { - uint16_t req_type; + uint64_t rx_pfc_ena_frames_pri5; /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * Total number of received PFC frames with PFC enabled + * bit for Pri 6 */ - uint16_t cmpl_ring; + uint64_t rx_pfc_ena_frames_pri6; /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * Total number of received PFC frames with PFC enabled + * bit for Pri 7 */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + uint64_t rx_pfc_ena_frames_pri7; + /* Total Number of frames received with SCH CRC error */ + uint64_t rx_sch_crc_err_frames; + /* Total Number of under-sized frames received */ + uint64_t rx_undrsz_frames; + /* Total Number of fragmented frames received */ + uint64_t rx_frag_frames; + /* Total number of RX EEE LPI Events */ + uint64_t rx_eee_lpi_events; + /* EEE LPI Duration Counter on RX */ + uint64_t rx_eee_lpi_duration; /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * Total number of physical type Link Level Flow Control + * (LLFC) messages received */ - uint64_t resp_addr; + uint64_t rx_llfc_physical_msgs; /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. 
+ * Total number of logical type Link Level Flow Control + * (LLFC) messages received */ - uint8_t tunnel_type; - /* Tunnel Type. */ - /* Virtual eXtensible Local Area Network (VXLAN) */ - #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_VXLAN \ - UINT32_C(0x1) - /* Generic Network Virtualization Encapsulation (Geneve) */ - #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_GENEVE \ - UINT32_C(0x5) + uint64_t rx_llfc_logical_msgs; /* - * IPV4 over virtual eXtensible Local Area - * Network (IPV4oVXLAN) + * Total number of logical type Link Level Flow Control + * (LLFC) messages received with CRC error */ - #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_VXLAN_V4 \ - UINT32_C(0x9) - uint8_t unused_0[7]; + uint64_t rx_llfc_msgs_with_crc_err; + /* Total number of HCFC messages received */ + uint64_t rx_hcfc_msgs; + /* Total number of HCFC messages received with CRC error */ + uint64_t rx_hcfc_msgs_with_crc_err; + /* Total number of received bytes */ + uint64_t rx_bytes; + /* Total number of bytes received in runt frames */ + uint64_t rx_runt_bytes; + /* Total number of runt frames received */ + uint64_t rx_runt_frames; + /* Total Rx Discards per Port reported by STATS block */ + uint64_t rx_stat_discard; + uint64_t rx_stat_err; } __attribute__((packed)); -/* Output (16 bytes) */ -struct hwrm_tunnel_dst_port_query_output { - uint16_t error_code; +/* Port Rx Statistics extended Formats */ +/* rx_port_stats_ext (size:320b/40B) */ +struct rx_port_stats_ext { + /* Number of times link state changed to down */ + uint64_t link_down_events; + /* Number of times the idle rings with pause bit are found */ + uint64_t continuous_pause_events; + /* Number of times the active rings pause bit resumed back */ + uint64_t resume_pause_events; + /* Number of times the ROCE cos queue PFC is disabled to avoid pause flood/burst */ + uint64_t continuous_roce_pause_events; + /* Number of times the ROCE cos queue PFC is enabled back */ + uint64_t resume_roce_pause_events; +} __attribute__((packed)); + +/* PCIe Statistics Formats */ +/* pcie_ctx_hw_stats (size:768b/96B) */ +struct pcie_ctx_hw_stats { + /* Number of physical layer receiver errors */ + uint64_t pcie_pl_signal_integrity; + /* Number of DLLP CRC errors detected by Data Link Layer */ + uint64_t pcie_dl_signal_integrity; + /* + * Number of TLP LCRC and sequence number errors detected + * by Data Link Layer + */ + uint64_t pcie_tl_signal_integrity; + /* Number of times LTSSM entered Recovery state */ + uint64_t pcie_link_integrity; + /* Number of TLP bytes that have been transmitted */ + uint64_t pcie_tx_traffic_rate; + /* Number of TLP bytes that have been received */ + uint64_t pcie_rx_traffic_rate; + /* Number of DLLP bytes that have been transmitted */ + uint64_t pcie_tx_dllp_statistics; + /* Number of DLLP bytes that have been received */ + uint64_t pcie_rx_dllp_statistics; + /* + * Time spent in each phase of gen3 + * equalization + */ + uint64_t pcie_equalization_time; + /* Records the last 16 transitions of the LTSSM */ + uint32_t pcie_ltssm_histogram[4]; + /* + * Records the last 8 reasons why the LTSSM transitioned + * to Recovery + */ + uint64_t pcie_recovery_histogram; +} __attribute__((packed)); + +/********************** + * hwrm_exec_fwd_resp * + **********************/ + + +/* hwrm_exec_fwd_resp_input (size:1024b/128B) */ +struct hwrm_exec_fwd_resp_input { + /* The HWRM command request type.
*/ + uint16_t req_type; /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + uint16_t cmpl_ring; /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint16_t tunnel_dst_port_id; + uint16_t seq_id; /* - * This field represents the identifier of L4 destination port - * used for the given tunnel type. This field is valid for - * specific tunnel types that use layer 4 (e.g. UDP) transports - * for tunneling. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint16_t tunnel_dst_port_val; + uint16_t target_id; /* - * This field represents the value of L4 destination port - * identified by tunnel_dst_port_id. This field is valid for - * specific tunnel types that use layer 4 (e.g. UDP) transports - * for tunneling. This field is in network byte order. A value - * of 0 means that the destination port is not configured. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint8_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t valid; + uint64_t resp_addr; + /* + * This is an encapsulated request. This request should + * be executed by the HWRM and the response should be + * provided in the response buffer inside the encapsulated + * request. + */ + uint32_t encap_request[26]; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * This value indicates the target id of the response to + * the encapsulated request. + * 0x0 - 0xFFF8 - Used for function ids + * 0xFFF8 - 0xFFFE - Reserved for internal processors + * 0xFFFF - HWRM */ + uint16_t encap_resp_target_id; + uint8_t unused_0[6]; } __attribute__((packed)); -/* hwrm_tunnel_dst_port_alloc */ -/* - * Description: This function is called by a driver to allocate l4 destination - * port for a specific tunnel type. The destination port value is provided in - * the input. If the HWRM supports only one global destination port for a tunnel - * type, then the HWRM shall keep track of its usage as described below. # The - * first caller that allocates a destination port shall always succeed and the - * HWRM shall save the destination port configuration for that tunnel type and - * increment the usage count to 1. 
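Taken together, the hwrm_exec_fwd_resp structures above describe a small proxy protocol: a PF driver copies a VF's raw command into encap_request and names the VF in encap_resp_target_id so the firmware delivers the response back to the original requester. A minimal sketch of that flow follows; the structures come from this header, while hwrm_send() is a hypothetical mailbox helper, not an API introduced by this patch.

	#include <errno.h>
	#include <stdint.h>
	#include <string.h>

	/* Hypothetical transport helper, standing in for the driver's real
	 * HWRM mailbox plumbing. */
	extern int hwrm_send(const void *req, size_t req_len);

	/* Sketch only: forward an encapsulated VF command to the HWRM for
	 * execution on the VF's behalf. */
	static int exec_fwd_vf_request(const void *vf_cmd, size_t vf_cmd_len,
				       uint16_t vf_fid)
	{
		struct hwrm_exec_fwd_resp_input req = { 0 };

		if (vf_cmd_len > sizeof(req.encap_request))
			return -EINVAL;
		req.req_type = HWRM_EXEC_FWD_RESP; /* request-type constant from this header */
		req.target_id = 0xFFFF;            /* this command always targets the HWRM */
		memcpy(req.encap_request, vf_cmd, vf_cmd_len);
		req.encap_resp_target_id = vf_fid; /* response is routed back to the VF */
		return hwrm_send(&req, sizeof(req));
	}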
# Subsequent callers allocating the same - * destination port for that tunnel type shall succeed and the HWRM shall - * increment the usage count for that port for each subsequent caller that - * succeeds. # Any subsequent caller trying to allocate a different destination - * port for that tunnel type shall fail until the usage count for the original - * destination port goes to zero. # A caller that frees a port will cause the - * usage count for that port to decrement. - */ -/* Input (24 bytes) */ -struct hwrm_tunnel_dst_port_alloc_input { - uint16_t req_type; +/* hwrm_exec_fwd_resp_output (size:128b/16B) */ +struct hwrm_exec_fwd_resp_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - uint16_t cmpl_ring; + uint8_t valid; +} __attribute__((packed)); + +/************************ + * hwrm_reject_fwd_resp * + ************************/ + + +/* hwrm_reject_fwd_resp_input (size:1024b/128B) */ +struct hwrm_reject_fwd_resp_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + uint16_t cmpl_ring; + /* + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. + */ + uint16_t seq_id; /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint64_t resp_addr; + uint16_t target_id; /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint8_t tunnel_type; - /* Tunnel Type. 
*/ - /* Virtual eXtensible Local Area Network (VXLAN) */ - #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN UINT32_C(0x1) - /* Generic Network Virtualization Encapsulation (Geneve) */ - #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE \ - UINT32_C(0x5) + uint64_t resp_addr; /* - * IPV4 over virtual eXtensible Local Area - * Network (IPV4oVXLAN) + * This is an encapsulated request. This request should + * be rejected by the HWRM and the error response should be + * provided in the response buffer inside the encapsulated + * request. */ - #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_V4 \ - UINT32_C(0x9) - uint8_t unused_0; - uint16_t tunnel_dst_port_val; + uint32_t encap_request[26]; /* - * This field represents the value of L4 destination port used - * for the given tunnel type. This field is valid for specific - * tunnel types that use layer 4 (e.g. UDP) transports for - * tunneling. This field is in network byte order. A value of 0 - * shall fail the command. + * This value indicates the target id of the response to + * the encapsulated request. + * 0x0 - 0xFFF8 - Used for function ids + * 0xFFF8 - 0xFFFE - Reserved for internal processors + * 0xFFFF - HWRM */ - uint32_t unused_1; + uint16_t encap_resp_target_id; + uint8_t unused_0[6]; } __attribute__((packed)); -/* Output (16 bytes) */ -struct hwrm_tunnel_dst_port_alloc_output { - uint16_t error_code; +/* hwrm_reject_fwd_resp_output (size:128b/16B) */ +struct hwrm_reject_fwd_resp_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + uint8_t valid; +} __attribute__((packed)); + +/***************** + * hwrm_fwd_resp * + *****************/ + + +/* hwrm_fwd_resp_input (size:1024b/128B) */ +struct hwrm_fwd_resp_input { + /* The HWRM command request type. */ + uint16_t req_type; + /* + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. + */ + uint16_t cmpl_ring; /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint16_t tunnel_dst_port_id; + uint16_t seq_id; /* - * Identifier of a tunnel L4 destination port value. Only - * applies to tunnel types that has l4 destination port - * parameters. 
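hwrm_reject_fwd_resp is the mirror image of hwrm_exec_fwd_resp: the same encapsulation, but the firmware generates an error response for the requester instead of executing the command. A sketch under the same assumptions as above (hwrm_send() remains hypothetical):

	/* Sketch only: have the HWRM reject a VF command the PF must not
	 * allow, returning the error response to the VF. */
	static int reject_vf_request(const void *vf_cmd, size_t vf_cmd_len,
				     uint16_t vf_fid)
	{
		struct hwrm_reject_fwd_resp_input req = { 0 };

		if (vf_cmd_len > sizeof(req.encap_request))
			return -EINVAL;
		req.req_type = HWRM_REJECT_FWD_RESP;
		req.target_id = 0xFFFF;            /* always addressed to the HWRM */
		memcpy(req.encap_request, vf_cmd, vf_cmd_len);
		req.encap_resp_target_id = vf_fid; /* the VF receives the error response */
		return hwrm_send(&req, sizeof(req)); /* hypothetical helper, as above */
	}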
+ * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint8_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t unused_4; - uint8_t valid; + uint16_t target_id; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ + uint64_t resp_addr; + /* + * This value indicates the target id of the encapsulated + * response. + * 0x0 - 0xFFF8 - Used for function ids + * 0xFFF8 - 0xFFFE - Reserved for internal processors + * 0xFFFF - HWRM + */ + uint16_t encap_resp_target_id; + /* + * This value indicates the completion ring the encapsulated + * response will be optionally completed on. If the value is + * -1, then no CR completion shall be generated for the + * encapsulated response. Any other value must be a + * valid CR ring_id value. If a valid encap_resp_cmpl_ring + * is provided, then a CR completion shall be generated for + * the encapsulated response. + */ + uint16_t encap_resp_cmpl_ring; + /* This field indicates the length of encapsulated response. */ + uint16_t encap_resp_len; + uint8_t unused_0; + uint8_t unused_1; + /* + * This is the host address where the encapsulated response + * will be written. + * This area must be 16B aligned and must be cleared to zero + * before the original request is made. + */ + uint64_t encap_resp_addr; + /* This is an encapsulated response. */ + uint32_t encap_resp[24]; } __attribute__((packed)); -/* hwrm_tunnel_dst_port_free */ -/* - * Description: This function is called by a driver to free l4 destination port - * for a specific tunnel type. - */ -/* Input (24 bytes) */ -struct hwrm_tunnel_dst_port_free_input { - uint16_t req_type; +/* hwrm_fwd_resp_output (size:128b/16B) */ +struct hwrm_fwd_resp_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - uint16_t cmpl_ring; + uint8_t valid; +} __attribute__((packed)); + +/***************************** + * hwrm_fwd_async_event_cmpl * + *****************************/ + + +/* hwrm_fwd_async_event_cmpl_input (size:320b/40B) */ +struct hwrm_fwd_async_event_cmpl_input { + /* The HWRM command request type. 
*/ + uint16_t req_type; /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + uint16_t cmpl_ring; /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint64_t resp_addr; + uint16_t seq_id; /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint8_t tunnel_type; - /* Tunnel Type. */ - /* Virtual eXtensible Local Area Network (VXLAN) */ - #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN UINT32_C(0x1) - /* Generic Network Virtualization Encapsulation (Geneve) */ - #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE UINT32_C(0x5) + uint16_t target_id; /* - * IPV4 over virtual eXtensible Local Area - * Network (IPV4oVXLAN) + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN_V4 \ - UINT32_C(0x9) - uint8_t unused_0; - uint16_t tunnel_dst_port_id; + uint64_t resp_addr; /* - * Identifier of a tunnel L4 destination port value. Only - * applies to tunnel types that has l4 destination port - * parameters. + * This value indicates the target id of the encapsulated + * asynchronous event. + * 0x0 - 0xFFF8 - Used for function ids + * 0xFFF8 - 0xFFFE - Reserved for internal processors + * 0xFFFF - Broadcast to all children VFs (only applicable when + * a PF is the requester) + */ + uint16_t encap_async_event_target_id; + uint8_t unused_0[6]; + /* This is an encapsulated asynchronous event completion. */ + uint32_t encap_async_event_cmpl[4]; +} __attribute__((packed)); + +/* hwrm_fwd_async_event_cmpl_output (size:128b/16B) */ +struct hwrm_fwd_async_event_cmpl_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. 
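The `valid` byte that closes every output structure is the completion protocol in miniature: firmware writes it last, so once it reads as '1' the rest of the response is guaranteed to be in memory. A sketch of the polling side, assuming the response buffer at resp_addr is CPU-mapped at `resp` and using DPDK's rte_io_rmb()/rte_delay_us() for ordering and pacing:

	#include <stdint.h>
	#include <rte_atomic.h>
	#include <rte_cycles.h>

	/* Sketch only: spin until the last byte of a resp_len-sized HWRM
	 * response reads as 1, then order that read before any read of the
	 * response body. */
	static int hwrm_wait_valid(volatile const uint8_t *resp,
				   uint16_t resp_len, unsigned int timeout_us)
	{
		volatile const uint8_t *valid = resp + resp_len - 1;

		while (timeout_us--) {
			if (*valid == 1) {
				rte_io_rmb(); /* valid byte first, payload reads after */
				return 0;
			}
			rte_delay_us(1);
		}
		return -1; /* firmware never completed the response */
	}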
*/ - uint32_t unused_1; + uint8_t valid; } __attribute__((packed)); -/* Output (16 bytes) */ -struct hwrm_tunnel_dst_port_free_output { - uint16_t error_code; +/************************** + * hwrm_nvm_raw_write_blk * + **************************/ + + +/* hwrm_nvm_raw_write_blk_input (size:256b/32B) */ +struct hwrm_nvm_raw_write_blk_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + uint16_t cmpl_ring; /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; + uint16_t seq_id; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ -} __attribute__((packed)); - -/* hwrm_stat_ctx_alloc */ -/* - * Description: This command allocates and does basic preparation for a stat - * context. - */ -/* Input (32 bytes) */ -struct hwrm_stat_ctx_alloc_input { - uint16_t req_type; + uint16_t target_id; /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint16_t cmpl_ring; + uint64_t resp_addr; /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * 64-bit Host Source Address. + * This is the location of the source data to be written. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + uint64_t host_src_addr; /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * 32-bit Destination Address. + * This is the NVRAM byte-offset where the source data will be written to. */ - uint64_t resp_addr; + uint32_t dest_addr; + /* Length of data to be written, in bytes.
*/ + uint32_t len; +} __attribute__((packed)); + +/* hwrm_nvm_raw_write_blk_output (size:128b/16B) */ +struct hwrm_nvm_raw_write_blk_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - uint64_t stats_dma_addr; - /* This is the address for statistic block. */ - uint32_t update_period_ms; + uint8_t valid; +} __attribute__((packed)); + +/***************** + * hwrm_nvm_read * + *****************/ + + +/* hwrm_nvm_read_input (size:320b/40B) */ +struct hwrm_nvm_read_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * The statistic block update period in ms. e.g. 250ms, 500ms, - * 750ms, 1000ms. If update_period_ms is 0, then the stats - * update shall be never done and the DMA address shall not be - * used. In this case, the stat block can only be read by - * hwrm_stat_ctx_query command. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint8_t stat_ctx_flags; + uint16_t cmpl_ring; /* - * This field is used to specify statistics context specific - * configuration flags. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ + uint16_t seq_id; /* - * When this bit is set to '1', the statistics context shall be - * allocated for RoCE traffic only. In this case, traffic other - * than offloaded RoCE traffic shall not be included in this - * statistic context. When this bit is set to '0', the - * statistics context shall be used for the network traffic - * other than offloaded RoCE traffic. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - #define HWRM_STAT_CTX_ALLOC_INPUT_STAT_CTX_FLAGS_ROCE UINT32_C(0x1) - uint8_t unused_0[3]; -} __attribute__((packed)); - -/* Output (16 bytes) */ -struct hwrm_stat_ctx_alloc_output { - uint16_t error_code; + uint16_t target_id; /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + uint64_t resp_addr; /* - * This field is the length of the response in bytes. 
The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * 64-bit Host Destination Address. + * This is the host address where the data will be written to. */ - uint32_t stat_ctx_id; - /* This is the statistics context ID value. */ - uint8_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t valid; + uint64_t host_dest_addr; + /* The 0-based index of the directory entry. */ + uint16_t dir_idx; + uint8_t unused_0[2]; + /* The NVRAM byte-offset to read from. */ + uint32_t offset; + /* The length of the data to be read, in bytes. */ + uint32_t len; + uint8_t unused_1[4]; +} __attribute__((packed)); + +/* hwrm_nvm_read_output (size:128b/16B) */ +struct hwrm_nvm_read_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ + uint8_t valid; } __attribute__((packed)); -/* hwrm_stat_ctx_free */ -/* Description: This command is used to free a stat context. */ -/* Input (24 bytes) */ -struct hwrm_stat_ctx_free_input { - uint16_t req_type; +/********************* + * hwrm_nvm_raw_dump * + *********************/ + + +/* hwrm_nvm_raw_dump_input (size:256b/32B) */ +struct hwrm_nvm_raw_dump_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint16_t cmpl_ring; + uint16_t cmpl_ring; /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + uint16_t seq_id; /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint64_t resp_addr; + uint16_t target_id; /* - * This is the host address where the response will be written - * when the request is complete. 
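Driving hwrm_nvm_read is then just a matter of naming the item (its directory index), the byte range, and a DMA-able destination. A sketch with the same hypothetical hwrm_send() transport as the earlier examples; dest_iova is assumed to be the bus address of a buffer at least `len` bytes long:

	/* Sketch only: read `len` bytes at `offset` from NVM directory entry
	 * `dir_idx` into a DMA buffer the firmware can write to. */
	static int nvm_read_item(uint16_t dir_idx, uint32_t offset,
				 uint32_t len, uint64_t dest_iova)
	{
		struct hwrm_nvm_read_input req = { 0 };

		req.req_type = HWRM_NVM_READ;   /* request-type constant from this header */
		req.host_dest_addr = dest_iova; /* 64-bit host destination address */
		req.dir_idx = dir_idx;          /* 0-based directory entry index */
		req.offset = offset;            /* NVRAM byte-offset to read from */
		req.len = len;                  /* bytes to read */
		return hwrm_send(&req, sizeof(req)); /* hypothetical helper */
	}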
This area must be 16B aligned - * and must be cleared to zero before the request is made. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint32_t stat_ctx_id; - /* ID of the statistics context that is being queried. */ - uint32_t unused_0; -} __attribute__((packed)); - -/* Output (16 bytes) */ -struct hwrm_stat_ctx_free_output { - uint16_t error_code; + uint64_t resp_addr; /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * 64-bit Host Destination Address. + * This is the host address where the data will be written to. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + uint64_t host_dest_addr; + /* 32-bit NVRAM byte-offset to read from. */ + uint32_t offset; + /* Total length of NVRAM contents to be read, in bytes. */ + uint32_t len; +} __attribute__((packed)); + +/* hwrm_nvm_raw_dump_output (size:128b/16B) */ +struct hwrm_nvm_raw_dump_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - uint32_t stat_ctx_id; - /* This is the statistics context ID value. */ - uint8_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t valid; + uint8_t valid; +} __attribute__((packed)); + +/**************************** + * hwrm_nvm_get_dir_entries * + ****************************/ + + +/* hwrm_nvm_get_dir_entries_input (size:192b/24B) */ +struct hwrm_nvm_get_dir_entries_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ -} __attribute__((packed)); - -/* hwrm_stat_ctx_query */ -/* Description: This command returns statistics of a context. */ -/* Input (24 bytes) */ -struct hwrm_stat_ctx_query_input { - uint16_t req_type; + uint16_t cmpl_ring; /* - * This value indicates what type of request this is. The format for the - * rest of the command is determined by this field. + * The sequence ID is used by the driver for tracking multiple + * commands. 
This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint16_t cmpl_ring; + uint16_t seq_id; /* - * This value indicates the what completion ring the request will be - * optionally completed on. If the value is -1, then no CR completion - * will be generated. Any other value must be a valid CR ring_id value - * for this function. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + uint16_t target_id; /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids - * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint64_t resp_addr; + uint64_t resp_addr; /* - * This is the host address where the response will be written when the - * request is complete. This area must be 16B aligned and must be - * cleared to zero before the request is made. + * 64-bit Host Destination Address. + * This is the host address where the directory will be written. */ - uint32_t stat_ctx_id; - /* ID of the statistics context that is being queried. */ - uint32_t unused_0; + uint64_t host_dest_addr; } __attribute__((packed)); -/* Output (176 bytes) */ -struct hwrm_stat_ctx_query_output { - uint16_t error_code; - /* - * Pass/Fail or error type Note: receiver to verify the in parameters, - * and fail the call with an error when appropriate - */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; - /* - * This field is the length of the response in bytes. The last byte of - * the response is a valid flag that will read as '1' when the command - * has been completely written to memory. 
- */ - uint64_t tx_ucast_pkts; - /* Number of transmitted unicast packets */ - uint64_t tx_mcast_pkts; - /* Number of transmitted multicast packets */ - uint64_t tx_bcast_pkts; - /* Number of transmitted broadcast packets */ - uint64_t tx_err_pkts; - /* Number of transmitted packets with error */ - uint64_t tx_drop_pkts; - /* Number of dropped packets on transmit path */ - uint64_t tx_ucast_bytes; - /* Number of transmitted bytes for unicast traffic */ - uint64_t tx_mcast_bytes; - /* Number of transmitted bytes for multicast traffic */ - uint64_t tx_bcast_bytes; - /* Number of transmitted bytes for broadcast traffic */ - uint64_t rx_ucast_pkts; - /* Number of received unicast packets */ - uint64_t rx_mcast_pkts; - /* Number of received multicast packets */ - uint64_t rx_bcast_pkts; - /* Number of received broadcast packets */ - uint64_t rx_err_pkts; - /* Number of received packets with error */ - uint64_t rx_drop_pkts; - /* Number of dropped packets on received path */ - uint64_t rx_ucast_bytes; - /* Number of received bytes for unicast traffic */ - uint64_t rx_mcast_bytes; - /* Number of received bytes for multicast traffic */ - uint64_t rx_bcast_bytes; - /* Number of received bytes for broadcast traffic */ - uint64_t rx_agg_pkts; - /* Number of aggregated unicast packets */ - uint64_t rx_agg_bytes; - /* Number of aggregated unicast bytes */ - uint64_t rx_agg_events; - /* Number of aggregation events */ - uint64_t rx_agg_aborts; - /* Number of aborted aggregations */ - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; +/* hwrm_nvm_get_dir_entries_output (size:128b/16B) */ +struct hwrm_nvm_get_dir_entries_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * This field is used in Output records to indicate that the output is - * completely written to RAM. This field should be read as '1' to - * indicate that the output has been completely written. When writing a - * command completion or response to an internal processor, the order of - * writes has to be such that this field is written last. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ + uint8_t valid; } __attribute__((packed)); -/* hwrm_stat_ctx_clr_stats */ -/* Description: This command clears statistics of a context. */ -/* Input (24 bytes) */ -struct hwrm_stat_ctx_clr_stats_input { - uint16_t req_type; +/************************* + * hwrm_nvm_get_dir_info * + *************************/ + + +/* hwrm_nvm_get_dir_info_input (size:128b/16B) */ +struct hwrm_nvm_get_dir_info_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. 
*/ - uint16_t cmpl_ring; + uint16_t cmpl_ring; /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + uint16_t seq_id; /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint64_t resp_addr; + uint16_t target_id; /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint32_t stat_ctx_id; - /* ID of the statistics context that is being queried. */ - uint32_t unused_0; + uint64_t resp_addr; } __attribute__((packed)); -/* Output (16 bytes) */ -struct hwrm_stat_ctx_clr_stats_output { - uint16_t error_code; - /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate - */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; - /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. - */ - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; +/* hwrm_nvm_get_dir_info_output (size:192b/24B) */ +struct hwrm_nvm_get_dir_info_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* Number of directory entries in the directory. */ + uint32_t entries; + /* Size of each directory entry, in bytes. */ + uint32_t entry_length; + uint8_t unused_0[7]; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. 
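hwrm_nvm_get_dir_info and hwrm_nvm_get_dir_entries are designed to be paired: the first reports entries and entry_length, which together size the buffer the second fills in. A sketch, with dma_alloc() and hwrm_send()/hwrm_send_resp() again as hypothetical driver plumbing:

	#include <errno.h>
	#include <stdint.h>

	extern int hwrm_send(const void *req, size_t req_len);            /* hypothetical */
	extern int hwrm_send_resp(const void *req, size_t req_len,
				  void *resp, size_t resp_len);           /* hypothetical */
	extern void *dma_alloc(size_t size, uint64_t *iova);              /* hypothetical */

	/* Sketch only: size the NVM directory, then dump it into a DMA buffer. */
	static int nvm_dump_directory(void **buf_out)
	{
		struct hwrm_nvm_get_dir_info_input info_req = { 0 };
		struct hwrm_nvm_get_dir_info_output info = { 0 };
		struct hwrm_nvm_get_dir_entries_input ent_req = { 0 };
		uint64_t iova;
		int rc;

		info_req.req_type = HWRM_NVM_GET_DIR_INFO;
		rc = hwrm_send_resp(&info_req, sizeof(info_req), &info, sizeof(info));
		if (rc != 0)
			return rc;

		/* The directory needs entries * entry_length bytes. */
		*buf_out = dma_alloc((size_t)info.entries * info.entry_length, &iova);
		if (*buf_out == NULL)
			return -ENOMEM;

		ent_req.req_type = HWRM_NVM_GET_DIR_ENTRIES;
		ent_req.host_dest_addr = iova; /* firmware writes the directory here */
		return hwrm_send(&ent_req, sizeof(ent_req));
	}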
*/ + uint8_t valid; } __attribute__((packed)); -/* hwrm_exec_fwd_resp */ -/* - * Description: This command is used to send an encapsulated request to the - * HWRM. This command instructs the HWRM to execute the request and forward the - * response of the encapsulated request to the location specified in the - * original request that is encapsulated. The target id of this command shall be - * set to 0xFFFF (HWRM). The response location in this command shall be used to - * acknowledge the receipt of the encapsulated request and forwarding of the - * response. - */ -/* Input (128 bytes) */ -struct hwrm_exec_fwd_resp_input { - uint16_t req_type; +/****************** + * hwrm_nvm_write * + ******************/ + + +/* hwrm_nvm_write_input (size:384b/48B) */ +struct hwrm_nvm_write_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint16_t cmpl_ring; + uint16_t cmpl_ring; /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + uint16_t seq_id; /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint64_t resp_addr; + uint16_t target_id; /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint32_t encap_request[26]; + uint64_t resp_addr; /* - * This is an encapsulated request. This request should be - * executed by the HWRM and the response should be provided in - * the response buffer inside the encapsulated request. + * 64-bit Host Source Address. + * This is where the source data is. */ - uint16_t encap_resp_target_id; + uint64_t host_src_addr; + /* The Directory Entry Type (valid values are defined in the bnxnvm_directory_type enum defined in the file bnxnvm_defs.h). */ + uint16_t dir_type; /* - * This value indicates the target id of the response to the - * encapsulated request. 0x0 - 0xFFF8 - Used for function ids - * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - - * HWRM + * Directory ordinal. + * The 0-based instance of the combined Directory Entry Type and Extension. 
*/ - uint16_t unused_0[3]; -} __attribute__((packed)); - -/* Output (16 bytes) */ -struct hwrm_exec_fwd_resp_output { - uint16_t error_code; + uint16_t dir_ordinal; + /* The Directory Entry Extension flags (see BNX_DIR_EXT_* in the file bnxnvm_defs.h). */ + uint16_t dir_ext; + /* Directory Entry Attribute flags (see BNX_DIR_ATTR_* in the file bnxnvm_defs.h). */ + uint16_t dir_attr; /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * Length of data to write, in bytes. May be less than or equal to the allocated size for the directory entry. + * The data length stored in the directory entry will be updated to reflect this value once the write is complete. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + uint32_t dir_data_length; + /* Option. */ + uint16_t option; + uint16_t flags; /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * When this bit is '1', the original active image + * will not be removed. TBD: what purpose is this? */ - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; + #define HWRM_NVM_WRITE_INPUT_FLAGS_KEEP_ORIG_ACTIVE_IMG \ + UINT32_C(0x1) /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * The requested length of the allocated NVM for the item, in bytes. This value may be greater than or equal to the specified data length (dir_data_length). + * If this value is less than the specified data length, it will be ignored. + * The response will contain the actual allocated item length, which may be greater than the requested item length. + * The purpose for allocating more than the required number of bytes for an item's data is to pre-allocate extra storage (padding) to accommodate + * the potential future growth of an item (e.g. upgraded firmware with a size increase, log growth, expanded configuration data). */ + uint32_t dir_item_length; + uint32_t unused_0; } __attribute__((packed)); -/* hwrm_reject_fwd_resp */ -/* - * Description: This command is used to send an encapsulated request to the - * HWRM. This command instructs the HWRM to reject the request and forward the - * error response of the encapsulated request to the location specified in the - * original request that is encapsulated. The target id of this command shall be - * set to 0xFFFF (HWRM). The response location in this command shall be used to - * acknowledge the receipt of the encapsulated request and forwarding of the - * response. - */ -/* Input (128 bytes) */ -struct hwrm_reject_fwd_resp_input { - uint16_t req_type; +/* hwrm_nvm_write_output (size:128b/16B) */ +struct hwrm_nvm_write_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes.
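The dir_data_length/dir_item_length split above is effectively payload size versus reserved size: requesting a larger item than the data strictly needs leaves padding so a future, bigger image can be written in place. A sketch that reserves 25% headroom, reusing the hypothetical hwrm_send() from the earlier examples:

	/* Sketch only: write an NVM item, over-allocating the entry so it can
	 * grow in place on a later update. */
	static int nvm_write_item(uint16_t dir_type, uint16_t dir_ordinal,
				  uint64_t src_iova, uint32_t data_len)
	{
		struct hwrm_nvm_write_input req = { 0 };

		req.req_type = HWRM_NVM_WRITE;
		req.host_src_addr = src_iova;   /* DMA-able source data */
		req.dir_type = dir_type;        /* bnxnvm_directory_type value */
		req.dir_ordinal = dir_ordinal;  /* instance of this type+extension */
		req.dir_data_length = data_len; /* bytes actually written */
		/* Reserve 25% padding; firmware rounds the item up to NVM blocks
		 * and reports the real allocation in the output's dir_item_length. */
		req.dir_item_length = data_len + data_len / 4;
		return hwrm_send(&req, sizeof(req)); /* hypothetical */
	}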
*/ + uint16_t resp_len; /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * Length of the allocated NVM for the item, in bytes. The value may be greater than or equal to the specified data length or the requested item length. + * The actual item length used when creating a new directory entry will be a multiple of an NVM block size. */ - uint16_t cmpl_ring; + uint32_t dir_item_length; + /* The directory index of the created or modified item. */ + uint16_t dir_idx; + uint8_t unused_0; /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + uint8_t valid; +} __attribute__((packed)); + +/* hwrm_nvm_write_cmd_err (size:64b/8B) */ +struct hwrm_nvm_write_cmd_err { /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * command-specific error codes that go to + * the cmd_err field in Common HWRM Error Response. */ - uint64_t resp_addr; + uint8_t code; + /* Unknown error */ + #define HWRM_NVM_WRITE_CMD_ERR_CODE_UNKNOWN UINT32_C(0x0) + /* Unable to complete operation due to fragmentation */ + #define HWRM_NVM_WRITE_CMD_ERR_CODE_FRAG_ERR UINT32_C(0x1) + /* NVM is completely full. */ + #define HWRM_NVM_WRITE_CMD_ERR_CODE_NO_SPACE UINT32_C(0x2) + #define HWRM_NVM_WRITE_CMD_ERR_CODE_LAST \ + HWRM_NVM_WRITE_CMD_ERR_CODE_NO_SPACE + uint8_t unused_0[7]; +} __attribute__((packed)); + +/******************* + * hwrm_nvm_modify * + *******************/ + + +/* hwrm_nvm_modify_input (size:320b/40B) */ +struct hwrm_nvm_modify_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint32_t encap_request[26]; + uint16_t cmpl_ring; /* - * This is an encapsulated request. This request should be - * rejected by the HWRM and the error response should be - * provided in the response buffer inside the encapsulated - * request. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint16_t encap_resp_target_id; + uint16_t seq_id; /* - * This value indicates the target id of the response to the - * encapsulated request.
0x0 - 0xFFF8 - Used for function ids - * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - - * HWRM + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint16_t unused_0[3]; -} __attribute__((packed)); - -/* Output (16 bytes) */ -struct hwrm_reject_fwd_resp_output { - uint16_t error_code; + uint16_t target_id; /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + uint64_t resp_addr; /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * 64-bit Host Source Address. + * This is where the modified data is. */ - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; + uint64_t host_src_addr; + /* 16-bit directory entry index. */ + uint16_t dir_idx; + uint8_t unused_0[2]; + /* 32-bit NVRAM byte-offset to modify content from. */ + uint32_t offset; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * Length of data to be modified, in bytes. The length shall + * be non-zero. */ + uint32_t len; + uint8_t unused_1[4]; } __attribute__((packed)); -/* hwrm_nvm_get_dir_entries */ -/* Input (24 bytes) */ -struct hwrm_nvm_get_dir_entries_input { - uint16_t req_type; - uint16_t cmpl_ring; - uint16_t seq_id; - uint16_t target_id; - uint64_t resp_addr; - uint64_t host_dest_addr; -} __attribute__((packed)); - -/* Output (16 bytes) */ -struct hwrm_nvm_get_dir_entries_output { - uint16_t error_code; - uint16_t req_type; - uint16_t seq_id; - uint16_t resp_len; - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; +/* hwrm_nvm_modify_output (size:128b/16B) */ +struct hwrm_nvm_modify_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. 
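+	 * A minimal polling sketch (illustrative only; assumes `resp`
+	 * points at the response buffer in coherent DMA memory):
+	 *
+	 *   while (((volatile struct hwrm_nvm_modify_output *)resp)->valid == 0)
+	 *           rte_delay_us(1);
+	 *   rte_io_rmb();	// order the valid read before payload reads
+	 *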
+ */ + uint8_t valid; } __attribute__((packed)); +/*************************** + * hwrm_nvm_find_dir_entry * + ***************************/ -/* hwrm_nvm_erase_dir_entry */ -/* Input (24 bytes) */ -struct hwrm_nvm_erase_dir_entry_input { - uint16_t req_type; - uint16_t cmpl_ring; - uint16_t seq_id; - uint16_t target_id; - uint64_t resp_addr; - uint16_t dir_idx; - uint16_t unused_0[3]; -}; - -/* Output (16 bytes) */ -struct hwrm_nvm_erase_dir_entry_output { - uint16_t error_code; - uint16_t req_type; - uint16_t seq_id; - uint16_t resp_len; - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; -}; - -/* hwrm_nvm_get_dir_info */ -/* Input (16 bytes) */ -struct hwrm_nvm_get_dir_info_input { - uint16_t req_type; - uint16_t cmpl_ring; - uint16_t seq_id; - uint16_t target_id; - uint64_t resp_addr; -} __attribute__((packed)); -/* Output (24 bytes) */ -struct hwrm_nvm_get_dir_info_output { - uint16_t error_code; +/* hwrm_nvm_find_dir_entry_input (size:256b/32B) */ +struct hwrm_nvm_find_dir_entry_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + uint16_t cmpl_ring; /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint32_t entries; - /* Number of directory entries in the directory. */ - uint32_t entry_length; - /* Size of each directory entry, in bytes. */ - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; + uint16_t seq_id; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ -} __attribute__((packed)); - -/* hwrm_nvm_write */ -/* - * Note: Write to the allocated NVRAM of an item referenced by an existing - * directory entry. - */ -/* Input (48 bytes) */ -struct hwrm_nvm_write_input { - uint16_t req_type; + uint16_t target_id; /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint16_t cmpl_ring; + uint64_t resp_addr; + uint32_t enables; /* - * This value indicates the what completion ring the request - * will be optionally completed on. 
If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * This bit must be '1' for the dir_idx_valid field to be + * configured. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + #define HWRM_NVM_FIND_DIR_ENTRY_INPUT_ENABLES_DIR_IDX_VALID \ + UINT32_C(0x1) + /* Directory Entry Index */ + uint16_t dir_idx; + /* Directory Entry (Image) Type */ + uint16_t dir_type; + /* + * Directory ordinal. + * The instance of this Directory Type + */ + uint16_t dir_ordinal; + /* The Directory Entry Extension flags. */ + uint16_t dir_ext; + /* This value indicates the search option using dir_ordinal. */ + uint8_t opt_ordinal; + /* This value indicates the search option using dir_ordinal. */ + #define HWRM_NVM_FIND_DIR_ENTRY_INPUT_OPT_ORDINAL_MASK UINT32_C(0x3) + #define HWRM_NVM_FIND_DIR_ENTRY_INPUT_OPT_ORDINAL_SFT 0 + /* Equal to specified ordinal value. */ + #define HWRM_NVM_FIND_DIR_ENTRY_INPUT_OPT_ORDINAL_EQ UINT32_C(0x0) + /* Greater than or equal to specified ordinal value */ + #define HWRM_NVM_FIND_DIR_ENTRY_INPUT_OPT_ORDINAL_GE UINT32_C(0x1) + /* Greater than specified ordinal value */ + #define HWRM_NVM_FIND_DIR_ENTRY_INPUT_OPT_ORDINAL_GT UINT32_C(0x2) + #define HWRM_NVM_FIND_DIR_ENTRY_INPUT_OPT_ORDINAL_LAST \ + HWRM_NVM_FIND_DIR_ENTRY_INPUT_OPT_ORDINAL_GT + uint8_t unused_0[3]; +} __attribute__((packed)); + +/* hwrm_nvm_find_dir_entry_output (size:256b/32B) */ +struct hwrm_nvm_find_dir_entry_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* Allocated NVRAM for this directory entry, in bytes. */ + uint32_t dir_item_length; + /* Size of the stored data for this directory entry, in bytes. */ + uint32_t dir_data_length; + /* + * Firmware version. + * Only valid if the directory entry is for embedded firmware stored in APE_BIN Format. + */ + uint32_t fw_ver; + /* Directory ordinal. */ + uint16_t dir_ordinal; + /* Directory Entry Index */ + uint16_t dir_idx; + uint8_t unused_0[7]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/**************************** + * hwrm_nvm_erase_dir_entry * + ****************************/ + + +/* hwrm_nvm_erase_dir_entry_input (size:192b/24B) */ +struct hwrm_nvm_erase_dir_entry_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint64_t resp_addr; + uint16_t cmpl_ring; /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * The sequence ID is used by the driver for tracking multiple + * commands. 
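+	 * An illustrative assignment, assuming a hypothetical
+	 * driver-maintained counter `seq`:
+	 *
+	 *   req.seq_id = rte_cpu_to_le_16(seq++);
+	 *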
This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint64_t host_src_addr; - /* 64-bit Host Source Address. This is where the source data is. */ - uint16_t dir_type; + uint16_t seq_id; /* - * The Directory Entry Type (valid values are defined in the - * bnxnvm_directory_type enum defined in the file - * bnxnvm_defs.h). + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint16_t dir_ordinal; + uint16_t target_id; /* - * Directory ordinal. The 0-based instance of the combined - * Directory Entry Type and Extension. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint16_t dir_ext; + uint64_t resp_addr; + /* Directory Entry Index */ + uint16_t dir_idx; + uint8_t unused_0[6]; +} __attribute__((packed)); + +/* hwrm_nvm_erase_dir_entry_output (size:128b/16B) */ +struct hwrm_nvm_erase_dir_entry_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * The Directory Entry Extension flags (see BNX_DIR_EXT_* in the - * file bnxnvm_defs.h). + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - uint16_t dir_attr; + uint8_t valid; +} __attribute__((packed)); + +/************************* + * hwrm_nvm_get_dev_info * + *************************/ + + +/* hwrm_nvm_get_dev_info_input (size:128b/16B) */ +struct hwrm_nvm_get_dev_info_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * Directory Entry Attribute flags (see BNX_DIR_ATTR_* in the - * file bnxnvm_defs.h). + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint32_t dir_data_length; + uint16_t cmpl_ring; /* - * Length of data to write, in bytes. May be less than or equal - * to the allocated size for the directory entry. The data - * length stored in the directory entry will be updated to - * reflect this value once the write is complete. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint16_t option; - /* Option. */ - uint16_t flags; + uint16_t seq_id; /* - * When this bit is '1', the original active image will not be - * removed. TBD: what purpose is this? + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - #define HWRM_NVM_WRITE_INPUT_FLAGS_KEEP_ORIG_ACTIVE_IMG UINT32_C(0x1) - uint32_t dir_item_length; + uint16_t target_id; /* - * The requested length of the allocated NVM for the item, in - * bytes. 
This value may be greater than or equal to the
-	 * specified data length (dir_data_length). If this value is
-	 * less than the specified data length, it will be ignored. The
-	 * response will contain the actual allocated item length, which
-	 * may be greater than the requested item length. The purpose
-	 * for allocating more than the required number of bytes for an
-	 * item's data is to pre-allocate extra storage (padding) to
-	 * accommodate the potential future growth of an item (e.g.
-	 * upgraded firmware with a size increase, log growth, expanded
-	 * configuration data).
+	 * A physical address pointer pointing to a host buffer that the
+	 * command's response data will be written. This can be either a host
+	 * physical address (HPA) or a guest physical address (GPA) and must
+	 * point to a physically contiguous block of memory.
 	 */
-	uint32_t	unused_0;
+	uint64_t	resp_addr;
 } __attribute__((packed));
 
-/* Output (16 bytes) */
-struct hwrm_nvm_write_output {
-	uint16_t	error_code;
-	/*
-	 * Pass/Fail or error type Note: receiver to verify the in
-	 * parameters, and fail the call with an error when appropriate
-	 */
-	uint16_t	req_type;
-	/* This field returns the type of original request. */
-	uint16_t	seq_id;
-	/* This field provides original sequence number of the command. */
-	uint16_t	resp_len;
+/* hwrm_nvm_get_dev_info_output (size:256b/32B) */
+struct hwrm_nvm_get_dev_info_output {
+	/* The specific error status for the command. */
+	uint16_t	error_code;
+	/* The HWRM command request type. */
+	uint16_t	req_type;
+	/* The sequence ID from the original command. */
+	uint16_t	seq_id;
+	/* The length of the response data in number of bytes. */
+	uint16_t	resp_len;
+	/* Manufacturer ID. */
+	uint16_t	manufacturer_id;
+	/* Device ID. */
+	uint16_t	device_id;
+	/* Sector size of the NVRAM device. */
+	uint32_t	sector_size;
+	/* Total size, in bytes, of the NVRAM device. */
+	uint32_t	nvram_size;
+	uint32_t	reserved_size;
+	/* Available size that can be used, in bytes. The available size is the NVRAM size minus the used size and the reserved size. */
+	uint32_t	available_size;
+	uint8_t	unused_0[3];
+	/*
+	 * This field is used in Output records to indicate that the output
+	 * is completely written to RAM. This field should be read as '1'
+	 * to indicate that the output has been completely written.
+	 * When writing a command completion or response to an internal processor,
+	 * the order of writes has to be such that this field is written last.
+	 */
+	uint8_t	valid;
+} __attribute__((packed));
+
+/**************************
+ * hwrm_nvm_mod_dir_entry *
+ **************************/
+
+
+/* hwrm_nvm_mod_dir_entry_input (size:256b/32B) */
+struct hwrm_nvm_mod_dir_entry_input {
+	/* The HWRM command request type. */
+	uint16_t	req_type;
 	/*
-	 * This field is the length of the response in bytes. The last
-	 * byte of the response is a valid flag that will read as '1'
-	 * when the command has been completely written to memory.
+	 * The completion ring to send the completion event on. This should
+	 * be the NQ ID returned from the `nq_alloc` HWRM command.
 	 */
-	uint32_t	dir_item_length;
+	uint16_t	cmpl_ring;
 	/*
-	 * Length of the allocated NVM for the item, in bytes. The value
-	 * may be greater than or equal to the specified data length or
-	 * the requested item length. The actual item length used when
-	 * creating a new directory entry will be a multiple of an NVM
-	 * block size.
+	 * The sequence ID is used by the driver for tracking multiple
+	 * commands. 
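+	 * An illustrative completion check, assuming `resp` points at the
+	 * buffer named by resp_addr below (both fields are little-endian,
+	 * so they compare directly):
+	 *
+	 *   if (resp->seq_id != req.seq_id)
+	 *           return -EIO;	// stale or misrouted response
+	 *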
This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint16_t dir_idx; - /* The directory index of the created or modified item. */ - uint8_t unused_0; - uint8_t valid; + uint16_t seq_id; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ -} __attribute__((packed)); - -/* hwrm_nvm_read */ -/* - * Note: Read the contents of an NVRAM item as referenced (indexed) by an - * existing directory entry. - */ -/* Input (40 bytes) */ -struct hwrm_nvm_read_input { - uint16_t req_type; + uint16_t target_id; /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint16_t cmpl_ring; + uint64_t resp_addr; + uint32_t enables; /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * This bit must be '1' for the checksum field to be + * configured. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + #define HWRM_NVM_MOD_DIR_ENTRY_INPUT_ENABLES_CHECKSUM UINT32_C(0x1) + /* Directory Entry Index */ + uint16_t dir_idx; /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * Directory ordinal. + * The (0-based) instance of this Directory Type. */ - uint64_t resp_addr; + uint16_t dir_ordinal; + /* The Directory Entry Extension flags (see BNX_DIR_EXT_* for extension flag definitions). */ + uint16_t dir_ext; + /* Directory Entry Attribute flags (see BNX_DIR_ATTR_* for attribute flag definitions). */ + uint16_t dir_attr; /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * If valid, then this field updates the checksum + * value of the content in the directory entry. */ - uint64_t host_dest_addr; + uint32_t checksum; +} __attribute__((packed)); + +/* hwrm_nvm_mod_dir_entry_output (size:128b/16B) */ +struct hwrm_nvm_mod_dir_entry_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * 64-bit Host Destination Address. This is the host address - * where the data will be written to. + * This field is used in Output records to indicate that the output + * is completely written to RAM. 
This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - uint16_t dir_idx; - /* The 0-based index of the directory entry. */ - uint8_t unused_0; - uint8_t unused_1; - uint32_t offset; - /* The NVRAM byte-offset to read from. */ - uint32_t len; - /* The length of the data to be read, in bytes. */ - uint32_t unused_2; + uint8_t valid; } __attribute__((packed)); -/* Output (16 bytes) */ -struct hwrm_nvm_read_output { - uint16_t error_code; +/************************** + * hwrm_nvm_verify_update * + **************************/ + + +/* hwrm_nvm_verify_update_input (size:192b/24B) */ +struct hwrm_nvm_verify_update_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + uint16_t cmpl_ring; /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; + uint16_t seq_id; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ -} __attribute__((packed)); - -/* Hardware Resource Manager Specification */ -/* Description: This structure is used to specify port description. */ -/* - * Note: The Hardware Resource Manager (HWRM) manages various hardware resources - * inside the chip. The HWRM is implemented in firmware, and runs on embedded - * processors inside the chip. This firmware service is vital part of the chip. - * The chip can not be used by a driver or HWRM client without the HWRM. - */ -/* Input (16 bytes) */ -struct input { - uint16_t req_type; + uint16_t target_id; /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint16_t cmpl_ring; + uint64_t resp_addr; + /* Directory Entry Type, to be verified. */ + uint16_t dir_type; /* - * This value indicates the what completion ring the request - * will be optionally completed on. 
If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. + * Directory ordinal. + * The instance of the Directory Type to be verified. */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; + uint16_t dir_ordinal; /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM + * The Directory Entry Extension flags. + * The "UPDATE" extension flag must be set in this value. + * A corresponding directory entry with the same type and ordinal values but *without* + * the "UPDATE" extension flag must also exist. The other flags of the extension must + * be identical between the active and update entries. */ - uint64_t resp_addr; + uint16_t dir_ext; + uint8_t unused_0[2]; +} __attribute__((packed)); + +/* hwrm_nvm_verify_update_output (size:128b/16B) */ +struct hwrm_nvm_verify_update_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ + uint8_t valid; } __attribute__((packed)); -/* Output (8 bytes) */ -struct output { - uint16_t error_code; +/*************************** + * hwrm_nvm_install_update * + ***************************/ + + +/* hwrm_nvm_install_update_input (size:192b/24B) */ +struct hwrm_nvm_install_update_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; + uint16_t cmpl_ring; /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ -} __attribute__((packed)); - -/* Short Command Structure (16 bytes) */ -struct hwrm_short_input { - uint16_t req_type; + uint16_t seq_id; /* - * This field indicates the type of request in the request - * buffer. The format for the rest of the command (request) is - * determined by this field. 
+ * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint16_t signature; + uint16_t target_id; /* - * This field indicates a signature that is used to identify - * short form of the command listed here. This field shall be - * set to 17185 (0x4321). + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - /* Signature indicating this is a short form of HWRM command */ - #define HWRM_SHORT_REQ_SIGNATURE_SHORT_CMD UINT32_C(0x4321) - uint16_t unused_0; - /* Reserved for future use. */ - uint16_t size; - /* This value indicates the length of the request. */ - uint64_t req_addr; - /* - * This is the host address where the request was written. This - * area must be 16B aligned. - */ -} __attribute__((packed)); - -#define HWRM_GET_HWRM_ERROR_CODE(arg) \ - { \ - typeof(arg) x = (arg); \ - ((x) == 0xf ? "HWRM_ERROR" : \ - ((x) == 0xffff ? "CMD_NOT_SUPPORTED" : \ - ((x) == 0xfffe ? "UNKNOWN_ERR" : \ - ((x) == 0x4 ? "RESOURCE_ALLOC_ERROR" : \ - ((x) == 0x5 ? "INVALID_FLAGS" : \ - ((x) == 0x6 ? "INVALID_ENABLES" : \ - ((x) == 0x0 ? "SUCCESS" : \ - ((x) == 0x1 ? "FAIL" : \ - ((x) == 0x2 ? "INVALID_PARAMS" : \ - ((x) == 0x3 ? "RESOURCE_ACCESS_DENIED" : \ - "Unknown error_code")))))))))) \ - } - -/* Return Codes (8 bytes) */ -struct ret_codes { - uint16_t error_code; - /* These are numbers assigned to return/error codes. */ - /* Request was successfully executed by the HWRM. */ - #define HWRM_ERR_CODE_SUCCESS (UINT32_C(0x0)) - /* THe HWRM failed to execute the request. */ - #define HWRM_ERR_CODE_FAIL (UINT32_C(0x1)) + uint64_t resp_addr; /* - * The request contains invalid argument(s) or - * input parameters. + * Installation type. If the value 3 through 0xffff is used, + * only packaged items with that type value will be installed and + * conditional installation directives for those packaged items + * will be over-ridden (i.e. 'create' or 'replace' will be treated + * as 'install'). */ - #define HWRM_ERR_CODE_INVALID_PARAMS (UINT32_C(0x2)) + uint32_t install_type; /* - * The requester is not allowed to access the - * requested resource. This error code shall be - * provided in a response to a request to query - * or modify an existing resource that is not - * accessible by the requester. + * Perform a normal package installation. Conditional installation + * directives (e.g. 'create' and 'replace') of packaged items + * will be followed. */ - #define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED (UINT32_C(0x3)) + #define HWRM_NVM_INSTALL_UPDATE_INPUT_INSTALL_TYPE_NORMAL UINT32_C(0x0) /* - * The HWRM is unable to allocate the requested - * resource. This code only applies to requests - * for HWRM resource allocations. + * Install all packaged items regardless of installation directive + * (i.e. treat all packaged items as though they have an installation + * directive of 'install'). */ - #define HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR (UINT32_C(0x4)) - /* Invalid combination of flags is specified in the request. 
*/
-	#define HWRM_ERR_CODE_INVALID_FLAGS	(UINT32_C(0x5))
+	#define HWRM_NVM_INSTALL_UPDATE_INPUT_INSTALL_TYPE_ALL \
+		UINT32_C(0xffffffff)
+	#define HWRM_NVM_INSTALL_UPDATE_INPUT_INSTALL_TYPE_LAST \
+		HWRM_NVM_INSTALL_UPDATE_INPUT_INSTALL_TYPE_ALL
+	uint16_t	flags;
+	/* If set to 1, then securely erase all unused locations in persistent storage. */
+	#define HWRM_NVM_INSTALL_UPDATE_INPUT_FLAGS_ERASE_UNUSED_SPACE \
+		UINT32_C(0x1)
 	/*
-	 * Invalid combination of enables fields is
-	 * specified in the request.
+	 * If set to 1, then unspecified images (images not in the package file) will be safely deleted.
+	 * When combined with erase_unused_space, unspecified images will be securely erased.
 	 */
-	#define HWRM_ERR_CODE_INVALID_ENABLES	(UINT32_C(0x6))
+	#define HWRM_NVM_INSTALL_UPDATE_INPUT_FLAGS_REMOVE_UNUSED_PKG \
+		UINT32_C(0x2)
 	/*
-	 * Generic HWRM execution error that represents
-	 * an internal error.
+	 * If set to 1, FW will defragment the NVM if defragmentation is required for the update.
+	 * Allow additional time for this command to complete if this bit is set to 1.
 	 */
-	#define HWRM_ERR_CODE_HWRM_ERROR	(UINT32_C(0xf))
-	/* Unknown error */
-	#define HWRM_ERR_CODE_UNKNOWN_ERR	(UINT32_C(0xfffe))
-	/* Unsupported or invalid command */
-	#define HWRM_ERR_CODE_CMD_NOT_SUPPORTED	(UINT32_C(0xffff))
-	uint16_t	unused_0[3];
+	#define HWRM_NVM_INSTALL_UPDATE_INPUT_FLAGS_ALLOWED_TO_DEFRAG \
+		UINT32_C(0x4)
+	uint8_t	unused_0[2];
 } __attribute__((packed));
 
-/* Output (16 bytes) */
-struct hwrm_err_output {
-	uint16_t	error_code;
+/* hwrm_nvm_install_update_output (size:192b/24B) */
+struct hwrm_nvm_install_update_output {
+	/* The specific error status for the command. */
+	uint16_t	error_code;
+	/* The HWRM command request type. */
+	uint16_t	req_type;
+	/* The sequence ID from the original command. */
+	uint16_t	seq_id;
+	/* The length of the response data in number of bytes. */
+	uint16_t	resp_len;
 	/*
-	 * Pass/Fail or error type Note: receiver to verify the in
-	 * parameters, and fail the call with an error when appropriate
+	 * Bit-mask of successfully installed items.
+	 * Bit-0 corresponds to the first packaged item, Bit-1 to the second item, etc.
+	 * A value of 0 indicates that no items were successfully installed.
+	 */
+	uint64_t	installed_items;
+	/* result is 8 b */
+	uint8_t	result;
+	/* There was no problem with the package installation. */
+	#define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_SUCCESS UINT32_C(0x0)
+	#define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_LAST \
+		HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_SUCCESS
+	/* problem_item is 8 b */
+	uint8_t	problem_item;
+	/* There was no problem with any packaged items. */
+	#define HWRM_NVM_INSTALL_UPDATE_OUTPUT_PROBLEM_ITEM_NONE \
+		UINT32_C(0x0)
+	/* There was a problem with the NVM package itself. */
+	#define HWRM_NVM_INSTALL_UPDATE_OUTPUT_PROBLEM_ITEM_PACKAGE \
+		UINT32_C(0xff)
+	#define HWRM_NVM_INSTALL_UPDATE_OUTPUT_PROBLEM_ITEM_LAST \
+		HWRM_NVM_INSTALL_UPDATE_OUTPUT_PROBLEM_ITEM_PACKAGE
+	/* reset_required is 8 b */
+	uint8_t	reset_required;
+	/*
+	 * No reset is required for installed/updated firmware or
+	 * microcode to take effect.
 	 */
-	uint16_t	req_type;
-	/* This field returns the type of original request. */
-	uint16_t	seq_id;
-	/* This field provides original sequence number of the command. */
-	uint16_t	resp_len;
+	#define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESET_REQUIRED_NONE \
+		UINT32_C(0x0)
 	/*
-	 * This field is the length of the response in bytes. 
The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. + * A PCIe reset (e.g. system reboot) is + * required for newly installed/updated firmware or + * microcode to take effect. */ - uint32_t opaque_0; - /* debug info for this error response. */ - uint16_t opaque_1; - /* debug info for this error response. */ - uint8_t cmd_err; + #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESET_REQUIRED_PCI \ + UINT32_C(0x1) /* - * In the case of an error response, command specific error code - * is returned in this field. + * A controller power reset (e.g. system power-cycle) is + * required for newly installed/updated firmware or + * microcode to take effect. Some newly installed/updated + * firmware or microcode may still take effect upon the + * next PCIe reset. */ - uint8_t valid; + #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESET_REQUIRED_POWER \ + UINT32_C(0x2) + #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESET_REQUIRED_LAST \ + HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESET_REQUIRED_POWER + uint8_t unused_0[4]; /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ + uint8_t valid; } __attribute__((packed)); -/* Port Tx Statistics Formats (408 bytes) */ -struct tx_port_stats { - uint64_t tx_64b_frames; - /* Total Number of 64 Bytes frames transmitted */ - uint64_t tx_65b_127b_frames; - /* Total Number of 65-127 Bytes frames transmitted */ - uint64_t tx_128b_255b_frames; - /* Total Number of 128-255 Bytes frames transmitted */ - uint64_t tx_256b_511b_frames; - /* Total Number of 256-511 Bytes frames transmitted */ - uint64_t tx_512b_1023b_frames; - /* Total Number of 512-1023 Bytes frames transmitted */ - uint64_t tx_1024b_1518_frames; - /* Total Number of 1024-1518 Bytes frames transmitted */ - uint64_t tx_good_vlan_frames; +/* hwrm_nvm_install_update_cmd_err (size:64b/8B) */ +struct hwrm_nvm_install_update_cmd_err { /* - * Total Number of each good VLAN (exludes FCS errors) frame - * transmitted which is 1519 to 1522 bytes in length inclusive - * (excluding framing bits but including FCS bytes). + * command specific error codes that goes to + * the cmd_err field in Common HWRM Error Response. 
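+	 * An illustrative decode using the codes defined below; on
+	 * FRAG_ERR a caller may retry with the ALLOWED_TO_DEFRAG input
+	 * flag set:
+	 *
+	 *   if (cmd_err->code == HWRM_NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR)
+	 *           flags |= HWRM_NVM_INSTALL_UPDATE_INPUT_FLAGS_ALLOWED_TO_DEFRAG;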
*/ - uint64_t tx_1519b_2047_frames; - /* Total Number of 1519-2047 Bytes frames transmitted */ - uint64_t tx_2048b_4095b_frames; - /* Total Number of 2048-4095 Bytes frames transmitted */ - uint64_t tx_4096b_9216b_frames; - /* Total Number of 4096-9216 Bytes frames transmitted */ - uint64_t tx_9217b_16383b_frames; - /* Total Number of 9217-16383 Bytes frames transmitted */ - uint64_t tx_good_frames; - /* Total Number of good frames transmitted */ - uint64_t tx_total_frames; - /* Total Number of frames transmitted */ - uint64_t tx_ucast_frames; - /* Total number of unicast frames transmitted */ - uint64_t tx_mcast_frames; - /* Total number of multicast frames transmitted */ - uint64_t tx_bcast_frames; - /* Total number of broadcast frames transmitted */ - uint64_t tx_pause_frames; - /* Total number of PAUSE control frames transmitted */ - uint64_t tx_pfc_frames; - /* Total number of PFC/per-priority PAUSE control frames transmitted */ - uint64_t tx_jabber_frames; - /* Total number of jabber frames transmitted */ - uint64_t tx_fcs_err_frames; - /* Total number of frames transmitted with FCS error */ - uint64_t tx_control_frames; - /* Total number of control frames transmitted */ - uint64_t tx_oversz_frames; - /* Total number of over-sized frames transmitted */ - uint64_t tx_single_dfrl_frames; - /* Total number of frames with single deferral */ - uint64_t tx_multi_dfrl_frames; - /* Total number of frames with multiple deferrals */ - uint64_t tx_single_coll_frames; - /* Total number of frames with single collision */ - uint64_t tx_multi_coll_frames; - /* Total number of frames with multiple collisions */ - uint64_t tx_late_coll_frames; - /* Total number of frames with late collisions */ - uint64_t tx_excessive_coll_frames; - /* Total number of frames with excessive collisions */ - uint64_t tx_frag_frames; - /* Total number of fragmented frames transmitted */ - uint64_t tx_err; - /* Total number of transmit errors */ - uint64_t tx_tagged_frames; - /* Total number of single VLAN tagged frames transmitted */ - uint64_t tx_dbl_tagged_frames; - /* Total number of double VLAN tagged frames transmitted */ - uint64_t tx_runt_frames; - /* Total number of runt frames transmitted */ - uint64_t tx_fifo_underruns; - /* Total number of TX FIFO under runs */ - uint64_t tx_pfc_ena_frames_pri0; + uint8_t code; + /* Unknown error */ + #define HWRM_NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN UINT32_C(0x0) + /* Unable to complete operation due to fragmentation */ + #define HWRM_NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR UINT32_C(0x1) + /* nvm is completely full. */ + #define HWRM_NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE UINT32_C(0x2) + #define HWRM_NVM_INSTALL_UPDATE_CMD_ERR_CODE_LAST \ + HWRM_NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE + uint8_t unused_0[7]; +} __attribute__((packed)); + +/****************** + * hwrm_nvm_flush * + ******************/ + + +/* hwrm_nvm_flush_input (size:128b/16B) */ +struct hwrm_nvm_flush_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * Total number of PFC frames with PFC enabled bit for Pri 0 - * transmitted + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint64_t tx_pfc_ena_frames_pri1; + uint16_t cmpl_ring; /* - * Total number of PFC frames with PFC enabled bit for Pri 1 - * transmitted + * The sequence ID is used by the driver for tracking multiple + * commands. 
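+	 * The flush request carries no command-specific payload beyond
+	 * this standard header; a driver may, for example, issue a single
+	 * flush after a batch of hwrm_nvm_set_variable writes instead of
+	 * setting FORCE_FLUSH on each write. Illustrative, with the
+	 * command ID assumed to be defined elsewhere in this header:
+	 *
+	 *   req.req_type = rte_cpu_to_le_16(HWRM_NVM_FLUSH);
+	 *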
This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint64_t tx_pfc_ena_frames_pri2; + uint16_t seq_id; /* - * Total number of PFC frames with PFC enabled bit for Pri 2 - * transmitted + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint64_t tx_pfc_ena_frames_pri3; + uint16_t target_id; /* - * Total number of PFC frames with PFC enabled bit for Pri 3 - * transmitted + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint64_t tx_pfc_ena_frames_pri4; + uint64_t resp_addr; +} __attribute__((packed)); + +/* hwrm_nvm_flush_output (size:128b/16B) */ +struct hwrm_nvm_flush_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * Total number of PFC frames with PFC enabled bit for Pri 4 - * transmitted + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - uint64_t tx_pfc_ena_frames_pri5; + uint8_t valid; +} __attribute__((packed)); + +/* hwrm_nvm_flush_cmd_err (size:64b/8B) */ +struct hwrm_nvm_flush_cmd_err { /* - * Total number of PFC frames with PFC enabled bit for Pri 5 - * transmitted + * command specific error codes that goes to + * the cmd_err field in Common HWRM Error Response. */ - uint64_t tx_pfc_ena_frames_pri6; + uint8_t code; + /* Unknown error */ + #define HWRM_NVM_FLUSH_CMD_ERR_CODE_UNKNOWN UINT32_C(0x0) + /* flush could not be performed */ + #define HWRM_NVM_FLUSH_CMD_ERR_CODE_FAIL UINT32_C(0x1) + #define HWRM_NVM_FLUSH_CMD_ERR_CODE_LAST \ + HWRM_NVM_FLUSH_CMD_ERR_CODE_FAIL + uint8_t unused_0[7]; +} __attribute__((packed)); + +/************************* + * hwrm_nvm_get_variable * + *************************/ + + +/* hwrm_nvm_get_variable_input (size:320b/40B) */ +struct hwrm_nvm_get_variable_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * Total number of PFC frames with PFC enabled bit for Pri 6 - * transmitted + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint64_t tx_pfc_ena_frames_pri7; + uint16_t cmpl_ring; /* - * Total number of PFC frames with PFC enabled bit for Pri 7 - * transmitted + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. 
*/ - uint64_t tx_eee_lpi_events; - /* Total number of EEE LPI Events on TX */ - uint64_t tx_eee_lpi_duration; - /* EEE LPI Duration Counter on TX */ - uint64_t tx_llfc_logical_msgs; + uint16_t seq_id; /* - * Total number of Link Level Flow Control (LLFC) messages - * transmitted + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint64_t tx_hcfc_msgs; - /* Total number of HCFC messages transmitted */ - uint64_t tx_total_collisions; - /* Total number of TX collisions */ - uint64_t tx_bytes; - /* Total number of transmitted bytes */ - uint64_t tx_xthol_frames; - /* Total number of end-to-end HOL frames */ - uint64_t tx_stat_discard; - /* Total Tx Drops per Port reported by STATS block */ - uint64_t tx_stat_error; - /* Total Tx Error Drops per Port reported by STATS block */ -} __attribute__((packed)); - -/* Port Rx Statistics Formats (528 bytes) */ -struct rx_port_stats { - uint64_t rx_64b_frames; - /* Total Number of 64 Bytes frames received */ - uint64_t rx_65b_127b_frames; - /* Total Number of 65-127 Bytes frames received */ - uint64_t rx_128b_255b_frames; - /* Total Number of 128-255 Bytes frames received */ - uint64_t rx_256b_511b_frames; - /* Total Number of 256-511 Bytes frames received */ - uint64_t rx_512b_1023b_frames; - /* Total Number of 512-1023 Bytes frames received */ - uint64_t rx_1024b_1518_frames; - /* Total Number of 1024-1518 Bytes frames received */ - uint64_t rx_good_vlan_frames; + uint16_t target_id; /* - * Total Number of each good VLAN (exludes FCS errors) frame - * received which is 1519 to 1522 bytes in length inclusive - * (excluding framing bits but including FCS bytes). + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. 
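+	 * An illustrative setup (the rte_* calls are standard DPDK;
+	 * everything else is hypothetical). The superseded wording in
+	 * this file required the area to be 16-byte aligned and zeroed,
+	 * hence the conservative choices:
+	 *
+	 *   void *buf = rte_zmalloc("hwrm_resp", 4096, 16);
+	 *   req.resp_addr = rte_cpu_to_le_64(rte_malloc_virt2iova(buf));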
*/ - uint64_t rx_1519b_2047b_frames; - /* Total Number of 1519-2047 Bytes frames received */ - uint64_t rx_2048b_4095b_frames; - /* Total Number of 2048-4095 Bytes frames received */ - uint64_t rx_4096b_9216b_frames; - /* Total Number of 4096-9216 Bytes frames received */ - uint64_t rx_9217b_16383b_frames; - /* Total Number of 9217-16383 Bytes frames received */ - uint64_t rx_total_frames; - /* Total number of frames received */ - uint64_t rx_ucast_frames; - /* Total number of unicast frames received */ - uint64_t rx_mcast_frames; - /* Total number of multicast frames received */ - uint64_t rx_bcast_frames; - /* Total number of broadcast frames received */ - uint64_t rx_fcs_err_frames; - /* Total number of received frames with FCS error */ - uint64_t rx_ctrl_frames; - /* Total number of control frames received */ - uint64_t rx_pause_frames; - /* Total number of PAUSE frames received */ - uint64_t rx_pfc_frames; - /* Total number of PFC frames received */ - uint64_t rx_unsupported_opcode_frames; - /* Total number of frames received with an unsupported opcode */ - uint64_t rx_unsupported_da_pausepfc_frames; + uint64_t resp_addr; /* - * Total number of frames received with an unsupported DA for - * pause and PFC - */ - uint64_t rx_wrong_sa_frames; - /* Total number of frames received with an unsupported SA */ - uint64_t rx_align_err_frames; - /* Total number of received packets with alignment error */ - uint64_t rx_oor_len_frames; - /* Total number of received frames with out-of-range length */ - uint64_t rx_code_err_frames; - /* Total number of received frames with error termination */ - uint64_t rx_false_carrier_frames; + * This is the host address where + * nvm variable will be stored + */ + uint64_t dest_data_addr; + /* size of data in bits */ + uint16_t data_len; + /* nvm cfg option number */ + uint16_t option_num; + /* reserved. */ + #define HWRM_NVM_GET_VARIABLE_INPUT_OPTION_NUM_RSVD_0 UINT32_C(0x0) + /* reserved. */ + #define HWRM_NVM_GET_VARIABLE_INPUT_OPTION_NUM_RSVD_FFFF \ + UINT32_C(0xffff) + #define HWRM_NVM_GET_VARIABLE_INPUT_OPTION_NUM_LAST \ + HWRM_NVM_GET_VARIABLE_INPUT_OPTION_NUM_RSVD_FFFF + /* + * Number of dimensions for this nvm configuration variable. + * This value indicates how many of the indexN values to use. + * A value of 0 means that none of the indexN values are valid. + * A value of 1 requires at index0 is valued, a value of 2 + * requires that index0 and index1 are valid, and so forth + */ + uint16_t dimensions; + /* index for the 1st dimensions */ + uint16_t index_0; + /* index for the 2nd dimensions */ + uint16_t index_1; + /* index for the 3rd dimensions */ + uint16_t index_2; + /* index for the 4th dimensions */ + uint16_t index_3; + uint8_t flags; + /* + * When this bit is set to 1, the factory default value will be returned, + * 0 returns the operational value. + */ + #define HWRM_NVM_GET_VARIABLE_INPUT_FLAGS_FACTORY_DFLT \ + UINT32_C(0x1) + uint8_t unused_0; +} __attribute__((packed)); + +/* hwrm_nvm_get_variable_output (size:128b/16B) */ +struct hwrm_nvm_get_variable_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + /* size of data of the actual variable retrieved in bits */ + uint16_t data_len; + /* + * option_num is the option number for the data retrieved. 
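+	 * An illustrative check (`requested_option` is hypothetical):
+	 *
+	 *   if (rte_le_to_cpu_16(resp->option_num) != requested_option)
+	 *           ;	// the variable was remapped; see below
+	 *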
It is possible in the + * future that the option number returned would be different than requested. This + * condition could occur if an option is deprecated and a new option id is defined + * with similar characteristics, but has a slightly different definition. This + * also makes it convenient for the caller to identify the variable result with + * the option id from the response. + */ + uint16_t option_num; + /* reserved. */ + #define HWRM_NVM_GET_VARIABLE_OUTPUT_OPTION_NUM_RSVD_0 UINT32_C(0x0) + /* reserved. */ + #define HWRM_NVM_GET_VARIABLE_OUTPUT_OPTION_NUM_RSVD_FFFF \ + UINT32_C(0xffff) + #define HWRM_NVM_GET_VARIABLE_OUTPUT_OPTION_NUM_LAST \ + HWRM_NVM_GET_VARIABLE_OUTPUT_OPTION_NUM_RSVD_FFFF + uint8_t unused_0[3]; /* - * Total number of received frames with a false carrier is - * detected during idle, as defined by RX_ER samples active and - * RXD is 0xE. The event is reported along with the statistics - * generated on the next received frame. Only one false carrier - * condition can be detected and logged between frames. Carrier - * event, valid for 10M/100M speed modes only. + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - uint64_t rx_ovrsz_frames; - /* Total number of over-sized frames received */ - uint64_t rx_jbr_frames; - /* Total number of jabber packets received */ - uint64_t rx_mtu_err_frames; - /* Total number of received frames with MTU error */ - uint64_t rx_match_crc_frames; - /* Total number of received frames with CRC match */ - uint64_t rx_promiscuous_frames; - /* Total number of frames received promiscuously */ - uint64_t rx_tagged_frames; - /* Total number of received frames with one or two VLAN tags */ - uint64_t rx_double_tagged_frames; - /* Total number of received frames with two VLAN tags */ - uint64_t rx_trunc_frames; - /* Total number of truncated frames received */ - uint64_t rx_good_frames; - /* Total number of good frames (without errors) received */ - uint64_t rx_pfc_xon2xoff_frames_pri0; + uint8_t valid; +} __attribute__((packed)); + +/* hwrm_nvm_get_variable_cmd_err (size:64b/8B) */ +struct hwrm_nvm_get_variable_cmd_err { /* - * Total number of received PFC frames with transition from XON - * to XOFF on Pri 0 + * command specific error codes that goes to + * the cmd_err field in Common HWRM Error Response. */ - uint64_t rx_pfc_xon2xoff_frames_pri1; + uint8_t code; + /* Unknown error */ + #define HWRM_NVM_GET_VARIABLE_CMD_ERR_CODE_UNKNOWN UINT32_C(0x0) + /* variable does not exist */ + #define HWRM_NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST UINT32_C(0x1) + /* configuration is corrupted and the variable cannot be saved */ + #define HWRM_NVM_GET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR UINT32_C(0x2) + /* length specified is too small */ + #define HWRM_NVM_GET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT UINT32_C(0x3) + #define HWRM_NVM_GET_VARIABLE_CMD_ERR_CODE_LAST \ + HWRM_NVM_GET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT + uint8_t unused_0[7]; +} __attribute__((packed)); + +/************************* + * hwrm_nvm_set_variable * + *************************/ + + +/* hwrm_nvm_set_variable_input (size:320b/40B) */ +struct hwrm_nvm_set_variable_input { + /* The HWRM command request type. 
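+	 * Illustrative assignment; HWRM_NVM_SET_VARIABLE is assumed to be
+	 * the matching command ID defined elsewhere in this header:
+	 *
+	 *   req.req_type = rte_cpu_to_le_16(HWRM_NVM_SET_VARIABLE);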
*/
+	uint16_t	req_type;
 	/*
-	 * Total number of received PFC frames with transition from XON
-	 * to XOFF on Pri 1
+	 * The completion ring to send the completion event on. This should
+	 * be the NQ ID returned from the `nq_alloc` HWRM command.
 	 */
-	uint64_t rx_pfc_xon2xoff_frames_pri2;
+	uint16_t	cmpl_ring;
 	/*
-	 * Total number of received PFC frames with transition from XON
-	 * to XOFF on Pri 2
+	 * The sequence ID is used by the driver for tracking multiple
+	 * commands. This ID is treated as opaque data by the firmware and
+	 * the value is returned in the `hwrm_resp_hdr` upon completion.
 	 */
-	uint64_t rx_pfc_xon2xoff_frames_pri3;
+	uint16_t	seq_id;
 	/*
-	 * Total number of received PFC frames with transition from XON
-	 * to XOFF on Pri 3
+	 * The target ID of the command:
+	 * * 0x0-0xFFF8 - The function ID
+	 * * 0xFFF8-0xFFFE - Reserved for internal processors
+	 * * 0xFFFF - HWRM
 	 */
-	uint64_t rx_pfc_xon2xoff_frames_pri4;
+	uint16_t	target_id;
 	/*
-	 * Total number of received PFC frames with transition from XON
-	 * to XOFF on Pri 4
+	 * A physical address pointer pointing to a host buffer that the
+	 * command's response data will be written. This can be either a host
+	 * physical address (HPA) or a guest physical address (GPA) and must
+	 * point to a physically contiguous block of memory.
 	 */
-	uint64_t rx_pfc_xon2xoff_frames_pri5;
+	uint64_t	resp_addr;
 	/*
-	 * Total number of received PFC frames with transition from XON
-	 * to XOFF on Pri 5
-	 */
-	uint64_t rx_pfc_xon2xoff_frames_pri6;
+	 * This is the host address where
+	 * the NVM variable will be copied from
+	 */
+	uint64_t	src_data_addr;
+	/* Size of the data, in bits. */
+	uint16_t	data_len;
+	/* NVM configuration option number. */
+	uint16_t	option_num;
+	/* reserved. */
+	#define HWRM_NVM_SET_VARIABLE_INPUT_OPTION_NUM_RSVD_0 UINT32_C(0x0)
+	/* reserved. */
+	#define HWRM_NVM_SET_VARIABLE_INPUT_OPTION_NUM_RSVD_FFFF \
+		UINT32_C(0xffff)
+	#define HWRM_NVM_SET_VARIABLE_INPUT_OPTION_NUM_LAST \
+		HWRM_NVM_SET_VARIABLE_INPUT_OPTION_NUM_RSVD_FFFF
+	/*
+	 * Number of dimensions for this NVM configuration variable.
+	 * This value indicates how many of the indexN values to use.
+	 * A value of 0 means that none of the indexN values are valid.
+	 * A value of 1 requires that index0 is valid; a value of 2
+	 * requires that index0 and index1 are valid; and so forth.
+	 */
+	uint16_t	dimensions;
+	/* index for the 1st dimension */
+	uint16_t	index_0;
+	/* index for the 2nd dimension */
+	uint16_t	index_1;
+	/* index for the 3rd dimension */
+	uint16_t	index_2;
+	/* index for the 4th dimension */
+	uint16_t	index_3;
+	uint8_t	flags;
+	/* When this bit is 1, flush the internal cache after this write operation (see the hwrm_nvm_flush command). */
+	#define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_FORCE_FLUSH \
+		UINT32_C(0x1)
+	/* encryption method */
+	#define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_MASK \
+		UINT32_C(0xe)
+	#define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_SFT 1
+	/* No encryption. */
+	#define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_NONE \
+		(UINT32_C(0x0) << 1)
+	/* one-way encryption. */
+	#define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_HMAC_SHA1 \
+		(UINT32_C(0x1) << 1)
+	/* symmetric AES256 encryption. 
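+	 * An illustrative flags composition for this mode; the mode
+	 * constants below are already shifted into the 0xe mask:
+	 *
+	 *   req.flags = HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_AES256 |
+	 *               HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_FORCE_FLUSH;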
*/ + #define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_AES256 \ + (UINT32_C(0x2) << 1) + /* SHA1 digest appended to plaintext contents, for authentication */ + #define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH \ + (UINT32_C(0x3) << 1) + #define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_LAST \ + HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH + uint8_t unused_0; +} __attribute__((packed)); + +/* hwrm_nvm_set_variable_output (size:128b/16B) */ +struct hwrm_nvm_set_variable_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t unused_0[7]; /* - * Total number of received PFC frames with transition from XON - * to XOFF on Pri 6 + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - uint64_t rx_pfc_xon2xoff_frames_pri7; + uint8_t valid; +} __attribute__((packed)); + +/* hwrm_nvm_set_variable_cmd_err (size:64b/8B) */ +struct hwrm_nvm_set_variable_cmd_err { /* - * Total number of received PFC frames with transition from XON - * to XOFF on Pri 7 + * command specific error codes that goes to + * the cmd_err field in Common HWRM Error Response. */ - uint64_t rx_pfc_ena_frames_pri0; + uint8_t code; + /* Unknown error */ + #define HWRM_NVM_SET_VARIABLE_CMD_ERR_CODE_UNKNOWN UINT32_C(0x0) + /* variable does not exist */ + #define HWRM_NVM_SET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST UINT32_C(0x1) + /* configuration is corrupted and the variable cannot be saved */ + #define HWRM_NVM_SET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR UINT32_C(0x2) + #define HWRM_NVM_SET_VARIABLE_CMD_ERR_CODE_LAST \ + HWRM_NVM_SET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR + uint8_t unused_0[7]; +} __attribute__((packed)); + +/**************************** + * hwrm_nvm_validate_option * + ****************************/ + + +/* hwrm_nvm_validate_option_input (size:320b/40B) */ +struct hwrm_nvm_validate_option_input { + /* The HWRM command request type. */ + uint16_t req_type; /* - * Total number of received PFC frames with PFC enabled bit for - * Pri 0 + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint64_t rx_pfc_ena_frames_pri1; + uint16_t cmpl_ring; /* - * Total number of received PFC frames with PFC enabled bit for - * Pri 1 + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint64_t rx_pfc_ena_frames_pri2; + uint16_t seq_id; /* - * Total number of received PFC frames with PFC enabled bit for - * Pri 2 + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint64_t rx_pfc_ena_frames_pri3; + uint16_t target_id; /* - * Total number of received PFC frames with PFC enabled bit for - * Pri 3 + * A physical address pointer pointing to a host buffer that the + * command's response data will be written. 
This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint64_t rx_pfc_ena_frames_pri4; + uint64_t resp_addr; /* - * Total number of received PFC frames with PFC enabled bit for - * Pri 4 + * This is the host address where + * the NVM variable will be copied from */ - uint64_t rx_pfc_ena_frames_pri5; + uint64_t src_data_addr; + /* size of data in bits */ + uint16_t data_len; + /* nvm cfg option number */ + uint16_t option_num; + /* reserved. */ + #define HWRM_NVM_VALIDATE_OPTION_INPUT_OPTION_NUM_RSVD_0 \ + UINT32_C(0x0) + /* reserved. */ + #define HWRM_NVM_VALIDATE_OPTION_INPUT_OPTION_NUM_RSVD_FFFF \ + UINT32_C(0xffff) + #define HWRM_NVM_VALIDATE_OPTION_INPUT_OPTION_NUM_LAST \ + HWRM_NVM_VALIDATE_OPTION_INPUT_OPTION_NUM_RSVD_FFFF + /* + * Number of dimensions for this nvm configuration variable. + * This value indicates how many of the indexN values to use. + * A value of 0 means that none of the indexN values are valid. + * A value of 1 requires that index0 is valid, a value of 2 + * requires that index0 and index1 are valid, and so forth + */ + uint16_t dimensions; + /* index for the 1st dimension */ + uint16_t index_0; + /* index for the 2nd dimension */ + uint16_t index_1; + /* index for the 3rd dimension */ + uint16_t index_2; + /* index for the 4th dimension */ + uint16_t index_3; + uint8_t unused_0[2]; +} __attribute__((packed)); + +/* hwrm_nvm_validate_option_output (size:128b/16B) */ +struct hwrm_nvm_validate_option_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t result; + /* indicates that the value provided for the option does not match the saved data. */ + #define HWRM_NVM_VALIDATE_OPTION_OUTPUT_RESULT_NOT_MATCH UINT32_C(0x0) + /* indicates that the value provided for the option matches the saved data. */ + #define HWRM_NVM_VALIDATE_OPTION_OUTPUT_RESULT_MATCH UINT32_C(0x1) + #define HWRM_NVM_VALIDATE_OPTION_OUTPUT_RESULT_LAST \ + HWRM_NVM_VALIDATE_OPTION_OUTPUT_RESULT_MATCH + uint8_t unused_0[6]; + /* + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. + * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/* hwrm_nvm_validate_option_cmd_err (size:64b/8B) */ +struct hwrm_nvm_validate_option_cmd_err { /* - * Total number of received PFC frames with PFC enabled bit for - * Pri 5 + * command specific error codes that go to + * the cmd_err field in Common HWRM Error Response. */ - uint64_t rx_pfc_ena_frames_pri6; + uint8_t code; + /* Unknown error */ + #define HWRM_NVM_VALIDATE_OPTION_CMD_ERR_CODE_UNKNOWN UINT32_C(0x0) + #define HWRM_NVM_VALIDATE_OPTION_CMD_ERR_CODE_LAST \ + HWRM_NVM_VALIDATE_OPTION_CMD_ERR_CODE_UNKNOWN + uint8_t unused_0[7]; +} __attribute__((packed)); + +/***************************** + * hwrm_nvm_factory_defaults * + *****************************/ + + +/* hwrm_nvm_factory_defaults_input (size:192b/24B) */ +struct hwrm_nvm_factory_defaults_input { + /* The HWRM command request type. 
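+ * As an illustrative sketch (the `req` variable and the opcode + * macro name are assumptions, not shown in this header), a caller + * places the command opcode in this field before issuing the + * request: + * struct hwrm_nvm_factory_defaults_input req = { 0 }; + * req.req_type = HWRM_NVM_FACTORY_DEFAULTS;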
*/ + uint16_t req_type; /* - * Total number of received PFC frames with PFC enabled bit for - * Pri 6 + * The completion ring to send the completion event on. This should + * be the NQ ID returned from the `nq_alloc` HWRM command. */ - uint64_t rx_pfc_ena_frames_pri7; + uint16_t cmpl_ring; /* - * Total number of received PFC frames with PFC enabled bit for - * Pri 7 + * The sequence ID is used by the driver for tracking multiple + * commands. This ID is treated as opaque data by the firmware and + * the value is returned in the `hwrm_resp_hdr` upon completion. */ - uint64_t rx_sch_crc_err_frames; - /* Total Number of frames received with SCH CRC error */ - uint64_t rx_undrsz_frames; - /* Total Number of under-sized frames received */ - uint64_t rx_frag_frames; - /* Total Number of fragmented frames received */ - uint64_t rx_eee_lpi_events; - /* Total number of RX EEE LPI Events */ - uint64_t rx_eee_lpi_duration; - /* EEE LPI Duration Counter on RX */ - uint64_t rx_llfc_physical_msgs; + uint16_t seq_id; /* - * Total number of physical type Link Level Flow Control (LLFC) - * messages received + * The target ID of the command: + * * 0x0-0xFFF8 - The function ID + * * 0xFFF8-0xFFFE - Reserved for internal processors + * * 0xFFFF - HWRM */ - uint64_t rx_llfc_logical_msgs; + uint16_t target_id; /* - * Total number of logical type Link Level Flow Control (LLFC) - * messages received + * A physical address pointing to a host buffer that the + * command's response data will be written to. This can be either a host + * physical address (HPA) or a guest physical address (GPA) and must + * point to a physically contiguous block of memory. */ - uint64_t rx_llfc_msgs_with_crc_err; + uint64_t resp_addr; + /* mode is 8 bits */ + uint8_t mode; + /* Restore factory default settings. */ + #define HWRM_NVM_FACTORY_DEFAULTS_INPUT_MODE_RESTORE UINT32_C(0x0) + /* Create factory default settings. */ + #define HWRM_NVM_FACTORY_DEFAULTS_INPUT_MODE_CREATE UINT32_C(0x1) + #define HWRM_NVM_FACTORY_DEFAULTS_INPUT_MODE_LAST \ + HWRM_NVM_FACTORY_DEFAULTS_INPUT_MODE_CREATE + uint8_t unused_0[7]; +} __attribute__((packed)); + +/* hwrm_nvm_factory_defaults_output (size:128b/16B) */ +struct hwrm_nvm_factory_defaults_output { + /* The specific error status for the command. */ + uint16_t error_code; + /* The HWRM command request type. */ + uint16_t req_type; + /* The sequence ID from the original command. */ + uint16_t seq_id; + /* The length of the response data in number of bytes. */ + uint16_t resp_len; + uint8_t result; + /* factory defaults created successfully. */ + #define HWRM_NVM_FACTORY_DEFAULTS_OUTPUT_RESULT_CREATE_OK \ + UINT32_C(0x0) + /* factory defaults restored successfully. */ + #define HWRM_NVM_FACTORY_DEFAULTS_OUTPUT_RESULT_RESTORE_OK \ + UINT32_C(0x1) + /* factory defaults already created. */ + #define HWRM_NVM_FACTORY_DEFAULTS_OUTPUT_RESULT_CREATE_ALREADY \ + UINT32_C(0x2) + #define HWRM_NVM_FACTORY_DEFAULTS_OUTPUT_RESULT_LAST \ + HWRM_NVM_FACTORY_DEFAULTS_OUTPUT_RESULT_CREATE_ALREADY + uint8_t unused_0[6]; /* - * Total number of logical type Link Level Flow Control (LLFC) - * messages received with CRC error + * This field is used in Output records to indicate that the output + * is completely written to RAM. This field should be read as '1' + * to indicate that the output has been completely written. 
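+ * As an illustrative sketch (assuming `resp` points at this + * structure in the host buffer passed via resp_addr), a driver + * would busy-wait on this byte after issuing the command: + * while (((volatile struct hwrm_nvm_factory_defaults_output *) + * resp)->valid != 1) + * rte_pause();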
+ * When writing a command completion or response to an internal processor, + * the order of writes has to be such that this field is written last. */ - uint64_t rx_hcfc_msgs; - /* Total number of HCFC messages received */ - uint64_t rx_hcfc_msgs_with_crc_err; - /* Total number of HCFC messages received with CRC error */ - uint64_t rx_bytes; - /* Total number of received bytes */ - uint64_t rx_runt_bytes; - /* Total number of bytes received in runt frames */ - uint64_t rx_runt_frames; - /* Total number of runt frames received */ - uint64_t rx_stat_discard; - /* Total Rx Discards per Port reported by STATS block */ - uint64_t rx_stat_err; - /* Total Rx Error Drops per Port reported by STATS block */ + uint8_t valid; } __attribute__((packed)); -/* Periodic Statistics Context DMA to host (160 bytes) */ -/* - * per-context HW statistics -- chip view - */ +/* hwrm_nvm_factory_defaults_cmd_err (size:64b/8B) */ +struct hwrm_nvm_factory_defaults_cmd_err { + /* + * command specific error codes that go to + * the cmd_err field in Common HWRM Error Response. + */ + uint8_t code; + /* Unknown error */ + #define HWRM_NVM_FACTORY_DEFAULTS_CMD_ERR_CODE_UNKNOWN \ + UINT32_C(0x0) + /* valid configuration not present to create defaults */ + #define HWRM_NVM_FACTORY_DEFAULTS_CMD_ERR_CODE_NO_VALID_CFG \ + UINT32_C(0x1) + /* No saved configuration present to restore, restore failed */ + #define HWRM_NVM_FACTORY_DEFAULTS_CMD_ERR_CODE_NO_SAVED_CFG \ + UINT32_C(0x2) + #define HWRM_NVM_FACTORY_DEFAULTS_CMD_ERR_CODE_LAST \ + HWRM_NVM_FACTORY_DEFAULTS_CMD_ERR_CODE_NO_SAVED_CFG + uint8_t unused_0[7]; +} __attribute__((packed)); -struct ctx_hw_stats64 { - uint64_t rx_ucast_pkts; - uint64_t rx_mcast_pkts; - uint64_t rx_bcast_pkts; - uint64_t rx_drop_pkts; - uint64_t rx_discard_pkts; - uint64_t rx_ucast_bytes; - uint64_t rx_mcast_bytes; - uint64_t rx_bcast_bytes; - - uint64_t tx_ucast_pkts; - uint64_t tx_mcast_pkts; - uint64_t tx_bcast_pkts; - uint64_t tx_drop_pkts; - uint64_t tx_discard_pkts; - uint64_t tx_ucast_bytes; - uint64_t tx_mcast_bytes; - uint64_t tx_bcast_bytes; - - uint64_t tpa_pkts; - uint64_t tpa_bytes; - uint64_t tpa_events; - uint64_t tpa_aborts; -} __attribute__((packed)); - -#endif /* _HSI_STRUCT_DEF_DPDK_ */ +#endif /* _HSI_STRUCT_DEF_DPDK_H_ */ diff --git a/drivers/net/bnxt/rte_pmd_bnxt.c b/drivers/net/bnxt/rte_pmd_bnxt.c index cae95f8f..c298de83 100644 --- a/drivers/net/bnxt/rte_pmd_bnxt.c +++ b/drivers/net/bnxt/rte_pmd_bnxt.c @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2017 Broadcom Limited. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Broadcom Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017-2018 Broadcom + * All rights reserved. */ #include diff --git a/drivers/net/bnxt/rte_pmd_bnxt.h b/drivers/net/bnxt/rte_pmd_bnxt.h index cd7227ac..68fbe34d 100644 --- a/drivers/net/bnxt/rte_pmd_bnxt.h +++ b/drivers/net/bnxt/rte_pmd_bnxt.h @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2017 Broadcom Limited. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Broadcom Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017-2018 Broadcom + * All rights reserved. 
*/ #ifndef _PMD_BNXT_H_ diff --git a/drivers/net/bonding/Makefile b/drivers/net/bonding/Makefile index 4a6633ed..acad16a1 100644 --- a/drivers/net/bonding/Makefile +++ b/drivers/net/bonding/Makefile @@ -27,6 +27,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += rte_eth_bond_pmd.c SRCS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += rte_eth_bond_args.c SRCS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += rte_eth_bond_8023ad.c SRCS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += rte_eth_bond_alb.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += rte_eth_bond_flow.c # # Export include files diff --git a/drivers/net/bonding/meson.build b/drivers/net/bonding/meson.build index b90abc6d..602d2880 100644 --- a/drivers/net/bonding/meson.build +++ b/drivers/net/bonding/meson.build @@ -2,7 +2,8 @@ # Copyright(c) 2017 Intel Corporation name = 'bond' #, james bond :-) -sources = files('rte_eth_bond_api.c', 'rte_eth_bond_pmd.c', +version = 2 +sources = files('rte_eth_bond_api.c', 'rte_eth_bond_pmd.c', 'rte_eth_bond_flow.c', 'rte_eth_bond_args.c', 'rte_eth_bond_8023ad.c', 'rte_eth_bond_alb.c') deps += 'sched' # needed for rte_bitmap.h diff --git a/drivers/net/bonding/rte_eth_bond_8023ad.c b/drivers/net/bonding/rte_eth_bond_8023ad.c index c452318f..f8cea4b6 100644 --- a/drivers/net/bonding/rte_eth_bond_8023ad.c +++ b/drivers/net/bonding/rte_eth_bond_8023ad.c @@ -16,9 +16,12 @@ static void bond_mode_8023ad_ext_periodic_cb(void *arg); #ifdef RTE_LIBRTE_BOND_DEBUG_8023AD -#define MODE4_DEBUG(fmt, ...) RTE_LOG(DEBUG, PMD, "%6u [Port %u: %s] " fmt, \ - bond_dbg_get_time_diff_ms(), slave_id, \ - __func__, ##__VA_ARGS__) + +#define MODE4_DEBUG(fmt, ...) \ + rte_log(RTE_LOG_DEBUG, bond_logtype, \ + "%6u [Port %u: %s] " fmt, \ + bond_dbg_get_time_diff_ms(), slave_id, \ + __func__, ##__VA_ARGS__) static uint64_t start_time; @@ -77,44 +80,46 @@ bond_print_lacp(struct lacpdu *l) if (p_len && p_state[p_len-1] == ' ') p_state[p_len-1] = '\0'; - RTE_LOG(DEBUG, PMD, "LACP: {\n"\ - " subtype= %02X\n"\ - " ver_num=%02X\n"\ - " actor={ tlv=%02X, len=%02X\n"\ - " pri=%04X, system=%s, key=%04X, p_pri=%04X p_num=%04X\n"\ - " state={ %s }\n"\ - " }\n"\ - " partner={ tlv=%02X, len=%02X\n"\ - " pri=%04X, system=%s, key=%04X, p_pri=%04X p_num=%04X\n"\ - " state={ %s }\n"\ - " }\n"\ - " collector={info=%02X, length=%02X, max_delay=%04X\n, " \ - "type_term=%02X, terminator_length = %02X}\n",\ - l->subtype,\ - l->version_number,\ - l->actor.tlv_type_info,\ - l->actor.info_length,\ - l->actor.port_params.system_priority,\ - a_address,\ - l->actor.port_params.key,\ - l->actor.port_params.port_priority,\ - l->actor.port_params.port_number,\ - a_state,\ - l->partner.tlv_type_info,\ - l->partner.info_length,\ - l->partner.port_params.system_priority,\ - p_address,\ - l->partner.port_params.key,\ - l->partner.port_params.port_priority,\ - l->partner.port_params.port_number,\ - p_state,\ - l->tlv_type_collector_info,\ - l->collector_info_length,\ - l->collector_max_delay,\ - l->tlv_type_terminator,\ - l->terminator_length); + RTE_BOND_LOG(DEBUG, + "LACP: {\n" + " subtype= %02X\n" + " ver_num=%02X\n" + " actor={ tlv=%02X, len=%02X\n" + " pri=%04X, system=%s, key=%04X, p_pri=%04X p_num=%04X\n" + " state={ %s }\n" + " }\n" + " partner={ tlv=%02X, len=%02X\n" + " pri=%04X, system=%s, key=%04X, p_pri=%04X p_num=%04X\n" + " state={ %s }\n" + " }\n" + " collector={info=%02X, length=%02X, max_delay=%04X\n, " + "type_term=%02X, terminator_length = %02X }", + l->subtype, + l->version_number, + l->actor.tlv_type_info, + l->actor.info_length, + l->actor.port_params.system_priority, + a_address, + 
l->actor.port_params.key, + l->actor.port_params.port_priority, + l->actor.port_params.port_number, + a_state, + l->partner.tlv_type_info, + l->partner.info_length, + l->partner.port_params.system_priority, + p_address, + l->partner.port_params.key, + l->partner.port_params.port_priority, + l->partner.port_params.port_number, + p_state, + l->tlv_type_collector_info, + l->collector_info_length, + l->collector_max_delay, + l->tlv_type_terminator, + l->terminator_length); } + #define BOND_PRINT_LACP(lacpdu) bond_print_lacp(lacpdu) #else #define BOND_PRINT_LACP(lacpdu) do { } while (0) @@ -200,31 +205,34 @@ show_warnings(uint16_t slave_id) rte_get_tsc_hz() / 1000); if (warnings & WRN_RX_QUEUE_FULL) { - RTE_LOG(DEBUG, PMD, - "Slave %u: failed to enqueue LACP packet into RX ring.\n" - "Receive and transmit functions must be invoked on bonded\n" - "interface at least 10 times per second or LACP will not\n" - "work correctly\n", slave_id); + RTE_BOND_LOG(DEBUG, + "Slave %u: failed to enqueue LACP packet into RX ring.\n" + "Receive and transmit functions must be invoked on bonded " + "interface at least 10 times per second or LACP will not work correctly", + slave_id); } if (warnings & WRN_TX_QUEUE_FULL) { - RTE_LOG(DEBUG, PMD, - "Slave %u: failed to enqueue LACP packet into TX ring.\n" - "Receive and transmit functions must be invoked on bonded\n" - "interface at least 10 times per second or LACP will not\n" - "work correctly\n", slave_id); + RTE_BOND_LOG(DEBUG, + "Slave %u: failed to enqueue LACP packet into TX ring.\n" + "Receive and transmit functions must be invoked on bonded " + "interface at least 10 times per second or LACP will not work correctly", + slave_id); } if (warnings & WRN_RX_MARKER_TO_FAST) - RTE_LOG(INFO, PMD, "Slave %u: marker to early - ignoring.\n", slave_id); + RTE_BOND_LOG(INFO, "Slave %u: marker too early - ignoring.", + slave_id); if (warnings & WRN_UNKNOWN_SLOW_TYPE) { - RTE_LOG(INFO, PMD, - "Slave %u: ignoring unknown slow protocol frame type", slave_id); + RTE_BOND_LOG(INFO, + "Slave %u: ignoring unknown slow protocol frame type", + slave_id); } if (warnings & WRN_UNKNOWN_MARKER_TYPE) - RTE_LOG(INFO, PMD, "Slave %u: ignoring unknown marker type", slave_id); + RTE_BOND_LOG(INFO, "Slave %u: ignoring unknown marker type", + slave_id); if (warnings & WRN_NOT_LACP_CAPABLE) MODE4_DEBUG("Port %u is not LACP capable!\n", slave_id); @@ -507,8 +515,8 @@ mux_machine(struct bond_dev_private *internals, uint16_t slave_id) ACTOR_STATE_SET(port, DISTRIBUTING); SM_FLAG_SET(port, NTT); MODE4_DEBUG("COLLECTING -> DISTRIBUTING\n"); - RTE_LOG(INFO, PMD, - "Bond %u: slave id %u distributing started.\n", + RTE_BOND_LOG(INFO, + "Bond %u: slave id %u distributing started.", internals->port_id, slave_id); } } else { @@ -518,8 +526,8 @@ mux_machine(struct bond_dev_private *internals, uint16_t slave_id) ACTOR_STATE_CLR(port, DISTRIBUTING); SM_FLAG_SET(port, NTT); MODE4_DEBUG("DISTRIBUTING -> COLLECTING\n"); - RTE_LOG(INFO, PMD, - "Bond %u: slave id %u distributing stopped.\n", + RTE_BOND_LOG(INFO, + "Bond %u: slave id %u distributing stopped.", internals->port_id, slave_id); } } @@ -557,7 +565,7 @@ tx_machine(struct bond_dev_private *internals, uint16_t slave_id) lacp_pkt = rte_pktmbuf_alloc(port->mbuf_pool); if (lacp_pkt == NULL) { - RTE_LOG(ERR, PMD, "Failed to allocate LACP packet from pool\n"); + RTE_BOND_LOG(ERR, "Failed to allocate LACP packet from pool"); return; } @@ -1337,7 +1345,7 @@ bond_8023ad_setup_validate(uint16_t port_id, conf->tx_period_ms == 0 || conf->rx_marker_period_ms == 0 || 
conf->update_timeout_ms == 0) { - RTE_LOG(ERR, PMD, "given mode 4 configuration is invalid\n"); + RTE_BOND_LOG(ERR, "given mode 4 configuration is invalid"); return -EINVAL; } } diff --git a/drivers/net/bonding/rte_eth_bond_alb.c b/drivers/net/bonding/rte_eth_bond_alb.c index 3f9945b3..c3891c7e 100644 --- a/drivers/net/bonding/rte_eth_bond_alb.c +++ b/drivers/net/bonding/rte_eth_bond_alb.c @@ -60,8 +60,8 @@ bond_mode_alb_enable(struct rte_eth_dev *bond_dev) 0, data_size, socket_id); if (internals->mode6.mempool == NULL) { - RTE_LOG(ERR, PMD, "%s: Failed to initialize ALB mempool.\n", - bond_dev->device->name); + RTE_BOND_LOG(ERR, "%s: Failed to initialize ALB mempool.\n", + bond_dev->device->name); goto mempool_alloc_error; } } diff --git a/drivers/net/bonding/rte_eth_bond_api.c b/drivers/net/bonding/rte_eth_bond_api.c index f854b737..d558df8b 100644 --- a/drivers/net/bonding/rte_eth_bond_api.c +++ b/drivers/net/bonding/rte_eth_bond_api.c @@ -194,7 +194,8 @@ slave_vlan_filter_set(uint16_t bonded_port_id, uint16_t slave_port_id) uint16_t first; bonded_eth_dev = &rte_eth_devices[bonded_port_id]; - if (bonded_eth_dev->data->dev_conf.rxmode.hw_vlan_filter == 0) + if ((bonded_eth_dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_VLAN_FILTER) == 0) return 0; internals = bonded_eth_dev->data->dev_private; @@ -211,9 +212,12 @@ slave_vlan_filter_set(uint16_t bonded_port_id, uint16_t slave_port_id) for (i = 0, mask = 1; i < RTE_BITMAP_SLAB_BIT_SIZE; i ++, mask <<= 1) { - if (unlikely(slab & mask)) + if (unlikely(slab & mask)) { + uint16_t vlan_id = pos + i; + res = rte_eth_dev_vlan_filter(slave_port_id, - (uint16_t)pos, 1); + vlan_id, 1); + } } found = rte_bitmap_scan(internals->vlan_filter_bmp, &pos, &slab); @@ -222,6 +226,49 @@ slave_vlan_filter_set(uint16_t bonded_port_id, uint16_t slave_port_id) return res; } +static int +slave_rte_flow_prepare(uint16_t slave_id, struct bond_dev_private *internals) +{ + struct rte_flow *flow; + struct rte_flow_error ferror; + uint16_t slave_port_id = internals->slaves[slave_id].port_id; + + if (internals->flow_isolated_valid != 0) { + rte_eth_dev_stop(slave_port_id); + if (rte_flow_isolate(slave_port_id, internals->flow_isolated, + &ferror)) { + RTE_BOND_LOG(ERR, "rte_flow_isolate failed for slave" + " %d: %s", slave_id, ferror.message ? + ferror.message : "(no stated reason)"); + return -1; + } + } + TAILQ_FOREACH(flow, &internals->flow_list, next) { + flow->flows[slave_id] = rte_flow_create(slave_port_id, + &flow->fd->attr, + flow->fd->items, + flow->fd->actions, + &ferror); + if (flow->flows[slave_id] == NULL) { + RTE_BOND_LOG(ERR, "Cannot create flow for slave" + " %d: %s", slave_id, + ferror.message ? 
ferror.message : + "(no stated reason)"); + /* Destroy successful bond flows from the slave */ + TAILQ_FOREACH(flow, &internals->flow_list, next) { + if (flow->flows[slave_id] != NULL) { + rte_flow_destroy(slave_port_id, + flow->flows[slave_id], + &ferror); + flow->flows[slave_id] = NULL; + } + } + return -1; + } + } + return 0; +} + static int __eth_bond_slave_add_lock_free(uint16_t bonded_port_id, uint16_t slave_port_id) { @@ -284,6 +331,8 @@ __eth_bond_slave_add_lock_free(uint16_t bonded_port_id, uint16_t slave_port_id) /* Take the first dev's offload capabilities */ internals->rx_offload_capa = dev_info.rx_offload_capa; internals->tx_offload_capa = dev_info.tx_offload_capa; + internals->rx_queue_offload_capa = dev_info.rx_queue_offload_capa; + internals->tx_queue_offload_capa = dev_info.tx_queue_offload_capa; internals->flow_type_rss_offloads = dev_info.flow_type_rss_offloads; /* Inherit first slave's max rx packet size */ @@ -292,6 +341,8 @@ __eth_bond_slave_add_lock_free(uint16_t bonded_port_id, uint16_t slave_port_id) } else { internals->rx_offload_capa &= dev_info.rx_offload_capa; internals->tx_offload_capa &= dev_info.tx_offload_capa; + internals->rx_queue_offload_capa &= dev_info.rx_queue_offload_capa; + internals->tx_queue_offload_capa &= dev_info.tx_queue_offload_capa; internals->flow_type_rss_offloads &= dev_info.flow_type_rss_offloads; if (link_properties_valid(bonded_eth_dev, @@ -316,6 +367,12 @@ __eth_bond_slave_add_lock_free(uint16_t bonded_port_id, uint16_t slave_port_id) bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf &= internals->flow_type_rss_offloads; + if (slave_rte_flow_prepare(internals->slave_count, internals) != 0) { + RTE_BOND_LOG(ERR, "Failed to prepare new slave flows: port=%d", + slave_port_id); + return -1; + } + internals->slave_count++; if (bonded_eth_dev->data->dev_started) { @@ -393,6 +450,8 @@ __eth_bond_slave_remove_lock_free(uint16_t bonded_port_id, struct rte_eth_dev *bonded_eth_dev; struct bond_dev_private *internals; struct rte_eth_dev *slave_eth_dev; + struct rte_flow_error flow_error; + struct rte_flow *flow; int i, slave_idx; bonded_eth_dev = &rte_eth_devices[bonded_port_id]; @@ -432,6 +491,18 @@ __eth_bond_slave_remove_lock_free(uint16_t bonded_port_id, rte_eth_dev_default_mac_addr_set(slave_port_id, &(internals->slaves[slave_idx].persisted_mac_addr)); + /* + * Remove bond device flows from slave device. + * Note: don't restore flow isolate mode. 
+ */ + TAILQ_FOREACH(flow, &internals->flow_list, next) { + if (flow->flows[slave_idx] != NULL) { + rte_flow_destroy(slave_port_id, flow->flows[slave_idx], + &flow_error); + flow->flows[slave_idx] = NULL; + } + } + slave_eth_dev = &rte_eth_devices[slave_port_id]; slave_remove(internals, slave_eth_dev); slave_eth_dev->data->dev_flags &= (~RTE_ETH_DEV_BONDED_SLAVE); @@ -458,6 +529,8 @@ __eth_bond_slave_remove_lock_free(uint16_t bonded_port_id, if (internals->slave_count == 0) { internals->rx_offload_capa = 0; internals->tx_offload_capa = 0; + internals->rx_queue_offload_capa = 0; + internals->tx_queue_offload_capa = 0; internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK; internals->reta_size = 0; internals->candidate_max_rx_pktlen = 0; diff --git a/drivers/net/bonding/rte_eth_bond_args.c b/drivers/net/bonding/rte_eth_bond_args.c index 27d3101b..b60fde6a 100644 --- a/drivers/net/bonding/rte_eth_bond_args.c +++ b/drivers/net/bonding/rte_eth_bond_args.c @@ -32,7 +32,7 @@ find_port_id_by_pci_addr(const struct rte_pci_addr *pci_addr) struct rte_pci_addr *eth_pci_addr; unsigned i; - for (i = 0; i < rte_eth_dev_count(); i++) { + RTE_ETH_FOREACH_DEV(i) { pci_dev = RTE_ETH_DEV_TO_PCI(&rte_eth_devices[i]); eth_pci_addr = &pci_dev->addr; @@ -50,7 +50,7 @@ find_port_id_by_dev_name(const char *name) { unsigned i; - for (i = 0; i < rte_eth_dev_count(); i++) { + RTE_ETH_FOREACH_DEV(i) { if (rte_eth_devices[i].data == NULL) continue; @@ -92,7 +92,7 @@ parse_port_id(const char *port_str) if (pci_bus->parse(port_str, &dev_addr) == 0) { dev = pci_bus->find_device(NULL, bond_pci_addr_cmp, &dev_addr); if (dev == NULL) { - RTE_LOG(ERR, PMD, "unable to find PCI device\n"); + RTE_BOND_LOG(ERR, "unable to find PCI device"); return -1; } port_id = find_port_id_by_pci_addr(&dev_addr); @@ -134,7 +134,8 @@ bond_ethdev_parse_slave_port_kvarg(const char *key, if (strcmp(key, PMD_BOND_SLAVE_PORT_KVARG) == 0) { int port_id = parse_port_id(value); if (port_id < 0) { - RTE_BOND_LOG(ERR, "Invalid slave port value (%s) specified", value); + RTE_BOND_LOG(ERR, "Invalid slave port value (%s) specified", + value); return -1; } else slave_ports->slaves[slave_ports->slave_count++] = @@ -244,7 +245,7 @@ bond_ethdev_parse_primary_slave_port_id_kvarg(const char *key __rte_unused, if (primary_slave_port_id < 0) return -1; - *(uint8_t *)extra_args = (uint8_t)primary_slave_port_id; + *(uint16_t *)extra_args = (uint16_t)primary_slave_port_id; return 0; } diff --git a/drivers/net/bonding/rte_eth_bond_flow.c b/drivers/net/bonding/rte_eth_bond_flow.c new file mode 100644 index 00000000..31e4bcae --- /dev/null +++ b/drivers/net/bonding/rte_eth_bond_flow.c @@ -0,0 +1,228 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2018 Mellanox Technologies, Ltd + */ + +#include + +#include +#include +#include + +#include "rte_eth_bond_private.h" + +static struct rte_flow * +bond_flow_alloc(int numa_node, const struct rte_flow_attr *attr, + const struct rte_flow_item *items, + const struct rte_flow_action *actions) +{ + struct rte_flow *flow; + size_t fdsz; + + fdsz = rte_flow_copy(NULL, 0, attr, items, actions); + flow = rte_zmalloc_socket(NULL, sizeof(struct rte_flow) + fdsz, + RTE_CACHE_LINE_SIZE, numa_node); + if (unlikely(flow == NULL)) { + RTE_BOND_LOG(ERR, "Could not allocate new flow"); + return NULL; + } + flow->fd = (void *)((uintptr_t)flow + sizeof(*flow)); + if (unlikely(rte_flow_copy(flow->fd, fdsz, attr, items, actions) != + fdsz)) { + RTE_BOND_LOG(ERR, "Failed to copy flow description"); + rte_free(flow); + return NULL; + } + 
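+ /* + * The copied flow description now sits in the memory that trails + * the rte_flow structure itself (flow->fd points just past it), + * so the single rte_free() in bond_flow_release() frees both. + */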
return flow; +} + +static void +bond_flow_release(struct rte_flow **flow) +{ + rte_free(*flow); + *flow = NULL; +} + +static int +bond_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, + const struct rte_flow_item patterns[], + const struct rte_flow_action actions[], + struct rte_flow_error *err) +{ + struct bond_dev_private *internals = dev->data->dev_private; + int i; + int ret; + + for (i = 0; i < internals->slave_count; i++) { + ret = rte_flow_validate(internals->slaves[i].port_id, attr, + patterns, actions, err); + if (ret) { + RTE_BOND_LOG(ERR, "Operation rte_flow_validate failed" + " for slave %d with error %d", i, ret); + return ret; + } + } + return 0; +} + +static struct rte_flow * +bond_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, + const struct rte_flow_item patterns[], + const struct rte_flow_action actions[], + struct rte_flow_error *err) +{ + struct bond_dev_private *internals = dev->data->dev_private; + struct rte_flow *flow; + int i; + + flow = bond_flow_alloc(dev->data->numa_node, attr, patterns, actions); + if (unlikely(flow == NULL)) { + rte_flow_error_set(err, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, rte_strerror(ENOMEM)); + return NULL; + } + for (i = 0; i < internals->slave_count; i++) { + flow->flows[i] = rte_flow_create(internals->slaves[i].port_id, + attr, patterns, actions, err); + if (unlikely(flow->flows[i] == NULL)) { + RTE_BOND_LOG(ERR, "Failed to create flow on slave %d", + i); + goto err; + } + } + TAILQ_INSERT_TAIL(&internals->flow_list, flow, next); + return flow; +err: + /* Destroy all slaves flows. */ + for (i = 0; i < internals->slave_count; i++) { + if (flow->flows[i] != NULL) + rte_flow_destroy(internals->slaves[i].port_id, + flow->flows[i], err); + } + bond_flow_release(&flow); + return NULL; +} + +static int +bond_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow, + struct rte_flow_error *err) +{ + struct bond_dev_private *internals = dev->data->dev_private; + int i; + int ret = 0; + + for (i = 0; i < internals->slave_count; i++) { + int lret; + + if (unlikely(flow->flows[i] == NULL)) + continue; + lret = rte_flow_destroy(internals->slaves[i].port_id, + flow->flows[i], err); + if (unlikely(lret != 0)) { + RTE_BOND_LOG(ERR, "Failed to destroy flow on slave %d:" + " %d", i, lret); + ret = lret; + } + } + TAILQ_REMOVE(&internals->flow_list, flow, next); + bond_flow_release(&flow); + return ret; +} + +static int +bond_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *err) +{ + struct bond_dev_private *internals = dev->data->dev_private; + struct rte_flow *flow; + void *tmp; + int ret = 0; + int lret; + + /* Destroy all bond flows from its slaves instead of flushing them to + * keep the LACP flow or any other external flows. 
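+ * TAILQ_FOREACH_SAFE is used below because bond_flow_destroy() + * removes each entry from flow_list while the list is being walked.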
+ */ + TAILQ_FOREACH_SAFE(flow, &internals->flow_list, next, tmp) { + lret = bond_flow_destroy(dev, flow, err); + if (unlikely(lret != 0)) + ret = lret; + } + if (unlikely(ret != 0)) + RTE_BOND_LOG(ERR, "Failed to flush flow in all slaves"); + return ret; +} + +static int +bond_flow_query_count(struct rte_eth_dev *dev, struct rte_flow *flow, + const struct rte_flow_action *action, + struct rte_flow_query_count *count, + struct rte_flow_error *err) +{ + struct bond_dev_private *internals = dev->data->dev_private; + struct rte_flow_query_count slave_count; + int i; + int ret; + + count->bytes = 0; + count->hits = 0; + rte_memcpy(&slave_count, count, sizeof(slave_count)); + for (i = 0; i < internals->slave_count; i++) { + ret = rte_flow_query(internals->slaves[i].port_id, + flow->flows[i], action, + &slave_count, err); + if (unlikely(ret != 0)) { + RTE_BOND_LOG(ERR, "Failed to query flow on" + " slave %d: %d", i, ret); + return ret; + } + count->bytes += slave_count.bytes; + count->hits += slave_count.hits; + slave_count.bytes = 0; + slave_count.hits = 0; + } + return 0; +} + +static int +bond_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow, + const struct rte_flow_action *action, void *arg, + struct rte_flow_error *err) +{ + switch (action->type) { + case RTE_FLOW_ACTION_TYPE_COUNT: + return bond_flow_query_count(dev, flow, action, arg, err); + default: + return rte_flow_error_set(err, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, arg, + rte_strerror(ENOTSUP)); + } +} + +static int +bond_flow_isolate(struct rte_eth_dev *dev, int set, + struct rte_flow_error *err) +{ + struct bond_dev_private *internals = dev->data->dev_private; + int i; + int ret; + + for (i = 0; i < internals->slave_count; i++) { + ret = rte_flow_isolate(internals->slaves[i].port_id, set, err); + if (unlikely(ret != 0)) { + RTE_BOND_LOG(ERR, "Operation rte_flow_isolate failed" + " for slave %d with error %d", i, ret); + internals->flow_isolated_valid = 0; + return ret; + } + } + internals->flow_isolated = set; + internals->flow_isolated_valid = 1; + return 0; +} + +const struct rte_flow_ops bond_flow_ops = { + .validate = bond_flow_validate, + .create = bond_flow_create, + .destroy = bond_flow_destroy, + .flush = bond_flow_flush, + .query = bond_flow_query, + .isolate = bond_flow_isolate, +}; diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c index c34c3251..02d94b1b 100644 --- a/drivers/net/bonding/rte_eth_bond_pmd.c +++ b/drivers/net/bonding/rte_eth_bond_pmd.c @@ -17,6 +17,7 @@ #include #include #include +#include #include "rte_eth_bond.h" #include "rte_eth_bond_private.h" @@ -570,34 +571,21 @@ update_client_stats(uint32_t addr, uint16_t port, uint32_t *TXorRXindicator) } #ifdef RTE_LIBRTE_BOND_DEBUG_ALB -#define MODE6_DEBUG(info, src_ip, dst_ip, eth_h, arp_op, port, burstnumber) \ - RTE_LOG(DEBUG, PMD, \ - "%s " \ - "port:%d " \ - "SrcMAC:%02X:%02X:%02X:%02X:%02X:%02X " \ - "SrcIP:%s " \ - "DstMAC:%02X:%02X:%02X:%02X:%02X:%02X " \ - "DstIP:%s " \ - "%s " \ - "%d\n", \ - info, \ - port, \ - eth_h->s_addr.addr_bytes[0], \ - eth_h->s_addr.addr_bytes[1], \ - eth_h->s_addr.addr_bytes[2], \ - eth_h->s_addr.addr_bytes[3], \ - eth_h->s_addr.addr_bytes[4], \ - eth_h->s_addr.addr_bytes[5], \ - src_ip, \ - eth_h->d_addr.addr_bytes[0], \ - eth_h->d_addr.addr_bytes[1], \ - eth_h->d_addr.addr_bytes[2], \ - eth_h->d_addr.addr_bytes[3], \ - eth_h->d_addr.addr_bytes[4], \ - eth_h->d_addr.addr_bytes[5], \ - dst_ip, \ - arp_op, \ - ++burstnumber) +#define MODE6_DEBUG(info, src_ip, dst_ip, eth_h, 
arp_op, port, burstnumber) \ + rte_log(RTE_LOG_DEBUG, bond_logtype, \ + "%s port:%d SrcMAC:%02X:%02X:%02X:%02X:%02X:%02X SrcIP:%s " \ + "DstMAC:%02X:%02X:%02X:%02X:%02X:%02X DstIP:%s %s %d\n", \ + info, \ + port, \ + eth_h->s_addr.addr_bytes[0], eth_h->s_addr.addr_bytes[1], \ + eth_h->s_addr.addr_bytes[2], eth_h->s_addr.addr_bytes[3], \ + eth_h->s_addr.addr_bytes[4], eth_h->s_addr.addr_bytes[5], \ + src_ip, \ + eth_h->d_addr.addr_bytes[0], eth_h->d_addr.addr_bytes[1], \ + eth_h->d_addr.addr_bytes[2], eth_h->d_addr.addr_bytes[3], \ + eth_h->d_addr.addr_bytes[4], eth_h->d_addr.addr_bytes[5], \ + dst_ip, \ + arp_op, ++burstnumber) #endif static void @@ -617,7 +605,7 @@ mode6_debug(const char __attribute__((unused)) *info, struct ether_hdr *eth_h, uint16_t offset = get_vlan_offset(eth_h, ðer_type); #ifdef RTE_LIBRTE_BOND_DEBUG_ALB - snprintf(buf, 16, "%s", info); + strlcpy(buf, info, 16); #endif if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) { @@ -1138,7 +1126,8 @@ bond_ethdev_tx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) /* Allocate new packet to send ARP update on current slave */ upd_pkt = rte_pktmbuf_alloc(internals->mode6.mempool); if (upd_pkt == NULL) { - RTE_LOG(ERR, PMD, "Failed to allocate ARP packet from pool\n"); + RTE_BOND_LOG(ERR, + "Failed to allocate ARP packet from pool"); continue; } pkt_size = sizeof(struct ether_hdr) + sizeof(struct arp_hdr) @@ -1560,12 +1549,12 @@ mac_address_get(struct rte_eth_dev *eth_dev, struct ether_addr *dst_mac_addr) struct ether_addr *mac_addr; if (eth_dev == NULL) { - RTE_LOG(ERR, PMD, "%s: NULL pointer eth_dev specified\n", __func__); + RTE_BOND_LOG(ERR, "NULL pointer eth_dev specified"); return -1; } if (dst_mac_addr == NULL) { - RTE_LOG(ERR, PMD, "%s: NULL pointer MAC specified\n", __func__); + RTE_BOND_LOG(ERR, "NULL pointer MAC specified"); return -1; } @@ -1686,9 +1675,9 @@ bond_ethdev_mode_set(struct rte_eth_dev *eth_dev, int mode) if (internals->mode4.dedicated_queues.enabled == 0) { eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_8023ad; eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_8023ad; - RTE_LOG(WARNING, PMD, + RTE_BOND_LOG(WARNING, "Using mode 4, it is necessary to do TX burst " - "and RX burst at least every 100ms.\n"); + "and RX burst at least every 100ms."); } else { /* Use flow director's optimization */ eth_dev->rx_pkt_burst = @@ -1818,8 +1807,13 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev, bonded_eth_dev->data->dev_conf.rxmode.mq_mode; } - slave_eth_dev->data->dev_conf.rxmode.hw_vlan_filter = - bonded_eth_dev->data->dev_conf.rxmode.hw_vlan_filter; + if (bonded_eth_dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_VLAN_FILTER) + slave_eth_dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_VLAN_FILTER; + else + slave_eth_dev->data->dev_conf.rxmode.offloads &= + ~DEV_RX_OFFLOAD_VLAN_FILTER; nb_rx_queues = bonded_eth_dev->data->nb_rx_queues; nb_tx_queues = bonded_eth_dev->data->nb_tx_queues; @@ -1831,12 +1825,20 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev, } } + errval = rte_eth_dev_set_mtu(slave_eth_dev->data->port_id, + bonded_eth_dev->data->mtu); + if (errval != 0 && errval != -ENOTSUP) { + RTE_BOND_LOG(ERR, "rte_eth_dev_set_mtu: port %u, err (%d)", + slave_eth_dev->data->port_id, errval); + return errval; + } + /* Configure device */ errval = rte_eth_dev_configure(slave_eth_dev->data->port_id, nb_rx_queues, nb_tx_queues, &(slave_eth_dev->data->dev_conf)); if (errval != 0) { - RTE_BOND_LOG(ERR, "Cannot configure slave device: port %u , err (%d)", + RTE_BOND_LOG(ERR, "Cannot 
configure slave device: port %u, err (%d)", slave_eth_dev->data->port_id, errval); return errval; } @@ -1918,10 +1920,10 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev, &internals->reta_conf[0], internals->slaves[i].reta_size); if (errval != 0) { - RTE_LOG(WARNING, PMD, - "rte_eth_dev_rss_reta_update on slave port %d fails (err %d)." - " RSS Configuration for bonding may be inconsistent.\n", - slave_eth_dev->data->port_id, errval); + RTE_BOND_LOG(WARNING, + "rte_eth_dev_rss_reta_update on slave port %d fails (err %d)." + " RSS Configuration for bonding may be inconsistent.", + slave_eth_dev->data->port_id, errval); } break; } @@ -1950,10 +1952,19 @@ slave_remove(struct bond_dev_private *internals, slave_eth_dev->data->port_id) break; - if (i < (internals->slave_count - 1)) + if (i < (internals->slave_count - 1)) { + struct rte_flow *flow; + memmove(&internals->slaves[i], &internals->slaves[i + 1], sizeof(internals->slaves[0]) * (internals->slave_count - i - 1)); + TAILQ_FOREACH(flow, &internals->flow_list, next) { + memmove(&flow->flows[i], &flow->flows[i + 1], + sizeof(flow->flows[0]) * + (internals->slave_count - i - 1)); + flow->flows[internals->slave_count - 1] = NULL; + } + } internals->slave_count--; @@ -2026,7 +2037,7 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev) if (internals->slave_count == 0) { RTE_BOND_LOG(ERR, "Cannot start port since there are no slave devices"); - return -1; + goto out_err; } if (internals->user_defined_mac == 0) { @@ -2037,18 +2048,18 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev) new_mac_addr = &internals->slaves[i].persisted_mac_addr; if (new_mac_addr == NULL) - return -1; + goto out_err; if (mac_address_set(eth_dev, new_mac_addr) != 0) { RTE_BOND_LOG(ERR, "bonded port (%d) failed to update MAC address", eth_dev->data->port_id); - return -1; + goto out_err; } } /* Update all slave devices MACs*/ if (mac_address_slaves_update(eth_dev) != 0) - return -1; + goto out_err; /* If bonded device is configure in promiscuous mode then re-apply config */ if (internals->promiscuous_en) @@ -2073,7 +2084,7 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev) "bonded port (%d) failed to reconfigure slave device (%d)", eth_dev->data->port_id, internals->slaves[i].port_id); - return -1; + goto out_err; } /* We will need to poll for link status if any slave doesn't * support interrupts @@ -2081,6 +2092,7 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev) if (internals->slaves[i].link_status_poll_enabled) internals->link_status_polling_enabled = 1; } + /* start polling if needed */ if (internals->link_status_polling_enabled) { rte_eal_alarm_set( @@ -2100,6 +2112,10 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev) bond_tlb_enable(internals); return 0; + +out_err: + eth_dev->data->dev_started = 0; + return -1; } static void @@ -2172,20 +2188,22 @@ bond_ethdev_close(struct rte_eth_dev *dev) struct bond_dev_private *internals = dev->data->dev_private; uint8_t bond_port_id = internals->port_id; int skipped = 0; + struct rte_flow_error ferror; - RTE_LOG(INFO, EAL, "Closing bonded device %s\n", dev->device->name); + RTE_BOND_LOG(INFO, "Closing bonded device %s", dev->device->name); while (internals->slave_count != skipped) { uint16_t port_id = internals->slaves[skipped].port_id; rte_eth_dev_stop(port_id); if (rte_eth_bond_slave_remove(bond_port_id, port_id) != 0) { - RTE_LOG(ERR, EAL, - "Failed to remove port %d from bonded device " - "%s\n", port_id, dev->device->name); + RTE_BOND_LOG(ERR, + "Failed to remove port %d from bonded device %s", + port_id, 
dev->device->name); skipped++; } } + bond_flow_ops.flush(dev, &ferror); bond_ethdev_free_queues(dev); rte_bitmap_reset(internals->vlan_filter_bmp); } @@ -2244,6 +2262,8 @@ bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->rx_offload_capa = internals->rx_offload_capa; dev_info->tx_offload_capa = internals->tx_offload_capa; + dev_info->rx_queue_offload_capa = internals->rx_queue_offload_capa; + dev_info->tx_queue_offload_capa = internals->tx_queue_offload_capa; dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads; dev_info->reta_size = internals->reta_size; @@ -2269,9 +2289,9 @@ bond_ethdev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) res = rte_eth_dev_vlan_filter(port_id, vlan_id, on); if (res == ENOTSUP) - RTE_LOG(WARNING, PMD, - "Setting VLAN filter on slave port %u not supported.\n", - port_id); + RTE_BOND_LOG(WARNING, + "Setting VLAN filter on slave port %u not supported.", + port_id); } rte_spinlock_unlock(&internals->lock); @@ -2633,14 +2653,21 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type, if (!valid_slave) return rc; + /* Synchronize lsc callback parallel calls either by real link event + * from the slaves PMDs or by the bonding PMD itself. + */ + rte_spinlock_lock(&internals->lsc_lock); + /* Search for port in active port list */ active_pos = find_slave_by_id(internals->active_slaves, internals->active_slave_count, port_id); rte_eth_link_get_nowait(port_id, &link); if (link.link_status) { - if (active_pos < internals->active_slave_count) + if (active_pos < internals->active_slave_count) { + rte_spinlock_unlock(&internals->lsc_lock); return rc; + } /* if no active slave ports then set this port to be primary port */ if (internals->active_slave_count < 1) { @@ -2659,8 +2686,10 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type, internals->primary_port == port_id) bond_ethdev_primary_set(internals, port_id); } else { - if (active_pos == internals->active_slave_count) + if (active_pos == internals->active_slave_count) { + rte_spinlock_unlock(&internals->lsc_lock); return rc; + } /* Remove from active slave list */ deactivate_slave(bonded_eth_dev, port_id); @@ -2713,6 +2742,9 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type, NULL); } } + + rte_spinlock_unlock(&internals->lsc_lock); + return 0; } @@ -2851,11 +2883,26 @@ bond_ethdev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) return 0; } -static void +static int bond_ethdev_mac_address_set(struct rte_eth_dev *dev, struct ether_addr *addr) { - if (mac_address_set(dev, addr)) + if (mac_address_set(dev, addr)) { RTE_BOND_LOG(ERR, "Failed to update MAC address"); + return -EINVAL; + } + + return 0; +} + +static int +bond_filter_ctrl(struct rte_eth_dev *dev __rte_unused, + enum rte_filter_type type, enum rte_filter_op op, void *arg) +{ + if (type == RTE_ETH_FILTER_GENERIC && op == RTE_ETH_FILTER_GET) { + *(const void **)arg = &bond_flow_ops; + return 0; + } + return -ENOTSUP; } const struct eth_dev_ops default_dev_ops = { @@ -2879,7 +2926,8 @@ const struct eth_dev_ops default_dev_ops = { .rss_hash_update = bond_ethdev_rss_hash_update, .rss_hash_conf_get = bond_ethdev_rss_hash_conf_get, .mtu_set = bond_ethdev_mtu_set, - .mac_addr_set = bond_ethdev_mac_address_set + .mac_addr_set = bond_ethdev_mac_address_set, + .filter_ctrl = bond_filter_ctrl }; static int @@ -2917,6 +2965,7 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode) eth_dev->data->dev_flags = 
RTE_ETH_DEV_INTR_LSC; rte_spinlock_init(&internals->lock); + rte_spinlock_init(&internals->lsc_lock); internals->port_id = eth_dev->data->port_id; internals->mode = BONDING_MODE_INVALID; @@ -2936,6 +2985,8 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode) internals->active_slave_count = 0; internals->rx_offload_capa = 0; internals->tx_offload_capa = 0; + internals->rx_queue_offload_capa = 0; + internals->tx_queue_offload_capa = 0; internals->candidate_max_rx_pktlen = 0; internals->max_rx_pktlen = 0; @@ -2945,10 +2996,13 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode) memset(internals->active_slaves, 0, sizeof(internals->active_slaves)); memset(internals->slaves, 0, sizeof(internals->slaves)); + TAILQ_INIT(&internals->flow_list); + internals->flow_isolated_valid = 0; + /* Set mode 4 default configuration */ bond_mode_8023ad_setup(eth_dev, NULL); if (bond_ethdev_mode_set(eth_dev, mode)) { - RTE_BOND_LOG(ERR, "Failed to set bonded device %d mode too %d", + RTE_BOND_LOG(ERR, "Failed to set bonded device %d mode to %d", eth_dev->data->port_id, mode); goto err; } @@ -2959,7 +3013,7 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode) RTE_CACHE_LINE_SIZE); if (internals->vlan_filter_bmpmem == NULL) { RTE_BOND_LOG(ERR, - "Failed to allocate vlan bitmap for bonded device %u\n", + "Failed to allocate vlan bitmap for bonded device %u", eth_dev->data->port_id); goto err; } @@ -2968,7 +3022,7 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode) internals->vlan_filter_bmpmem, vlan_filter_bmp_size); if (internals->vlan_filter_bmp == NULL) { RTE_BOND_LOG(ERR, - "Failed to init vlan bitmap for bonded device %u\n", + "Failed to init vlan bitmap for bonded device %u", eth_dev->data->port_id); rte_free(internals->vlan_filter_bmpmem); goto err; @@ -2994,12 +3048,26 @@ bond_probe(struct rte_vdev_device *dev) uint8_t bonding_mode, socket_id/*, agg_mode*/; int arg_count, port_id; uint8_t agg_mode; + struct rte_eth_dev *eth_dev; if (!dev) return -EINVAL; name = rte_vdev_device_name(dev); - RTE_LOG(INFO, EAL, "Initializing pmd_bond for %s\n", name); + RTE_BOND_LOG(INFO, "Initializing pmd_bond for %s", name); + + if (rte_eal_process_type() == RTE_PROC_SECONDARY && + strlen(rte_vdev_device_args(dev)) == 0) { + eth_dev = rte_eth_dev_attach_secondary(name); + if (!eth_dev) { + RTE_BOND_LOG(ERR, "Failed to probe %s", name); + return -1; + } + /* TODO: request info from primary to set up Rx and Tx */ + eth_dev->dev_ops = &default_dev_ops; + rte_eth_dev_probing_finish(eth_dev); + return 0; + } kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), pmd_bond_init_valid_arguments); @@ -3011,13 +3079,13 @@ bond_probe(struct rte_vdev_device *dev) if (rte_kvargs_process(kvlist, PMD_BOND_MODE_KVARG, &bond_ethdev_parse_slave_mode_kvarg, &bonding_mode) != 0) { - RTE_LOG(ERR, EAL, "Invalid mode for bonded device %s\n", + RTE_BOND_LOG(ERR, "Invalid mode for bonded device %s", name); goto parse_error; } } else { - RTE_LOG(ERR, EAL, "Mode must be specified only once for bonded " - "device %s\n", name); + RTE_BOND_LOG(ERR, "Mode must be specified only once for bonded " + "device %s", name); goto parse_error; } @@ -3027,13 +3095,13 @@ bond_probe(struct rte_vdev_device *dev) if (rte_kvargs_process(kvlist, PMD_BOND_SOCKET_ID_KVARG, &bond_ethdev_parse_socket_id_kvarg, &socket_id) != 0) { - RTE_LOG(ERR, EAL, "Invalid socket Id specified for " - "bonded device %s\n", name); + RTE_BOND_LOG(ERR, "Invalid socket Id specified for " + "bonded device %s", name); goto parse_error; } } else if (arg_count > 1) { - RTE_LOG(ERR, 
EAL, "Socket Id can be specified only once for " - "bonded device %s\n", name); + RTE_BOND_LOG(ERR, "Socket Id can be specified only once for " + "bonded device %s", name); goto parse_error; } else { socket_id = rte_socket_id(); @@ -3044,8 +3112,8 @@ bond_probe(struct rte_vdev_device *dev) /* Create link bonding eth device */ port_id = bond_alloc(dev, bonding_mode); if (port_id < 0) { - RTE_LOG(ERR, EAL, "Failed to create socket %s in mode %u on " - "socket %u.\n", name, bonding_mode, socket_id); + RTE_BOND_LOG(ERR, "Failed to create socket %s in mode %u on " + "socket %u.", name, bonding_mode, socket_id); goto parse_error; } internals = rte_eth_devices[port_id].data->dev_private; @@ -3057,8 +3125,8 @@ bond_probe(struct rte_vdev_device *dev) PMD_BOND_AGG_MODE_KVARG, &bond_ethdev_parse_slave_agg_mode_kvarg, &agg_mode) != 0) { - RTE_LOG(ERR, EAL, - "Failed to parse agg selection mode for bonded device %s\n", + RTE_BOND_LOG(ERR, + "Failed to parse agg selection mode for bonded device %s", name); goto parse_error; } @@ -3070,8 +3138,9 @@ bond_probe(struct rte_vdev_device *dev) rte_eth_bond_8023ad_agg_selection_set(port_id, AGG_STABLE); } - RTE_LOG(INFO, EAL, "Create bonded device %s on port %d in mode %u on " - "socket %u.\n", name, port_id, bonding_mode, socket_id); + rte_eth_dev_probing_finish(&rte_eth_devices[port_id]); + RTE_BOND_LOG(INFO, "Create bonded device %s on port %d in mode %u on " + "socket %u.", name, port_id, bonding_mode, socket_id); return 0; parse_error: @@ -3091,7 +3160,7 @@ bond_remove(struct rte_vdev_device *dev) return -EINVAL; name = rte_vdev_device_name(dev); - RTE_LOG(INFO, EAL, "Uninitializing pmd_bond for %s\n", name); + RTE_BOND_LOG(INFO, "Uninitializing pmd_bond for %s", name); /* now free all data allocation - for eth_dev structure, * dummy pci driver and internal (private) data @@ -3118,6 +3187,10 @@ bond_remove(struct rte_vdev_device *dev) eth_dev->tx_pkt_burst = NULL; internals = eth_dev->data->dev_private; + /* Try to release mempool used in mode6. If the bond + * device is not mode6, free the NULL is not problem. 
+ */ + rte_mempool_free(internals->mode6.mempool); rte_bitmap_free(internals->vlan_filter_bmp); rte_free(internals->vlan_filter_bmpmem); rte_free(eth_dev->data->dev_private); @@ -3178,23 +3251,23 @@ bond_ethdev_configure(struct rte_eth_dev *dev) struct ether_addr bond_mac; if (rte_kvargs_process(kvlist, PMD_BOND_MAC_ADDR_KVARG, - &bond_ethdev_parse_bond_mac_addr_kvarg, &bond_mac) < 0) { - RTE_LOG(INFO, EAL, "Invalid mac address for bonded device %s\n", - name); + &bond_ethdev_parse_bond_mac_addr_kvarg, &bond_mac) < 0) { + RTE_BOND_LOG(INFO, "Invalid mac address for bonded device %s", + name); return -1; } /* Set MAC address */ if (rte_eth_bond_mac_address_set(port_id, &bond_mac) != 0) { - RTE_LOG(ERR, EAL, - "Failed to set mac address on bonded device %s\n", - name); + RTE_BOND_LOG(ERR, + "Failed to set mac address on bonded device %s", + name); return -1; } } else if (arg_count > 1) { - RTE_LOG(ERR, EAL, - "MAC address can be specified only once for bonded device %s\n", - name); + RTE_BOND_LOG(ERR, + "MAC address can be specified only once for bonded device %s", + name); return -1; } @@ -3204,40 +3277,40 @@ bond_ethdev_configure(struct rte_eth_dev *dev) uint8_t xmit_policy; if (rte_kvargs_process(kvlist, PMD_BOND_XMIT_POLICY_KVARG, - &bond_ethdev_parse_balance_xmit_policy_kvarg, &xmit_policy) != - 0) { - RTE_LOG(INFO, EAL, - "Invalid xmit policy specified for bonded device %s\n", - name); + &bond_ethdev_parse_balance_xmit_policy_kvarg, &xmit_policy) != + 0) { + RTE_BOND_LOG(INFO, + "Invalid xmit policy specified for bonded device %s", + name); return -1; } /* Set balance mode transmit policy*/ if (rte_eth_bond_xmit_policy_set(port_id, xmit_policy) != 0) { - RTE_LOG(ERR, EAL, - "Failed to set balance xmit policy on bonded device %s\n", - name); + RTE_BOND_LOG(ERR, + "Failed to set balance xmit policy on bonded device %s", + name); return -1; } } else if (arg_count > 1) { - RTE_LOG(ERR, EAL, - "Transmit policy can be specified only once for bonded device" - " %s\n", name); + RTE_BOND_LOG(ERR, + "Transmit policy can be specified only once for bonded device %s", + name); return -1; } if (rte_kvargs_count(kvlist, PMD_BOND_AGG_MODE_KVARG) == 1) { if (rte_kvargs_process(kvlist, - PMD_BOND_AGG_MODE_KVARG, - &bond_ethdev_parse_slave_agg_mode_kvarg, - &agg_mode) != 0) { - RTE_LOG(ERR, EAL, - "Failed to parse agg selection mode for bonded device %s\n", - name); + PMD_BOND_AGG_MODE_KVARG, + &bond_ethdev_parse_slave_agg_mode_kvarg, + &agg_mode) != 0) { + RTE_BOND_LOG(ERR, + "Failed to parse agg selection mode for bonded device %s", + name); } if (internals->mode == BONDING_MODE_8023AD) - rte_eth_bond_8023ad_agg_selection_set(port_id, - agg_mode); + rte_eth_bond_8023ad_agg_selection_set(port_id, + agg_mode); } /* Parse/add slave ports to bonded device */ @@ -3248,23 +3321,23 @@ bond_ethdev_configure(struct rte_eth_dev *dev) memset(&slave_ports, 0, sizeof(slave_ports)); if (rte_kvargs_process(kvlist, PMD_BOND_SLAVE_PORT_KVARG, - &bond_ethdev_parse_slave_port_kvarg, &slave_ports) != 0) { - RTE_LOG(ERR, EAL, - "Failed to parse slave ports for bonded device %s\n", - name); + &bond_ethdev_parse_slave_port_kvarg, &slave_ports) != 0) { + RTE_BOND_LOG(ERR, + "Failed to parse slave ports for bonded device %s", + name); return -1; } for (i = 0; i < slave_ports.slave_count; i++) { if (rte_eth_bond_slave_add(port_id, slave_ports.slaves[i]) != 0) { - RTE_LOG(ERR, EAL, - "Failed to add port %d as slave to bonded device %s\n", - slave_ports.slaves[i], name); + RTE_BOND_LOG(ERR, + "Failed to add port %d as slave to 
bonded device %s", + slave_ports.slaves[i], name); } } } else { - RTE_LOG(INFO, EAL, "No slaves specified for bonded device %s\n", name); + RTE_BOND_LOG(INFO, "No slaves specified for bonded device %s", name); return -1; } @@ -3274,27 +3347,27 @@ bond_ethdev_configure(struct rte_eth_dev *dev) uint16_t primary_slave_port_id; if (rte_kvargs_process(kvlist, - PMD_BOND_PRIMARY_SLAVE_KVARG, - &bond_ethdev_parse_primary_slave_port_id_kvarg, - &primary_slave_port_id) < 0) { - RTE_LOG(INFO, EAL, - "Invalid primary slave port id specified for bonded device" - " %s\n", name); + PMD_BOND_PRIMARY_SLAVE_KVARG, + &bond_ethdev_parse_primary_slave_port_id_kvarg, + &primary_slave_port_id) < 0) { + RTE_BOND_LOG(INFO, + "Invalid primary slave port id specified for bonded device %s", + name); return -1; } /* Set balance mode transmit policy*/ if (rte_eth_bond_primary_set(port_id, primary_slave_port_id) - != 0) { - RTE_LOG(ERR, EAL, - "Failed to set primary slave port %d on bonded device %s\n", - primary_slave_port_id, name); + != 0) { + RTE_BOND_LOG(ERR, + "Failed to set primary slave port %d on bonded device %s", + primary_slave_port_id, name); return -1; } } else if (arg_count > 1) { - RTE_LOG(INFO, EAL, - "Primary slave can be specified only once for bonded device" - " %s\n", name); + RTE_BOND_LOG(INFO, + "Primary slave can be specified only once for bonded device %s", + name); return -1; } @@ -3304,26 +3377,26 @@ bond_ethdev_configure(struct rte_eth_dev *dev) uint32_t lsc_poll_interval_ms; if (rte_kvargs_process(kvlist, - PMD_BOND_LSC_POLL_PERIOD_KVARG, - &bond_ethdev_parse_time_ms_kvarg, - &lsc_poll_interval_ms) < 0) { - RTE_LOG(INFO, EAL, - "Invalid lsc polling interval value specified for bonded" - " device %s\n", name); + PMD_BOND_LSC_POLL_PERIOD_KVARG, + &bond_ethdev_parse_time_ms_kvarg, + &lsc_poll_interval_ms) < 0) { + RTE_BOND_LOG(INFO, + "Invalid lsc polling interval value specified for bonded" + " device %s", name); return -1; } if (rte_eth_bond_link_monitoring_set(port_id, lsc_poll_interval_ms) - != 0) { - RTE_LOG(ERR, EAL, - "Failed to set lsc monitor polling interval (%u ms) on" - " bonded device %s\n", lsc_poll_interval_ms, name); + != 0) { + RTE_BOND_LOG(ERR, + "Failed to set lsc monitor polling interval (%u ms) on bonded device %s", + lsc_poll_interval_ms, name); return -1; } } else if (arg_count > 1) { - RTE_LOG(INFO, EAL, - "LSC polling interval can be specified only once for bonded" - " device %s\n", name); + RTE_BOND_LOG(INFO, + "LSC polling interval can be specified only once for bonded" + " device %s", name); return -1; } @@ -3333,27 +3406,27 @@ bond_ethdev_configure(struct rte_eth_dev *dev) uint32_t link_up_delay_ms; if (rte_kvargs_process(kvlist, - PMD_BOND_LINK_UP_PROP_DELAY_KVARG, - &bond_ethdev_parse_time_ms_kvarg, - &link_up_delay_ms) < 0) { - RTE_LOG(INFO, EAL, - "Invalid link up propagation delay value specified for" - " bonded device %s\n", name); + PMD_BOND_LINK_UP_PROP_DELAY_KVARG, + &bond_ethdev_parse_time_ms_kvarg, + &link_up_delay_ms) < 0) { + RTE_BOND_LOG(INFO, + "Invalid link up propagation delay value specified for" + " bonded device %s", name); return -1; } /* Set balance mode transmit policy*/ if (rte_eth_bond_link_up_prop_delay_set(port_id, link_up_delay_ms) - != 0) { - RTE_LOG(ERR, EAL, - "Failed to set link up propagation delay (%u ms) on bonded" - " device %s\n", link_up_delay_ms, name); + != 0) { + RTE_BOND_LOG(ERR, + "Failed to set link up propagation delay (%u ms) on bonded" + " device %s", link_up_delay_ms, name); return -1; } } else if (arg_count > 1) { - 
RTE_LOG(INFO, EAL, - "Link up propagation delay can be specified only once for" - " bonded device %s\n", name); + RTE_BOND_LOG(INFO, + "Link up propagation delay can be specified only once for" + " bonded device %s", name); return -1; } @@ -3363,27 +3436,27 @@ bond_ethdev_configure(struct rte_eth_dev *dev) uint32_t link_down_delay_ms; if (rte_kvargs_process(kvlist, - PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG, - &bond_ethdev_parse_time_ms_kvarg, - &link_down_delay_ms) < 0) { - RTE_LOG(INFO, EAL, - "Invalid link down propagation delay value specified for" - " bonded device %s\n", name); + PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG, + &bond_ethdev_parse_time_ms_kvarg, + &link_down_delay_ms) < 0) { + RTE_BOND_LOG(INFO, + "Invalid link down propagation delay value specified for" + " bonded device %s", name); return -1; } /* Set balance mode transmit policy*/ if (rte_eth_bond_link_down_prop_delay_set(port_id, link_down_delay_ms) - != 0) { - RTE_LOG(ERR, EAL, - "Failed to set link down propagation delay (%u ms) on" - " bonded device %s\n", link_down_delay_ms, name); + != 0) { + RTE_BOND_LOG(ERR, + "Failed to set link down propagation delay (%u ms) on bonded device %s", + link_down_delay_ms, name); return -1; } } else if (arg_count > 1) { - RTE_LOG(INFO, EAL, - "Link down propagation delay can be specified only once for" - " bonded device %s\n", name); + RTE_BOND_LOG(INFO, + "Link down propagation delay can be specified only once for bonded device %s", + name); return -1; } @@ -3409,3 +3482,14 @@ RTE_PMD_REGISTER_PARAM_STRING(net_bonding, "lsc_poll_period_ms= " "up_delay= " "down_delay="); + +int bond_logtype; + +RTE_INIT(bond_init_log); +static void +bond_init_log(void) +{ + bond_logtype = rte_log_register("pmd.net.bon"); + if (bond_logtype >= 0) + rte_log_set_level(bond_logtype, RTE_LOG_NOTICE); +} diff --git a/drivers/net/bonding/rte_eth_bond_private.h b/drivers/net/bonding/rte_eth_bond_private.h index 92e15f8c..65445b86 100644 --- a/drivers/net/bonding/rte_eth_bond_private.h +++ b/drivers/net/bonding/rte_eth_bond_private.h @@ -5,9 +5,12 @@ #ifndef _RTE_ETH_BOND_PRIVATE_H_ #define _RTE_ETH_BOND_PRIVATE_H_ +#include + #include #include #include +#include #include "rte_eth_bond.h" #include "rte_eth_bond_8023ad_private.h" @@ -28,8 +31,11 @@ #define PMD_BOND_XMIT_POLICY_LAYER23_KVARG ("l23") #define PMD_BOND_XMIT_POLICY_LAYER34_KVARG ("l34") +extern int bond_logtype; + #define RTE_BOND_LOG(lvl, msg, ...) 
\ - RTE_LOG(lvl, PMD, "%s(%d) - " msg "\n", __func__, __LINE__, ##__VA_ARGS__) + rte_log(RTE_LOG_ ## lvl, bond_logtype, \ + "%s(%d) - " msg "\n", __func__, __LINE__, ##__VA_ARGS__) #define BONDING_MODE_INVALID 0xFF @@ -37,6 +43,8 @@ extern const char *pmd_bond_init_valid_arguments[]; extern struct rte_vdev_driver pmd_bond_drv; +extern const struct rte_flow_ops bond_flow_ops; + /** Port Queue Mapping Structure */ struct bond_rx_queue { uint16_t queue_id; @@ -80,6 +88,14 @@ struct bond_slave_details { uint16_t reta_size; }; +struct rte_flow { + TAILQ_ENTRY(rte_flow) next; + /* Slaves flows */ + struct rte_flow *flows[RTE_MAX_ETHPORTS]; + /* Flow description for synchronization */ + struct rte_flow_desc *fd; +}; + typedef void (*burst_xmit_hash_t)(struct rte_mbuf **buf, uint16_t nb_pkts, uint8_t slave_count, uint16_t *slaves); @@ -89,6 +105,7 @@ struct bond_dev_private { uint8_t mode; /**< Link Bonding Mode */ rte_spinlock_t lock; + rte_spinlock_t lsc_lock; uint16_t primary_port; /**< Primary Slave Port */ uint16_t current_primary_port; /**< Primary Slave Port */ @@ -128,8 +145,17 @@ struct bond_dev_private { /**< TLB active slaves send order */ struct mode_alb_private mode6; - uint32_t rx_offload_capa; /** Rx offload capability */ - uint32_t tx_offload_capa; /** Tx offload capability */ + uint64_t rx_offload_capa; /** Rx offload capability */ + uint64_t tx_offload_capa; /** Tx offload capability */ + uint64_t rx_queue_offload_capa; /** per queue Rx offload capability */ + uint64_t tx_queue_offload_capa; /** per queue Tx offload capability */ + + /**< List of the configured flows */ + TAILQ_HEAD(sub_flows, rte_flow) flow_list; + + /**< Flow isolation state */ + int flow_isolated; + int flow_isolated_valid; /** Bit mask of RSS offloads, the bit offset also means flow type */ uint64_t flow_type_rss_offloads; diff --git a/drivers/net/bonding/rte_pmd_bond_version.map b/drivers/net/bonding/rte_pmd_bond_version.map index ec3374b0..03ddb44e 100644 --- a/drivers/net/bonding/rte_pmd_bond_version.map +++ b/drivers/net/bonding/rte_pmd_bond_version.map @@ -1,6 +1,7 @@ DPDK_2.0 { global: + rte_eth_bond_8023ad_slave_info; rte_eth_bond_active_slaves_get; rte_eth_bond_create; rte_eth_bond_link_monitoring_set; diff --git a/drivers/net/cxgbe/Makefile b/drivers/net/cxgbe/Makefile index 65df1425..79fdb6f0 100644 --- a/drivers/net/cxgbe/Makefile +++ b/drivers/net/cxgbe/Makefile @@ -1,33 +1,6 @@ -# BSD LICENSE -# -# Copyright(c) 2014-2015 Chelsio Communications. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in -# the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Chelsio Communications nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. 
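The hunks above convert the bonding PMD from the shared EAL/PMD logtypes to a driver-private dynamic logtype: messages now go through rte_log() with the type returned by rte_log_register(), so bonding verbosity can be tuned without touching other components. A minimal sketch of the same pattern follows; the "pmd.net.demo" name and all DEMO_*/demo_* identifiers are hypothetical stand-ins, and the registration mirrors the RTE_INIT constructor used in the patch.

    #include <rte_eal.h>
    #include <rte_log.h>

    static int demo_logtype;

    /* Route messages through the registered logtype, keeping the
     * "function(line) - " prefix that RTE_BOND_LOG uses above. */
    #define DEMO_LOG(lvl, fmt, ...) \
        rte_log(RTE_LOG_ ## lvl, demo_logtype, \
                "%s(%d) - " fmt "\n", __func__, __LINE__, ##__VA_ARGS__)

    RTE_INIT(demo_init_log);
    static void
    demo_init_log(void)
    {
        /* Register once at startup and default to NOTICE, as the
         * bonding driver does; finer levels can be enabled at runtime
         * through the EAL log-level options. */
        demo_logtype = rte_log_register("pmd.net.demo");
        if (demo_logtype >= 0)
            rte_log_set_level(demo_logtype, RTE_LOG_NOTICE);
    }

A call such as DEMO_LOG(INFO, "no slaves specified for %s", name) then replaces the old RTE_LOG(INFO, EAL, ...) form, with the trailing newline supplied by the macro.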
IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2014-2018 Chelsio Communications. +# All rights reserved. include $(RTE_SDK)/mk/rte.vars.mk @@ -45,12 +18,6 @@ EXPORT_MAP := rte_pmd_cxgbe_version.map LIBABIVER := 1 -ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y) -# -# CFLAGS for icc -# -CFLAGS_BASE_DRIVER = -wd188 -else # # CFLAGS for gcc/clang # @@ -59,9 +26,7 @@ ifeq ($(shell test $(GCC_VERSION) -ge 44 && echo 1), 1) CFLAGS += -Wno-deprecated endif endif -CFLAGS_BASE_DRIVER = -endif LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs LDLIBS += -lrte_bus_pci @@ -80,8 +45,11 @@ VPATH += $(SRCDIR)/base # all source are stored in SRCS-y # SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbe_ethdev.c +SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbevf_ethdev.c SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbe_main.c +SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbevf_main.c SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += sge.c SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += t4_hw.c +SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += t4vf_hw.c include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/net/cxgbe/base/adapter.h b/drivers/net/cxgbe/base/adapter.h index f2057af1..55cb2e91 100644 --- a/drivers/net/cxgbe/base/adapter.h +++ b/drivers/net/cxgbe/base/adapter.h @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2014-2017 Chelsio Communications. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Chelsio Communications nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Chelsio Communications. 
+ * All rights reserved. */ /* This file should not be included directly. Include common.h instead. */ @@ -68,6 +40,7 @@ struct port_info { u8 port_type; /* firmware port type */ u8 mod_type; /* firmware module type */ u8 port_id; /* physical port ID */ + u8 pidx; /* port index for this PF */ u8 tx_chan; /* associated channel */ u8 n_rx_qsets; /* # of rx qsets */ @@ -77,6 +50,7 @@ struct port_info { u16 *rss; /* rss table */ u8 rss_mode; /* rss mode */ u16 rss_size; /* size of VI's RSS table slice */ + u64 rss_hf; /* RSS Hash Function */ }; /* Enable or disable autonegotiation. If this is set to enable, @@ -196,6 +170,7 @@ struct sge_eth_rxq { /* a SW Ethernet Rx queue */ * scenario where a packet needs 32 bytes. */ #define ETH_COALESCE_PKT_NUM 15 +#define ETH_COALESCE_VF_PKT_NUM 7 #define ETH_COALESCE_PKT_PER_DESC 2 struct tx_eth_coal_desc { @@ -225,6 +200,10 @@ struct eth_coalesce { unsigned int len; unsigned int flits; unsigned int max; + __u8 ethmacdst[ETHER_ADDR_LEN]; + __u8 ethmacsrc[ETHER_ADDR_LEN]; + __be16 ethtype; + __be16 vlantci; }; struct sge_txq { @@ -247,6 +226,7 @@ struct sge_txq { unsigned int equeidx; /* last sent credit request */ unsigned int last_pidx; /* last pidx recorded by tx monitor */ unsigned int last_coal_idx;/* last coal-idx recorded by tx monitor */ + unsigned int abs_id; int db_disabled; /* doorbell state */ unsigned short db_pidx; /* doorbell producer index */ @@ -267,6 +247,7 @@ struct sge_eth_tx_stats { /* Ethernet tx queue statistics */ struct sge_eth_txq { /* state for an SGE Ethernet Tx queue */ struct sge_txq q; struct rte_eth_dev *eth_dev; /* port that this queue belongs to */ + struct rte_eth_dev_data *data; struct sge_eth_tx_stats stats; /* queue statistics */ rte_spinlock_t txq_lock; @@ -308,7 +289,7 @@ struct adapter { struct rte_pci_device *pdev; /* associated rte pci device */ struct rte_eth_dev *eth_dev; /* first port's rte eth device */ struct adapter_params params; /* adapter parameters */ - struct port_info port[MAX_NPORTS]; /* ports belonging to this adapter */ + struct port_info *port[MAX_NPORTS];/* ports belonging to this adapter */ struct sge sge; /* associated SGE */ /* support for single-threading access to adapter mailbox registers */ @@ -327,6 +308,18 @@ struct adapter { int use_unpacked_mode; /* unpacked rx mode state */ }; +/** + * adap2pinfo - return the port_info of a port + * @adap: the adapter + * @idx: the port index + * + * Return the port_info structure for the port of the given index. + */ +static inline struct port_info *adap2pinfo(const struct adapter *adap, int idx) +{ + return adap->port[idx]; +} + #define CXGBE_PCI_REG(reg) rte_read32(reg) static inline uint64_t cxgbe_read_addr64(volatile void *addr) @@ -602,7 +595,7 @@ static inline int t4_os_find_pci_capability(struct adapter *adapter, int cap) static inline void t4_os_set_hw_addr(struct adapter *adapter, int port_idx, u8 hw_addr[]) { - struct port_info *pi = &adapter->port[port_idx]; + struct port_info *pi = adap2pinfo(adapter, port_idx); ether_addr_copy((struct ether_addr *)hw_addr, &pi->eth_dev->data->mac_addrs[0]); @@ -687,18 +680,6 @@ static inline void t4_os_atomic_list_del(struct mbox_entry *entry, t4_os_unlock(lock); } -/** - * adap2pinfo - return the port_info of a port - * @adap: the adapter - * @idx: the port index - * - * Return the port_info structure for the port of the given index. 
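Later hunks in this file turn struct adapter's port member from an inline array of port_info into an array of pointers and promote adap2pinfo() so that it is the one place that knows the storage layout. A toy before/after sketch, with simplified fields and an assumed MAX_NPORTS value:

    #define MAX_NPORTS 4

    struct port_info {
        int port_id; /* stand-in for the real fields */
    };

    struct adapter {
        /* was: struct port_info port[MAX_NPORTS]; */
        struct port_info *port[MAX_NPORTS];
    };

    static inline struct port_info *
    adap2pinfo(const struct adapter *adap, int idx)
    {
        /* was: return &adap->port[idx]; */
        return adap->port[idx];
    }

Call sites such as t4_os_set_hw_addr() accordingly switch from &adapter->port[port_idx] to adap2pinfo(adapter, port_idx), keeping the change of representation contained in the accessor.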
- */ -static inline struct port_info *adap2pinfo(struct adapter *adap, int idx) -{ - return &adap->port[idx]; -} - void *t4_alloc_mem(size_t size); void t4_free_mem(void *addr); #define t4_os_alloc(_size) t4_alloc_mem((_size)) @@ -716,6 +697,7 @@ int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf, int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, const struct pkt_gl *gl); int t4_sge_init(struct adapter *adap); +int t4vf_sge_init(struct adapter *adap); int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, struct rte_eth_dev *eth_dev, uint16_t queue_id, unsigned int iqid, int socket_id); @@ -735,6 +717,7 @@ int cxgb4_set_rspq_intr_params(struct sge_rspq *q, unsigned int us, unsigned int cnt); int cxgbe_poll(struct sge_rspq *q, struct rte_mbuf **rx_pkts, unsigned int budget, unsigned int *work_done); -int cxgb4_write_rss(const struct port_info *pi, const u16 *queues); +int cxgbe_write_rss(const struct port_info *pi, const u16 *queues); +int cxgbe_write_rss_conf(const struct port_info *pi, uint64_t flags); #endif /* __T4_ADAPTER_H__ */ diff --git a/drivers/net/cxgbe/base/common.h b/drivers/net/cxgbe/base/common.h index 1eda57d0..155a3028 100644 --- a/drivers/net/cxgbe/base/common.h +++ b/drivers/net/cxgbe/base/common.h @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2014-2017 Chelsio Communications. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Chelsio Communications nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Chelsio Communications. + * All rights reserved. 
*/ #ifndef __CHELSIO_COMMON_H @@ -36,6 +8,7 @@ #include "cxgbe_compat.h" #include "t4_hw.h" +#include "t4vf_hw.h" #include "t4_chip_type.h" #include "t4fw_interface.h" @@ -62,16 +35,16 @@ enum dev_master { MASTER_CANT, MASTER_MAY, MASTER_MUST }; enum dev_state { DEV_STATE_UNINIT, DEV_STATE_INIT, DEV_STATE_ERR }; -enum { +enum cc_pause { PAUSE_RX = 1 << 0, PAUSE_TX = 1 << 1, PAUSE_AUTONEG = 1 << 2 }; -enum { - FEC_RS = 1 << 0, - FEC_BASER_RS = 1 << 1, - FEC_RESERVED = 1 << 2, +enum cc_fec { + FEC_AUTO = 1 << 0, /* IEEE 802.3 "automatic" */ + FEC_RS = 1 << 1, /* Reed-Solomon */ + FEC_BASER_RS = 1 << 2, /* BaseR/Reed-Solomon */ }; struct port_stats { @@ -209,12 +182,50 @@ struct arch_specific_params { u16 mps_tcam_size; }; +/* + * Global Receive Side Scaling (RSS) parameters in host-native format. + */ +struct rss_params { + unsigned int mode; /* RSS mode */ + union { + struct { + uint synmapen:1; /* SYN Map Enable */ + uint syn4tupenipv6:1; /* en 4-tuple IPv6 SYNs hash */ + uint syn2tupenipv6:1; /* en 2-tuple IPv6 SYNs hash */ + uint syn4tupenipv4:1; /* en 4-tuple IPv4 SYNs hash */ + uint syn2tupenipv4:1; /* en 2-tuple IPv4 SYNs hash */ + uint ofdmapen:1; /* Offload Map Enable */ + uint tnlmapen:1; /* Tunnel Map Enable */ + uint tnlalllookup:1; /* Tunnel All Lookup */ + uint hashtoeplitz:1; /* use Toeplitz hash */ + } basicvirtual; + } u; +}; + +/* + * Maximum resources provisioned for a PCI VF. + */ +struct vf_resources { + unsigned int nvi; /* N virtual interfaces */ + unsigned int neq; /* N egress Qs */ + unsigned int nethctrl; /* N egress ETH or CTRL Qs */ + unsigned int niqflint; /* N ingress Qs/w free list(s) & intr */ + unsigned int niq; /* N ingress Qs */ + unsigned int tc; /* PCI-E traffic class */ + unsigned int pmask; /* port access rights mask */ + unsigned int nexactf; /* N exact MPS filters */ + unsigned int r_caps; /* read capabilities */ + unsigned int wx_caps; /* write/execute capabilities */ +}; + struct adapter_params { struct sge_params sge; struct tp_params tp; struct vpd_params vpd; struct pci_params pci; struct devlog_params devlog; + struct rss_params rss; + struct vf_resources vfres; enum pcie_memwin drv_memwin; unsigned int sf_size; /* serial flash size in bytes */ @@ -239,19 +250,40 @@ struct adapter_params { struct arch_specific_params arch; /* chip specific params */ bool ulptx_memwrite_dsgl; /* use of T5 DSGL allowed */ + u8 fw_caps_support; /* 32-bit Port Capabilities */ +}; + +/* Firmware Port Capabilities types. + */ +typedef u16 fw_port_cap16_t; /* 16-bit Port Capabilities integral value */ +typedef u32 fw_port_cap32_t; /* 32-bit Port Capabilities integral value */ + +enum fw_caps { + FW_CAPS_UNKNOWN = 0, /* 0'ed out initial state */ + FW_CAPS16 = 1, /* old Firmware: 16-bit Port Capabilities */ + FW_CAPS32 = 2, /* new Firmware: 32-bit Port Capabilities */ }; struct link_config { - unsigned short supported; /* link capabilities */ - unsigned short advertising; /* advertised capabilities */ - unsigned int requested_speed; /* speed user has requested */ - unsigned int speed; /* actual link speed */ - unsigned char requested_fc; /* flow control user has requested */ - unsigned char fc; /* actual link flow control */ - unsigned char requested_fec; /* Forward Error Correction user */ - unsigned char fec; /* has requested and actual FEC */ - unsigned char autoneg; /* autonegotiating? */ - unsigned char link_ok; /* link up? 
*/ + fw_port_cap32_t pcaps; /* link capabilities */ + fw_port_cap32_t acaps; /* advertised capabilities */ + + u32 requested_speed; /* speed (Mb/s) user has requested */ + u32 speed; /* actual link speed (Mb/s) */ + + enum cc_pause requested_fc; /* flow control user has requested */ + enum cc_pause fc; /* actual link flow control */ + + enum cc_fec auto_fec; /* Forward Error Correction + * "automatic" (IEEE 802.3) + */ + enum cc_fec requested_fec; /* Forward Error Correction requested */ + enum cc_fec fec; /* Forward Error Correction actual */ + + unsigned char autoneg; /* autonegotiating? */ + + unsigned char link_ok; /* link up? */ + unsigned char link_down_rc; /* link down reason */ }; #include "adapter.h" @@ -269,6 +301,11 @@ static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask, delay, NULL); } +static inline int is_pf4(struct adapter *adap) +{ + return adap->pf == 4; +} + #define for_each_port(adapter, iter) \ for (iter = 0; iter < (adapter)->params.nports; ++iter) @@ -285,9 +322,12 @@ int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox, enum dev_master master, enum dev_state *state); int t4_fw_bye(struct adapter *adap, unsigned int mbox); int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset); +int t4vf_fw_reset(struct adapter *adap); int t4_fw_halt(struct adapter *adap, unsigned int mbox, int reset); int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset); int t4_fl_pkt_align(struct adapter *adap); +int t4vf_fl_pkt_align(struct adapter *adap, u32 sge_control, u32 sge_control2); +int t4vf_get_vfres(struct adapter *adap); int t4_fixup_host_params_compat(struct adapter *adap, unsigned int page_size, unsigned int cache_line_size, enum chip_type chip_compat); @@ -297,6 +337,13 @@ int t4_fw_initialize(struct adapter *adap, unsigned int mbox); int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int nparams, const u32 *params, u32 *val); +int t4vf_query_params(struct adapter *adap, unsigned int nparams, + const u32 *params, u32 *vals); +int t4vf_get_dev_params(struct adapter *adap); +int t4vf_get_vpd_params(struct adapter *adap); +int t4vf_get_rss_glb_config(struct adapter *adap); +int t4vf_set_params(struct adapter *adapter, unsigned int nparams, + const u32 *params, const u32 *vals); int t4_set_params_timeout(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int nparams, const u32 *params, @@ -379,6 +426,21 @@ static inline int t4_wr_mbox_ns(struct adapter *adap, int mbox, const void *cmd, return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, false); } +int t4vf_wr_mbox_core(struct adapter *, const void *, int, void *, bool); + +static inline int t4vf_wr_mbox(struct adapter *adapter, const void *cmd, + int size, void *rpl) +{ + return t4vf_wr_mbox_core(adapter, cmd, size, rpl, true); +} + +static inline int t4vf_wr_mbox_ns(struct adapter *adapter, const void *cmd, + int size, void *rpl) +{ + return t4vf_wr_mbox_core(adapter, cmd, size, rpl, false); +} + + void t4_read_indirect(struct adapter *adap, unsigned int addr_reg, unsigned int data_reg, u32 *vals, unsigned int nregs, unsigned int start_idx); @@ -394,22 +456,34 @@ unsigned int t4_get_mps_bg_map(struct adapter *adapter, unsigned int pidx); unsigned int t4_get_tp_ch_map(struct adapter *adapter, unsigned int pidx); const char *t4_get_port_type_description(enum fw_port_type port_type); void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p); +void 
t4vf_get_port_stats(struct adapter *adapter, int pidx, + struct port_stats *p); void t4_get_port_stats_offset(struct adapter *adap, int idx, struct port_stats *stats, struct port_stats *offset); void t4_clr_port_stats(struct adapter *adap, int idx); +void init_link_config(struct link_config *lc, fw_port_cap32_t pcaps, + fw_port_cap32_t acaps); void t4_reset_link_config(struct adapter *adap, int idx); int t4_get_version_info(struct adapter *adapter); void t4_dump_version_info(struct adapter *adapter); int t4_get_flash_params(struct adapter *adapter); int t4_get_chip_type(struct adapter *adap, int ver); int t4_prep_adapter(struct adapter *adapter); +int t4vf_prep_adapter(struct adapter *adapter); int t4_port_init(struct adapter *adap, int mbox, int pf, int vf); +int t4vf_port_init(struct adapter *adap); int t4_init_rss_mode(struct adapter *adap, int mbox); int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid, int start, int n, const u16 *rspq, unsigned int nrspq); int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid, unsigned int flags, unsigned int defq); +int t4_read_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid, + u64 *flags, unsigned int *defq); +void t4_fw_tp_pio_rw(struct adapter *adap, u32 *vals, unsigned int nregs, + unsigned int start_index, unsigned int rw); +void t4_write_rss_key(struct adapter *adap, u32 *key, int idx); +void t4_read_rss_key(struct adapter *adap, u32 *key); enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS }; int t4_bar2_sge_qregs(struct adapter *adapter, unsigned int qid, @@ -421,8 +495,10 @@ int t4_init_tp_params(struct adapter *adap); int t4_filter_field_shift(const struct adapter *adap, unsigned int filter_sel); int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl); unsigned int t4_get_regs_len(struct adapter *adap); +unsigned int t4vf_get_pf_from_vf(struct adapter *adap); void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size); int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data); int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data); int t4_seeprom_wp(struct adapter *adapter, int enable); +fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16); #endif /* __CHELSIO_COMMON_H */ diff --git a/drivers/net/cxgbe/base/t4_chip_type.h b/drivers/net/cxgbe/base/t4_chip_type.h index cd7a9282..c0c5d0b2 100644 --- a/drivers/net/cxgbe/base/t4_chip_type.h +++ b/drivers/net/cxgbe/base/t4_chip_type.h @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2014-2017 Chelsio Communications. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Chelsio Communications nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Chelsio Communications. + * All rights reserved. */ #ifndef __T4_CHIP_TYPE_H__ diff --git a/drivers/net/cxgbe/base/t4_hw.c b/drivers/net/cxgbe/base/t4_hw.c index 56f38c83..e5ef73b6 100644 --- a/drivers/net/cxgbe/base/t4_hw.c +++ b/drivers/net/cxgbe/base/t4_hw.c @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2014-2017 Chelsio Communications. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Chelsio Communications nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Chelsio Communications. + * All rights reserved. */ #include @@ -55,9 +27,6 @@ #include "t4_regs_values.h" #include "t4fw_interface.h" -static void init_link_config(struct link_config *lc, unsigned int pcaps, - unsigned int acaps); - /** * t4_read_mtu_tbl - returns the values in the HW path MTU table * @adap: the adapter @@ -2166,6 +2135,91 @@ int t4_seeprom_wp(struct adapter *adapter, int enable) return t4_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 
0xc : 0); } +/** + * t4_fw_tp_pio_rw - Access TP PIO through LDST + * @adap: the adapter + * @vals: where the indirect register values are stored/written + * @nregs: how many indirect registers to read/write + * @start_idx: index of first indirect register to read/write + * @rw: Read (1) or Write (0) + * + * Access TP PIO registers through LDST + */ +void t4_fw_tp_pio_rw(struct adapter *adap, u32 *vals, unsigned int nregs, + unsigned int start_index, unsigned int rw) +{ + int cmd = FW_LDST_ADDRSPC_TP_PIO; + struct fw_ldst_cmd c; + unsigned int i; + int ret; + + for (i = 0 ; i < nregs; i++) { + memset(&c, 0, sizeof(c)); + c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) | + F_FW_CMD_REQUEST | + (rw ? F_FW_CMD_READ : + F_FW_CMD_WRITE) | + V_FW_LDST_CMD_ADDRSPACE(cmd)); + c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c)); + + c.u.addrval.addr = cpu_to_be32(start_index + i); + c.u.addrval.val = rw ? 0 : cpu_to_be32(vals[i]); + ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); + if (ret == 0) { + if (rw) + vals[i] = be32_to_cpu(c.u.addrval.val); + } + } +} + +/** + * t4_read_rss_key - read the global RSS key + * @adap: the adapter + * @key: 10-entry array holding the 320-bit RSS key + * + * Reads the global 320-bit RSS key. + */ +void t4_read_rss_key(struct adapter *adap, u32 *key) +{ + t4_fw_tp_pio_rw(adap, key, 10, A_TP_RSS_SECRET_KEY0, 1); +} + +/** + * t4_write_rss_key - program one of the RSS keys + * @adap: the adapter + * @key: 10-entry array holding the 320-bit RSS key + * @idx: which RSS key to write + * + * Writes one of the RSS keys with the given 320-bit value. If @idx is + * 0..15 the corresponding entry in the RSS key table is written, + * otherwise the global RSS key is written. + */ +void t4_write_rss_key(struct adapter *adap, u32 *key, int idx) +{ + u32 vrt = t4_read_reg(adap, A_TP_RSS_CONFIG_VRT); + u8 rss_key_addr_cnt = 16; + + /* T6 and later: for KeyMode 3 (per-vf and per-vf scramble), + * allows access to key addresses 16-63 by using KeyWrAddrX + * as index[5:4](upper 2) into key table + */ + if ((CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) && + (vrt & F_KEYEXTEND) && (G_KEYMODE(vrt) == 3)) + rss_key_addr_cnt = 32; + + t4_fw_tp_pio_rw(adap, key, 10, A_TP_RSS_SECRET_KEY0, 0); + + if (idx >= 0 && idx < rss_key_addr_cnt) { + if (rss_key_addr_cnt > 16) + t4_write_reg(adap, A_TP_RSS_CONFIG_VRT, + V_KEYWRADDRX(idx >> 4) | + V_T6_VFWRADDR(idx) | F_KEYWREN); + else + t4_write_reg(adap, A_TP_RSS_CONFIG_VRT, + V_KEYWRADDR(idx) | F_KEYWREN); + } +} + /** * t4_config_rss_range - configure a portion of the RSS mapping table * @adapter: the adapter @@ -2257,7 +2311,11 @@ int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid, * Send this portion of the RRS table update to the firmware; * bail out on any errors. 
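t4_read_rss_key() and t4_write_rss_key() above are thin wrappers around the LDST-based t4_fw_tp_pio_rw() accessor. A hedged caller sketch follows, assuming the surrounding driver types; demo_rotate_rss_key() and the XOR perturbation are illustrative only, and per the comment above a negative idx selects the global key:

    static void demo_rotate_rss_key(struct adapter *adap)
    {
        u32 key[10]; /* 320-bit key = 10 x 32-bit words */
        unsigned int i;

        t4_read_rss_key(adap, key);
        for (i = 0; i < 10; i++)
            key[i] ^= 0xa5a5a5a5; /* arbitrary demo perturbation */
        t4_write_rss_key(adap, key, -1); /* idx < 0: global RSS key */
    }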
*/ - ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL); + if (is_pf4(adapter)) + ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), + NULL); + else + ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); if (ret) return ret; } @@ -2287,7 +2345,44 @@ int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid, c.retval_len16 = cpu_to_be32(FW_LEN16(c)); c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags | V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq)); - return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL); + if (is_pf4(adapter)) + return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL); + else + return t4vf_wr_mbox(adapter, &c, sizeof(c), NULL); +} + +/** + * t4_read_config_vi_rss - read the configured per VI RSS settings + * @adapter: the adapter + * @mbox: mbox to use for the FW command + * @viid: the VI id + * @flags: where to place the configured flags + * @defq: where to place the id of the default RSS queue for the VI. + * + * Read configured VI-specific RSS properties. + */ +int t4_read_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid, + u64 *flags, unsigned int *defq) +{ + struct fw_rss_vi_config_cmd c; + unsigned int result; + int ret; + + memset(&c, 0, sizeof(c)); + c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) | + F_FW_CMD_REQUEST | F_FW_CMD_READ | + V_FW_RSS_VI_CONFIG_CMD_VIID(viid)); + c.retval_len16 = cpu_to_be32(FW_LEN16(c)); + ret = t4_wr_mbox(adapter, mbox, &c, sizeof(c), &c); + if (!ret) { + result = be32_to_cpu(c.u.basicvirtual.defaultq_to_udpen); + if (defq) + *defq = G_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(result); + if (flags) + *flags = result & M_FW_RSS_VI_CONFIG_CMD_DEFAULTQ; + } + + return ret; } /** @@ -2670,14 +2765,142 @@ void t4_dump_version_info(struct adapter *adapter) G_FW_HDR_FW_VER_BUILD(adapter->params.er_vers)); } -#define ADVERT_MASK (V_FW_PORT_CAP_SPEED(M_FW_PORT_CAP_SPEED) | \ - FW_PORT_CAP_ANEG) +#define ADVERT_MASK (V_FW_PORT_CAP32_SPEED(M_FW_PORT_CAP32_SPEED) | \ + FW_PORT_CAP32_ANEG) +/** + * fwcaps16_to_caps32 - convert 16-bit Port Capabilities to 32-bits + * @caps16: a 16-bit Port Capabilities value + * + * Returns the equivalent 32-bit Port Capabilities value. + */ +fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16) +{ + fw_port_cap32_t caps32 = 0; + +#define CAP16_TO_CAP32(__cap) \ + do { \ + if (caps16 & FW_PORT_CAP_##__cap) \ + caps32 |= FW_PORT_CAP32_##__cap; \ + } while (0) + + CAP16_TO_CAP32(SPEED_100M); + CAP16_TO_CAP32(SPEED_1G); + CAP16_TO_CAP32(SPEED_25G); + CAP16_TO_CAP32(SPEED_10G); + CAP16_TO_CAP32(SPEED_40G); + CAP16_TO_CAP32(SPEED_100G); + CAP16_TO_CAP32(FC_RX); + CAP16_TO_CAP32(FC_TX); + CAP16_TO_CAP32(ANEG); + CAP16_TO_CAP32(MDIX); + CAP16_TO_CAP32(MDIAUTO); + CAP16_TO_CAP32(FEC_RS); + CAP16_TO_CAP32(FEC_BASER_RS); + CAP16_TO_CAP32(802_3_PAUSE); + CAP16_TO_CAP32(802_3_ASM_DIR); + +#undef CAP16_TO_CAP32 + + return caps32; +} + +/** + * fwcaps32_to_caps16 - convert 32-bit Port Capabilities to 16-bits + * @caps32: a 32-bit Port Capabilities value + * + * Returns the equivalent 16-bit Port Capabilities value. Note that + * not all 32-bit Port Capabilities can be represented in the 16-bit + * Port Capabilities and some fields/values may not make it. 
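The t4_config_rss_range() and t4_config_vi_rss() changes above are the first instances of a dispatch idiom repeated through the rest of this file: a PF4 driver owns a firmware mailbox and keeps the classic t4_wr_mbox() path, while a VF relays the same command structure through t4vf_wr_mbox(). A condensed sketch of the idiom; demo_send_fw_cmd() is a hypothetical name, and the mailbox signatures are as declared in common.h:

    static int demo_send_fw_cmd(struct adapter *adap, unsigned int mbox,
                                const void *cmd, int len, void *rpl)
    {
        /* PF4 drives the firmware directly; a VF must go through its
         * own mailbox channel instead. */
        if (is_pf4(adap))
            return t4_wr_mbox(adap, mbox, cmd, len, rpl);
        return t4vf_wr_mbox(adap, cmd, len, rpl);
    }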
+ */ +static fw_port_cap16_t fwcaps32_to_caps16(fw_port_cap32_t caps32) +{ + fw_port_cap16_t caps16 = 0; + +#define CAP32_TO_CAP16(__cap) \ + do { \ + if (caps32 & FW_PORT_CAP32_##__cap) \ + caps16 |= FW_PORT_CAP_##__cap; \ + } while (0) + + CAP32_TO_CAP16(SPEED_100M); + CAP32_TO_CAP16(SPEED_1G); + CAP32_TO_CAP16(SPEED_10G); + CAP32_TO_CAP16(SPEED_25G); + CAP32_TO_CAP16(SPEED_40G); + CAP32_TO_CAP16(SPEED_100G); + CAP32_TO_CAP16(FC_RX); + CAP32_TO_CAP16(FC_TX); + CAP32_TO_CAP16(802_3_PAUSE); + CAP32_TO_CAP16(802_3_ASM_DIR); + CAP32_TO_CAP16(ANEG); + CAP32_TO_CAP16(MDIX); + CAP32_TO_CAP16(MDIAUTO); + CAP32_TO_CAP16(FEC_RS); + CAP32_TO_CAP16(FEC_BASER_RS); + +#undef CAP32_TO_CAP16 + + return caps16; +} + +/* Translate Firmware Pause specification to Common Code */ +static inline enum cc_pause fwcap_to_cc_pause(fw_port_cap32_t fw_pause) +{ + enum cc_pause cc_pause = 0; + + if (fw_pause & FW_PORT_CAP32_FC_RX) + cc_pause |= PAUSE_RX; + if (fw_pause & FW_PORT_CAP32_FC_TX) + cc_pause |= PAUSE_TX; + + return cc_pause; +} + +/* Translate Common Code Pause Frame specification into Firmware */ +static inline fw_port_cap32_t cc_to_fwcap_pause(enum cc_pause cc_pause) +{ + fw_port_cap32_t fw_pause = 0; + + if (cc_pause & PAUSE_RX) + fw_pause |= FW_PORT_CAP32_FC_RX; + if (cc_pause & PAUSE_TX) + fw_pause |= FW_PORT_CAP32_FC_TX; + + return fw_pause; +} + +/* Translate Firmware Forward Error Correction specification to Common Code */ +static inline enum cc_fec fwcap_to_cc_fec(fw_port_cap32_t fw_fec) +{ + enum cc_fec cc_fec = 0; + + if (fw_fec & FW_PORT_CAP32_FEC_RS) + cc_fec |= FEC_RS; + if (fw_fec & FW_PORT_CAP32_FEC_BASER_RS) + cc_fec |= FEC_BASER_RS; + + return cc_fec; +} + +/* Translate Common Code Forward Error Correction specification to Firmware */ +static inline fw_port_cap32_t cc_to_fwcap_fec(enum cc_fec cc_fec) +{ + fw_port_cap32_t fw_fec = 0; + + if (cc_fec & FEC_RS) + fw_fec |= FW_PORT_CAP32_FEC_RS; + if (cc_fec & FEC_BASER_RS) + fw_fec |= FW_PORT_CAP32_FEC_BASER_RS; + + return fw_fec; +} /** * t4_link_l1cfg - apply link configuration to MAC/PHY - * @phy: the PHY to setup - * @mac: the MAC to setup - * @lc: the requested link configuration + * @adapter: the adapter + * @mbox: the Firmware Mailbox to use + * @port: the Port ID + * @lc: the Port's Link Configuration * * Set up a port's MAC and PHY according to a desired link configuration. 
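The four fwcap/cc translators above keep the common-code enums decoupled from the firmware bit layout. A small illustration of the round trip, assuming the static helpers are visible in this file; the commented values follow directly from the definitions above:

    static void demo_pause_roundtrip(void)
    {
        enum cc_pause cc = PAUSE_RX | PAUSE_TX;
        fw_port_cap32_t fw = cc_to_fwcap_pause(cc);

        /* fw == FW_PORT_CAP32_FC_RX | FW_PORT_CAP32_FC_TX */
        cc = fwcap_to_cc_pause(fw);
        /* cc == PAUSE_RX | PAUSE_TX again. The FEC pair behaves the
         * same way for FEC_RS/FEC_BASER_RS; FEC_AUTO has no firmware
         * bit and is resolved to lc->auto_fec in t4_link_l1cfg()
         * before anything is sent to the firmware. */
        (void)cc;
    }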
* - If the PHY can auto-negotiate first decide what to advertise, then @@ -2689,48 +2912,60 @@ void t4_dump_version_info(struct adapter *adapter) int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port, struct link_config *lc) { - struct fw_port_cmd c; - unsigned int mdi = V_FW_PORT_CAP_MDI(FW_PORT_CAP_MDI_AUTO); - unsigned int fc, fec; + unsigned int fw_mdi = V_FW_PORT_CAP32_MDI(FW_PORT_CAP32_MDI_AUTO); + unsigned int fw_caps = adap->params.fw_caps_support; + fw_port_cap32_t fw_fc, cc_fec, fw_fec, rcap; + struct fw_port_cmd cmd; lc->link_ok = 0; - fc = 0; - if (lc->requested_fc & PAUSE_RX) - fc |= FW_PORT_CAP_FC_RX; - if (lc->requested_fc & PAUSE_TX) - fc |= FW_PORT_CAP_FC_TX; - - fec = 0; - if (lc->requested_fec & FEC_RS) - fec |= FW_PORT_CAP_FEC_RS; - if (lc->requested_fec & FEC_BASER_RS) - fec |= FW_PORT_CAP_FEC_BASER_RS; - if (lc->requested_fec & FEC_RESERVED) - fec |= FW_PORT_CAP_FEC_RESERVED; - memset(&c, 0, sizeof(c)); - c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) | - F_FW_CMD_REQUEST | F_FW_CMD_EXEC | - V_FW_PORT_CMD_PORTID(port)); - c.action_to_len16 = - cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) | - FW_LEN16(c)); - - if (!(lc->supported & FW_PORT_CAP_ANEG)) { - c.u.l1cfg.rcap = cpu_to_be32((lc->supported & ADVERT_MASK) | - fc | fec); + fw_fc = cc_to_fwcap_pause(lc->requested_fc); + + /* Convert Common Code Forward Error Control settings into the + * Firmware's API. If the current Requested FEC has "Automatic" + * (IEEE 802.3) specified, then we use whatever the Firmware + * sent us as part of it's IEEE 802.3-based interpratation of + * the Transceiver Module EPROM FEC parameters. Otherwise we + * use whatever is in the current Requested FEC settings. + */ + if (lc->requested_fec & FEC_AUTO) + cc_fec = lc->auto_fec; + else + cc_fec = lc->requested_fec; + fw_fec = cc_to_fwcap_fec(cc_fec); + + /* Figure out what our Requested Port Capabilities are going to be. + */ + if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) { + rcap = (lc->pcaps & ADVERT_MASK) | fw_fc | fw_fec; lc->fc = lc->requested_fc & ~PAUSE_AUTONEG; - lc->fec = lc->requested_fec; + lc->fec = cc_fec; } else if (lc->autoneg == AUTONEG_DISABLE) { - c.u.l1cfg.rcap = cpu_to_be32(lc->requested_speed | fc | - fec | mdi); + rcap = lc->requested_speed | fw_fc | fw_fec | fw_mdi; lc->fc = lc->requested_fc & ~PAUSE_AUTONEG; - lc->fec = lc->requested_fec; + lc->fec = cc_fec; } else { - c.u.l1cfg.rcap = cpu_to_be32(lc->advertising | fc | fec | mdi); + rcap = lc->acaps | fw_fc | fw_fec | fw_mdi; } - return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); + /* And send that on to the Firmware ... + */ + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) | + F_FW_CMD_REQUEST | F_FW_CMD_EXEC | + V_FW_PORT_CMD_PORTID(port)); + cmd.action_to_len16 = + cpu_to_be32(V_FW_PORT_CMD_ACTION(fw_caps == FW_CAPS16 ? 
+ FW_PORT_ACTION_L1_CFG : + FW_PORT_ACTION_L1_CFG32) | + FW_LEN16(cmd)); + + if (fw_caps == FW_CAPS16) + cmd.u.l1cfg.rcap = cpu_to_be32(fwcaps32_to_caps16(rcap)); + else + cmd.u.l1cfg32.rcap32 = cpu_to_be32(rcap); + + return t4_wr_mbox(adap, mbox, &cmd, sizeof(cmd), NULL); } /** @@ -3823,12 +4058,17 @@ int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf, memset(&c, 0, sizeof(c)); c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST | - F_FW_CMD_EXEC | V_FW_VI_CMD_PFN(pf) | - V_FW_VI_CMD_VFN(vf)); + F_FW_CMD_EXEC); + if (is_pf4(adap)) + c.op_to_vfn |= cpu_to_be32(V_FW_VI_CMD_PFN(pf) | + V_FW_VI_CMD_VFN(vf)); c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_FREE | FW_LEN16(c)); c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_VIID(viid)); - return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); + if (is_pf4(adap)) + return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); + else + return t4vf_wr_mbox(adap, &c, sizeof(c), NULL); } /** @@ -3874,7 +4114,11 @@ int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid, V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) | V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) | V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex)); - return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok); + if (is_pf4(adap)) + return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, + sleep_ok); + else + return t4vf_wr_mbox(adap, &c, sizeof(c), NULL); } /** @@ -3921,7 +4165,10 @@ int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid, V_FW_VI_MAC_CMD_IDX(idx)); memcpy(p->macaddr, addr, sizeof(p->macaddr)); - ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); + if (is_pf4(adap)) + ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); + else + ret = t4vf_wr_mbox(adap, &c, sizeof(c), &c); if (ret == 0) { ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx)); if (ret >= max_mac_addr) @@ -3955,7 +4202,10 @@ int t4_enable_vi_params(struct adapter *adap, unsigned int mbox, V_FW_VI_ENABLE_CMD_EEN(tx_en) | V_FW_VI_ENABLE_CMD_DCB_INFO(dcb_en) | FW_LEN16(c)); - return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL); + if (is_pf4(adap)) + return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL); + else + return t4vf_wr_mbox_ns(adap, &c, sizeof(c), NULL); } /** @@ -3996,15 +4246,20 @@ int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start, memset(&c, 0, sizeof(c)); c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST | - F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) | - V_FW_IQ_CMD_VFN(vf)); + F_FW_CMD_EXEC); c.alloc_to_len16 = cpu_to_be32(V_FW_IQ_CMD_IQSTART(start) | V_FW_IQ_CMD_IQSTOP(!start) | FW_LEN16(c)); c.iqid = cpu_to_be16(iqid); c.fl0id = cpu_to_be16(fl0id); c.fl1id = cpu_to_be16(fl1id); - return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); + if (is_pf4(adap)) { + c.op_to_vfn |= cpu_to_be32(V_FW_IQ_CMD_PFN(pf) | + V_FW_IQ_CMD_VFN(vf)); + return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); + } else { + return t4vf_wr_mbox(adap, &c, sizeof(c), NULL); + } } /** @@ -4028,14 +4283,19 @@ int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, memset(&c, 0, sizeof(c)); c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST | - F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) | - V_FW_IQ_CMD_VFN(vf)); + F_FW_CMD_EXEC); + if (is_pf4(adap)) + c.op_to_vfn |= cpu_to_be32(V_FW_IQ_CMD_PFN(pf) | + V_FW_IQ_CMD_VFN(vf)); c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_FREE | FW_LEN16(c)); c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype)); c.iqid = cpu_to_be16(iqid); c.fl0id = cpu_to_be16(fl0id); c.fl1id = cpu_to_be16(fl1id); - 
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); + if (is_pf4(adap)) + return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); + else + return t4vf_wr_mbox(adap, &c, sizeof(c), NULL); } /** @@ -4055,12 +4315,179 @@ int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, memset(&c, 0, sizeof(c)); c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | - F_FW_CMD_REQUEST | F_FW_CMD_EXEC | - V_FW_EQ_ETH_CMD_PFN(pf) | - V_FW_EQ_ETH_CMD_VFN(vf)); + F_FW_CMD_REQUEST | F_FW_CMD_EXEC); + if (is_pf4(adap)) + c.op_to_vfn |= cpu_to_be32(V_FW_IQ_CMD_PFN(pf) | + V_FW_IQ_CMD_VFN(vf)); c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c)); c.eqid_pkd = cpu_to_be32(V_FW_EQ_ETH_CMD_EQID(eqid)); - return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); + if (is_pf4(adap)) + return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); + else + return t4vf_wr_mbox(adap, &c, sizeof(c), NULL); +} + +/** + * t4_link_down_rc_str - return a string for a Link Down Reason Code + * @link_down_rc: Link Down Reason Code + * + * Returns a string representation of the Link Down Reason Code. + */ +static const char *t4_link_down_rc_str(unsigned char link_down_rc) +{ + static const char * const reason[] = { + "Link Down", + "Remote Fault", + "Auto-negotiation Failure", + "Reserved", + "Insufficient Airflow", + "Unable To Determine Reason", + "No RX Signal Detected", + "Reserved", + }; + + if (link_down_rc >= ARRAY_SIZE(reason)) + return "Bad Reason Code"; + + return reason[link_down_rc]; +} + +/* Return the highest speed set in the port capabilities, in Mb/s. */ +static unsigned int fwcap_to_speed(fw_port_cap32_t caps) +{ +#define TEST_SPEED_RETURN(__caps_speed, __speed) \ + do { \ + if (caps & FW_PORT_CAP32_SPEED_##__caps_speed) \ + return __speed; \ + } while (0) + + TEST_SPEED_RETURN(100G, 100000); + TEST_SPEED_RETURN(50G, 50000); + TEST_SPEED_RETURN(40G, 40000); + TEST_SPEED_RETURN(25G, 25000); + TEST_SPEED_RETURN(10G, 10000); + TEST_SPEED_RETURN(1G, 1000); + TEST_SPEED_RETURN(100M, 100); + +#undef TEST_SPEED_RETURN + + return 0; +} + +/** + * t4_handle_get_port_info - process a FW reply message + * @pi: the port info + * @rpl: start of the FW message + * + * Processes a GET_PORT_INFO FW reply message. + */ +static void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl) +{ + const struct fw_port_cmd *cmd = (const void *)rpl; + int action = G_FW_PORT_CMD_ACTION(be32_to_cpu(cmd->action_to_len16)); + fw_port_cap32_t pcaps, acaps, linkattr; + struct link_config *lc = &pi->link_cfg; + struct adapter *adapter = pi->adapter; + enum fw_port_module_type mod_type; + enum fw_port_type port_type; + unsigned int speed, fc, fec; + int link_ok, linkdnrc; + + /* Extract the various fields from the Port Information message. + */ + switch (action) { + case FW_PORT_ACTION_GET_PORT_INFO: { + u32 lstatus = be32_to_cpu(cmd->u.info.lstatus_to_modtype); + + link_ok = (lstatus & F_FW_PORT_CMD_LSTATUS) != 0; + linkdnrc = G_FW_PORT_CMD_LINKDNRC(lstatus); + port_type = G_FW_PORT_CMD_PTYPE(lstatus); + mod_type = G_FW_PORT_CMD_MODTYPE(lstatus); + pcaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.pcap)); + acaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.acap)); + + /* Unfortunately the format of the Link Status in the old + * 16-bit Port Information message isn't the same as the + * 16-bit Port Capabilities bitfield used everywhere else ... 
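Once both message formats are normalized into fw_port_cap32_t, a single helper answers the speed question: fwcap_to_speed() above returns the highest speed bit present, in Mb/s. A short sketch with an assumed link-attribute value:

    static void demo_linkattr_speed(void)
    {
        fw_port_cap32_t linkattr = FW_PORT_CAP32_FC_RX |
                                   FW_PORT_CAP32_SPEED_25G;

        /* The highest speed bit wins; the result is in Mb/s. */
        unsigned int mbps = fwcap_to_speed(linkattr); /* 25000 */
        (void)mbps;
    }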
+ */ + linkattr = 0; + if (lstatus & F_FW_PORT_CMD_RXPAUSE) + linkattr |= FW_PORT_CAP32_FC_RX; + if (lstatus & F_FW_PORT_CMD_TXPAUSE) + linkattr |= FW_PORT_CAP32_FC_TX; + if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M)) + linkattr |= FW_PORT_CAP32_SPEED_100M; + if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G)) + linkattr |= FW_PORT_CAP32_SPEED_1G; + if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G)) + linkattr |= FW_PORT_CAP32_SPEED_10G; + if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_25G)) + linkattr |= FW_PORT_CAP32_SPEED_25G; + if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G)) + linkattr |= FW_PORT_CAP32_SPEED_40G; + if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100G)) + linkattr |= FW_PORT_CAP32_SPEED_100G; + + break; + } + + case FW_PORT_ACTION_GET_PORT_INFO32: { + u32 lstatus32 = + be32_to_cpu(cmd->u.info32.lstatus32_to_cbllen32); + + link_ok = (lstatus32 & F_FW_PORT_CMD_LSTATUS32) != 0; + linkdnrc = G_FW_PORT_CMD_LINKDNRC32(lstatus32); + port_type = G_FW_PORT_CMD_PORTTYPE32(lstatus32); + mod_type = G_FW_PORT_CMD_MODTYPE32(lstatus32); + pcaps = be32_to_cpu(cmd->u.info32.pcaps32); + acaps = be32_to_cpu(cmd->u.info32.acaps32); + linkattr = be32_to_cpu(cmd->u.info32.linkattr32); + break; + } + + default: + dev_warn(adapter, "Handle Port Information: Bad Command/Action %#x\n", + be32_to_cpu(cmd->action_to_len16)); + return; + } + + fec = fwcap_to_cc_fec(acaps); + + fc = fwcap_to_cc_pause(linkattr); + speed = fwcap_to_speed(linkattr); + + if (mod_type != pi->mod_type) { + lc->auto_fec = fec; + pi->port_type = port_type; + pi->mod_type = mod_type; + t4_os_portmod_changed(adapter, pi->pidx); + } + if (link_ok != lc->link_ok || speed != lc->speed || + fc != lc->fc || fec != lc->fec) { /* something changed */ + if (!link_ok && lc->link_ok) { + lc->link_down_rc = linkdnrc; + dev_warn(adap, "Port %d link down, reason: %s\n", + pi->tx_chan, t4_link_down_rc_str(linkdnrc)); + } + lc->link_ok = link_ok; + lc->speed = speed; + lc->fc = fc; + lc->fec = fec; + lc->pcaps = pcaps; + lc->acaps = acaps & ADVERT_MASK; + + if (lc->acaps & FW_PORT_CAP32_ANEG) { + lc->autoneg = AUTONEG_ENABLE; + } else { + /* When Autoneg is disabled, user needs to set + * single speed. 
+ * Similar to cxgb4_ethtool.c: set_link_ksettings + */ + lc->acaps = 0; + lc->requested_speed = fwcap_to_speed(acaps); + lc->autoneg = AUTONEG_DISABLE; + } + } } /** @@ -4084,67 +4511,21 @@ int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl) unsigned int action = G_FW_PORT_CMD_ACTION(be32_to_cpu(p->action_to_len16)); - if (opcode == FW_PORT_CMD && action == FW_PORT_ACTION_GET_PORT_INFO) { + if (opcode == FW_PORT_CMD && + (action == FW_PORT_ACTION_GET_PORT_INFO || + action == FW_PORT_ACTION_GET_PORT_INFO32)) { /* link/module state change message */ - unsigned int speed = 0, fc = 0, i; int chan = G_FW_PORT_CMD_PORTID(be32_to_cpu(p->op_to_portid)); struct port_info *pi = NULL; - struct link_config *lc; - u32 stat = be32_to_cpu(p->u.info.lstatus_to_modtype); - int link_ok = (stat & F_FW_PORT_CMD_LSTATUS) != 0; - u32 mod = G_FW_PORT_CMD_MODTYPE(stat); - - if (stat & F_FW_PORT_CMD_RXPAUSE) - fc |= PAUSE_RX; - if (stat & F_FW_PORT_CMD_TXPAUSE) - fc |= PAUSE_TX; - if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M)) - speed = ETH_SPEED_NUM_100M; - else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G)) - speed = ETH_SPEED_NUM_1G; - else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G)) - speed = ETH_SPEED_NUM_10G; - else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_25G)) - speed = ETH_SPEED_NUM_25G; - else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G)) - speed = ETH_SPEED_NUM_40G; - else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100G)) - speed = ETH_SPEED_NUM_100G; + int i; for_each_port(adap, i) { pi = adap2pinfo(adap, i); if (pi->tx_chan == chan) break; } - lc = &pi->link_cfg; - if (mod != pi->mod_type) { - pi->mod_type = mod; - t4_os_portmod_changed(adap, i); - } - if (link_ok != lc->link_ok || speed != lc->speed || - fc != lc->fc) { /* something changed */ - if (!link_ok && lc->link_ok) { - static const char * const reason[] = { - "Link Down", - "Remote Fault", - "Auto-negotiation Failure", - "Reserved", - "Insufficient Airflow", - "Unable To Determine Reason", - "No RX Signal Detected", - "Reserved", - }; - unsigned int rc = G_FW_PORT_CMD_LINKDNRC(stat); - - dev_warn(adap, "Port %d link down, reason: %s\n", - chan, reason[rc]); - } - lc->link_ok = link_ok; - lc->speed = speed; - lc->fc = fc; - lc->supported = be16_to_cpu(p->u.info.pcap); - } + t4_handle_get_port_info(pi, rpl); } else { dev_warn(adap, "Unknown firmware reply %d\n", opcode); return -EINVAL; @@ -4173,12 +4554,10 @@ void t4_reset_link_config(struct adapter *adap, int idx) * Initializes the SW state maintained for each link, including the link's * capabilities and default speed/flow-control/autonegotiation settings. */ -static void init_link_config(struct link_config *lc, unsigned int pcaps, - unsigned int acaps) +void init_link_config(struct link_config *lc, fw_port_cap32_t pcaps, + fw_port_cap32_t acaps) { - unsigned int fec; - - lc->supported = pcaps; + lc->pcaps = pcaps; lc->requested_speed = 0; lc->speed = 0; lc->requested_fc = 0; @@ -4188,21 +4567,16 @@ static void init_link_config(struct link_config *lc, unsigned int pcaps, * For Forward Error Control, we default to whatever the Firmware * tells us the Link is currently advertising. 
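With the init_link_config() rework in the hunk continuing below, a freshly initialized link defaults its FEC request to "automatic" and, on autonegotiating ports, adds PAUSE_AUTONEG to the requested flow control. A hedged sketch of the resulting state; the capability values passed in are assumptions, and the commented results follow from the rewritten function body:

    static void demo_link_defaults(void)
    {
        struct link_config lc;

        /* Assume the firmware reported an autoneg-capable port whose
         * module currently advertises Reed-Solomon FEC. */
        init_link_config(&lc,
                         FW_PORT_CAP32_ANEG | FW_PORT_CAP32_SPEED_10G,
                         FW_PORT_CAP32_FEC_RS);

        /* lc.requested_fec == FEC_AUTO, lc.fec == lc.auto_fec == FEC_RS,
         * lc.autoneg == AUTONEG_ENABLE, PAUSE_AUTONEG is set in
         * lc.requested_fc, and lc.acaps == lc.pcaps & ADVERT_MASK. */
        (void)lc;
    }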
*/ - fec = 0; - if (acaps & FW_PORT_CAP_FEC_RS) - fec |= FEC_RS; - if (acaps & FW_PORT_CAP_FEC_BASER_RS) - fec |= FEC_BASER_RS; - if (acaps & FW_PORT_CAP_FEC_RESERVED) - fec |= FEC_RESERVED; - lc->requested_fec = fec; - lc->fec = fec; - - if (lc->supported & FW_PORT_CAP_ANEG) { - lc->advertising = lc->supported & ADVERT_MASK; + lc->auto_fec = fwcap_to_cc_fec(acaps); + lc->requested_fec = FEC_AUTO; + lc->fec = lc->auto_fec; + + if (lc->pcaps & FW_PORT_CAP32_ANEG) { + lc->acaps = lc->pcaps & ADVERT_MASK; lc->autoneg = AUTONEG_ENABLE; + lc->requested_fc |= PAUSE_AUTONEG; } else { - lc->advertising = 0; + lc->acaps = 0; lc->autoneg = AUTONEG_DISABLE; } } @@ -4723,46 +5097,95 @@ int t4_init_rss_mode(struct adapter *adap, int mbox) int t4_port_init(struct adapter *adap, int mbox, int pf, int vf) { - u8 addr[6]; + unsigned int fw_caps = adap->params.fw_caps_support; + fw_port_cap32_t pcaps, acaps; + enum fw_port_type port_type; + struct fw_port_cmd cmd; int ret, i, j = 0; - struct fw_port_cmd c; + int mdio_addr; + u32 action; + u8 addr[6]; - memset(&c, 0, sizeof(c)); + memset(&cmd, 0, sizeof(cmd)); for_each_port(adap, i) { + struct port_info *pi = adap2pinfo(adap, i); unsigned int rss_size = 0; - struct port_info *p = adap2pinfo(adap, i); while ((adap->params.portvec & (1 << j)) == 0) j++; - c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) | - F_FW_CMD_REQUEST | F_FW_CMD_READ | - V_FW_PORT_CMD_PORTID(j)); - c.action_to_len16 = cpu_to_be32(V_FW_PORT_CMD_ACTION( - FW_PORT_ACTION_GET_PORT_INFO) | - FW_LEN16(c)); - ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); + /* If we haven't yet determined whether we're talking to + * Firmware which knows the new 32-bit Port Capabilities, it's + * time to find out now. This will also tell new Firmware to + * send us Port Status Updates using the new 32-bit Port + * Capabilities version of the Port Information message. + */ + if (fw_caps == FW_CAPS_UNKNOWN) { + u32 param, val, caps; + + caps = FW_PARAMS_PARAM_PFVF_PORT_CAPS32; + param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | + V_FW_PARAMS_PARAM_X(caps)); + val = 1; + ret = t4_set_params(adap, mbox, pf, vf, 1, ¶m, + &val); + fw_caps = ret == 0 ? FW_CAPS32 : FW_CAPS16; + adap->params.fw_caps_support = fw_caps; + } + + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) | + F_FW_CMD_REQUEST | + F_FW_CMD_READ | + V_FW_PORT_CMD_PORTID(j)); + action = fw_caps == FW_CAPS16 ? FW_PORT_ACTION_GET_PORT_INFO : + FW_PORT_ACTION_GET_PORT_INFO32; + cmd.action_to_len16 = cpu_to_be32(V_FW_PORT_CMD_ACTION(action) | + FW_LEN16(cmd)); + ret = t4_wr_mbox(pi->adapter, mbox, &cmd, sizeof(cmd), &cmd); if (ret) return ret; + /* Extract the various fields from the Port Information message. + */ + if (fw_caps == FW_CAPS16) { + u32 lstatus = + be32_to_cpu(cmd.u.info.lstatus_to_modtype); + + port_type = G_FW_PORT_CMD_PTYPE(lstatus); + mdio_addr = (lstatus & F_FW_PORT_CMD_MDIOCAP) ? + (int)G_FW_PORT_CMD_MDIOADDR(lstatus) : -1; + pcaps = be16_to_cpu(cmd.u.info.pcap); + acaps = be16_to_cpu(cmd.u.info.acap); + pcaps = fwcaps16_to_caps32(pcaps); + acaps = fwcaps16_to_caps32(acaps); + } else { + u32 lstatus32 = + be32_to_cpu(cmd.u.info32.lstatus32_to_cbllen32); + + port_type = G_FW_PORT_CMD_PORTTYPE32(lstatus32); + mdio_addr = (lstatus32 & F_FW_PORT_CMD_MDIOCAP32) ? 
+ (int)G_FW_PORT_CMD_MDIOADDR32(lstatus32) : + -1; + pcaps = be32_to_cpu(cmd.u.info32.pcaps32); + acaps = be32_to_cpu(cmd.u.info32.acaps32); + } + ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size); if (ret < 0) return ret; - p->viid = ret; - p->tx_chan = j; - p->rss_size = rss_size; + pi->viid = ret; + pi->tx_chan = j; + pi->rss_size = rss_size; t4_os_set_hw_addr(adap, i, addr); - ret = be32_to_cpu(c.u.info.lstatus_to_modtype); - p->mdio_addr = (ret & F_FW_PORT_CMD_MDIOCAP) ? - G_FW_PORT_CMD_MDIOADDR(ret) : -1; - p->port_type = G_FW_PORT_CMD_PTYPE(ret); - p->mod_type = FW_PORT_MOD_TYPE_NA; + pi->port_type = port_type; + pi->mdio_addr = mdio_addr; + pi->mod_type = FW_PORT_MOD_TYPE_NA; - init_link_config(&p->link_cfg, be16_to_cpu(c.u.info.pcap), - be16_to_cpu(c.u.info.acap)); + init_link_config(&pi->link_cfg, pcaps, acaps); j++; } return 0; diff --git a/drivers/net/cxgbe/base/t4_hw.h b/drivers/net/cxgbe/base/t4_hw.h index 07498841..ac12afc0 100644 --- a/drivers/net/cxgbe/base/t4_hw.h +++ b/drivers/net/cxgbe/base/t4_hw.h @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2014-2017 Chelsio Communications. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Chelsio Communications nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Chelsio Communications. + * All rights reserved. */ #ifndef __T4_HW_H diff --git a/drivers/net/cxgbe/base/t4_msg.h b/drivers/net/cxgbe/base/t4_msg.h index 6acd749a..74b4fc19 100644 --- a/drivers/net/cxgbe/base/t4_msg.h +++ b/drivers/net/cxgbe/base/t4_msg.h @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2014-2017 Chelsio Communications. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Chelsio Communications nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Chelsio Communications. + * All rights reserved. */ #ifndef T4_MSG_H diff --git a/drivers/net/cxgbe/base/t4_pci_id_tbl.h b/drivers/net/cxgbe/base/t4_pci_id_tbl.h index 1230e738..5f5cbe04 100644 --- a/drivers/net/cxgbe/base/t4_pci_id_tbl.h +++ b/drivers/net/cxgbe/base/t4_pci_id_tbl.h @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2014-2017 Chelsio Communications. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Chelsio Communications nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Chelsio Communications. + * All rights reserved. 
*/ #ifndef __T4_PCI_ID_TBL_H__ diff --git a/drivers/net/cxgbe/base/t4_regs.h b/drivers/net/cxgbe/base/t4_regs.h index 1100e16f..c0d6ddca 100644 --- a/drivers/net/cxgbe/base/t4_regs.h +++ b/drivers/net/cxgbe/base/t4_regs.h @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2014-2017 Chelsio Communications. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Chelsio Communications nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Chelsio Communications. + * All rights reserved. 
*/ #define MYPF_BASE 0x1b000 @@ -77,6 +49,7 @@ #define SGE_BASE_ADDR 0x1000 #define A_SGE_PF_KDOORBELL 0x0 +#define A_SGE_VF_KDOORBELL 0x0 #define S_QID 15 #define M_QID 0x1ffffU @@ -103,6 +76,9 @@ #define A_SGE_PF_GTS 0x4 +#define T4VF_SGE_BASE_ADDR 0x0000 +#define A_SGE_VF_GTS 0x4 + #define S_INGRESSQID 16 #define M_INGRESSQID 0xffffU #define V_INGRESSQID(x) ((x) << S_INGRESSQID) @@ -191,6 +167,8 @@ #define V_QUEUESPERPAGEPF0(x) ((x) << S_QUEUESPERPAGEPF0) #define G_QUEUESPERPAGEPF0(x) (((x) >> S_QUEUESPERPAGEPF0) & M_QUEUESPERPAGEPF0) +#define A_SGE_EGRESS_QUEUES_PER_PAGE_VF 0x1014 + #define S_ERR_CPL_EXCEED_IQE_SIZE 22 #define V_ERR_CPL_EXCEED_IQE_SIZE(x) ((x) << S_ERR_CPL_EXCEED_IQE_SIZE) #define F_ERR_CPL_EXCEED_IQE_SIZE V_ERR_CPL_EXCEED_IQE_SIZE(1U) @@ -280,6 +258,11 @@ #define A_SGE_CONM_CTRL 0x1094 +#define S_T6_EGRTHRESHOLDPACKING 16 +#define M_T6_EGRTHRESHOLDPACKING 0xffU +#define G_T6_EGRTHRESHOLDPACKING(x) (((x) >> S_T6_EGRTHRESHOLDPACKING) & \ + M_T6_EGRTHRESHOLDPACKING) + #define S_EGRTHRESHOLD 8 #define M_EGRTHRESHOLD 0x3fU #define V_EGRTHRESHOLD(x) ((x) << S_EGRTHRESHOLD) @@ -370,6 +353,7 @@ #define G_STATSOURCE_T5(x) (((x) >> S_STATSOURCE_T5) & M_STATSOURCE_T5) #define A_SGE_INGRESS_QUEUES_PER_PAGE_PF 0x10f4 +#define A_SGE_INGRESS_QUEUES_PER_PAGE_VF 0x10f8 #define A_SGE_CONTROL2 0x1124 @@ -443,6 +427,8 @@ /* registers for module CIM */ #define CIM_BASE_ADDR 0x7b00 +#define A_CIM_VF_EXT_MAILBOX_CTRL 0x0 + #define A_CIM_PF_MAILBOX_DATA 0x240 #define A_CIM_PF_MAILBOX_CTRL 0x280 @@ -462,6 +448,8 @@ #define V_UPCRST(x) ((x) << S_UPCRST) #define F_UPCRST V_UPCRST(1U) +#define NUM_CIM_PF_MAILBOX_DATA_INSTANCES 16 + /* registers for module TP */ #define A_TP_OUT_CONFIG 0x7d04 @@ -503,9 +491,34 @@ #define V_MTUVALUE(x) ((x) << S_MTUVALUE) #define G_MTUVALUE(x) (((x) >> S_MTUVALUE) & M_MTUVALUE) +#define A_TP_RSS_CONFIG_VRT 0x7e00 + +#define S_KEYMODE 6 +#define M_KEYMODE 0x3U +#define G_KEYMODE(x) (((x) >> S_KEYMODE) & M_KEYMODE) + +#define S_KEYWRADDR 0 +#define V_KEYWRADDR(x) ((x) << S_KEYWRADDR) + +#define S_KEYWREN 4 +#define V_KEYWREN(x) ((x) << S_KEYWREN) +#define F_KEYWREN V_KEYWREN(1U) + +#define S_KEYWRADDRX 30 +#define V_KEYWRADDRX(x) ((x) << S_KEYWRADDRX) + +#define S_KEYEXTEND 26 +#define V_KEYEXTEND(x) ((x) << S_KEYEXTEND) +#define F_KEYEXTEND V_KEYEXTEND(1U) + +#define S_T6_VFWRADDR 8 +#define V_T6_VFWRADDR(x) ((x) << S_T6_VFWRADDR) + #define A_TP_PIO_ADDR 0x7e40 #define A_TP_PIO_DATA 0x7e44 +#define A_TP_RSS_SECRET_KEY0 0x40 + #define A_TP_VLAN_PRI_MAP 0x140 #define S_FRAGMENTATION 9 @@ -558,8 +571,12 @@ #define V_CSUM_HAS_PSEUDO_HDR(x) ((x) << S_CSUM_HAS_PSEUDO_HDR) #define F_CSUM_HAS_PSEUDO_HDR V_CSUM_HAS_PSEUDO_HDR(1U) +#define S_RM_OVLAN 9 +#define V_RM_OVLAN(x) ((x) << S_RM_OVLAN) + /* registers for module MPS */ #define MPS_BASE_ADDR 0x9000 +#define T4VF_MPS_BASE_ADDR 0x0100 #define S_REPLICATE 11 #define V_REPLICATE(x) ((x) << S_REPLICATE) @@ -766,6 +783,66 @@ #define A_MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_L 0x96b8 #define A_MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_H 0x96bc +#define A_MPS_VF_STAT_TX_VF_BCAST_FRAMES_L 0x88 +#define A_MPS_VF_STAT_TX_VF_MCAST_FRAMES_L 0x98 +#define A_MPS_VF_STAT_TX_VF_UCAST_FRAMES_L 0xa8 +#define A_MPS_VF_STAT_TX_VF_DROP_FRAMES_L 0xb0 +#define A_MPS_VF_STAT_RX_VF_BCAST_FRAMES_L 0xd0 +#define A_MPS_VF_STAT_RX_VF_MCAST_FRAMES_L 0xe0 +#define A_MPS_VF_STAT_RX_VF_UCAST_FRAMES_L 0xf0 +#define A_MPS_VF_STAT_RX_VF_ERR_FRAMES_L 0xf8 + +#define A_MPS_PORT0_RX_IVLAN 0x3011c + +#define S_IVLAN_ETYPE 0 +#define M_IVLAN_ETYPE 0xffffU +#define 
V_IVLAN_ETYPE(x) ((x) << S_IVLAN_ETYPE) + +#define MPS_PORT_RX_IVLAN_STRIDE 0x4000 +#define MPS_PORT_RX_IVLAN(idx) \ + (A_MPS_PORT0_RX_IVLAN + (idx) * MPS_PORT_RX_IVLAN_STRIDE) + +#define A_MPS_PORT0_RX_OVLAN0 0x30120 + +#define S_OVLAN_MASK 16 +#define M_OVLAN_MASK 0xffffU +#define V_OVLAN_MASK(x) ((x) << S_OVLAN_MASK) + +#define S_OVLAN_ETYPE 0 +#define M_OVLAN_ETYPE 0xffffU +#define V_OVLAN_ETYPE(x) ((x) << S_OVLAN_ETYPE) + +#define MPS_PORT_RX_OVLAN_STRIDE 0x4000 +#define MPS_PORT_RX_OVLAN_BASE(idx) \ +(A_MPS_PORT0_RX_OVLAN0 + (idx) * MPS_PORT_RX_OVLAN_STRIDE) +#define MPS_PORT_RX_OVLAN_REG(idx, reg) (MPS_PORT_RX_OVLAN_BASE(idx) + (reg)) + +#define A_RX_OVLAN0 0x0 +#define A_RX_OVLAN1 0x4 +#define A_RX_OVLAN2 0x8 + +#define A_MPS_PORT0_RX_CTL 0x30100 + +#define S_OVLAN_EN0 0 +#define V_OVLAN_EN0(x) ((x) << S_OVLAN_EN0) +#define F_OVLAN_EN0 V_OVLAN_EN0(1) + +#define S_OVLAN_EN1 1 +#define V_OVLAN_EN1(x) ((x) << S_OVLAN_EN1) +#define F_OVLAN_EN1 V_OVLAN_EN1(1) + +#define S_OVLAN_EN2 2 +#define V_OVLAN_EN2(x) ((x) << S_OVLAN_EN2) +#define F_OVLAN_EN2 V_OVLAN_EN2(1) + +#define S_IVLAN_EN 4 +#define V_IVLAN_EN(x) ((x) << S_IVLAN_EN) +#define F_IVLAN_EN V_IVLAN_EN(1) + +#define MPS_PORT_RX_CTL_STRIDE 0x4000 +#define MPS_PORT_RX_CTL(idx) \ + (A_MPS_PORT0_RX_CTL + (idx) * MPS_PORT_RX_CTL_STRIDE) + /* registers for module ULP_RX */ #define ULP_RX_BASE_ADDR 0x19150 @@ -823,6 +900,7 @@ #define F_PFCIM V_PFCIM(1U) #define A_PL_WHOAMI 0x19400 +#define A_PL_VF_WHOAMI 0x0 #define A_PL_RST 0x19428 @@ -837,6 +915,7 @@ #define F_PIORSTMODE V_PIORSTMODE(1U) #define A_PL_REV 0x1943c +#define A_PL_VF_REV 0x4 #define S_REV 0 #define M_REV 0xfU diff --git a/drivers/net/cxgbe/base/t4_regs_values.h b/drivers/net/cxgbe/base/t4_regs_values.h index 9085ff6d..a9414d20 100644 --- a/drivers/net/cxgbe/base/t4_regs_values.h +++ b/drivers/net/cxgbe/base/t4_regs_values.h @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2014-2017 Chelsio Communications. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Chelsio Communications nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Chelsio Communications. + * All rights reserved. */ #ifndef __T4_REGS_VALUES_H__ diff --git a/drivers/net/cxgbe/base/t4fw_interface.h b/drivers/net/cxgbe/base/t4fw_interface.h index 6ca4f318..852e8f3c 100644 --- a/drivers/net/cxgbe/base/t4fw_interface.h +++ b/drivers/net/cxgbe/base/t4fw_interface.h @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2014-2017 Chelsio Communications. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Chelsio Communications nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Chelsio Communications. + * All rights reserved. 
*/ #ifndef _T4FW_INTERFACE_H_ @@ -84,6 +56,8 @@ enum fw_memtype { enum fw_wr_opcodes { FW_ETH_TX_PKT_WR = 0x08, FW_ETH_TX_PKTS_WR = 0x09, + FW_ETH_TX_PKT_VM_WR = 0x11, + FW_ETH_TX_PKTS_VM_WR = 0x12, FW_ETH_TX_PKTS2_WR = 0x78, }; @@ -146,6 +120,29 @@ struct fw_eth_tx_pkts_wr { __u8 type; }; +struct fw_eth_tx_pkt_vm_wr { + __be32 op_immdlen; + __be32 equiq_to_len16; + __be32 r3[2]; + __u8 ethmacdst[6]; + __u8 ethmacsrc[6]; + __be16 ethtype; + __be16 vlantci; +}; + +struct fw_eth_tx_pkts_vm_wr { + __be32 op_pkd; + __be32 equiq_to_len16; + __be32 r3; + __be16 plen; + __u8 npkt; + __u8 r4; + __u8 ethmacdst[6]; + __u8 ethmacsrc[6]; + __be16 ethtype; + __be16 vlantci; +}; + /****************************************************************************** * C O M M A N D s *********************/ @@ -171,24 +168,32 @@ struct fw_eth_tx_pkts_wr { #define FW_CMD_HELLO_RETRIES 3 enum fw_cmd_opcodes { + FW_LDST_CMD = 0x01, FW_RESET_CMD = 0x03, FW_HELLO_CMD = 0x04, FW_BYE_CMD = 0x05, FW_INITIALIZE_CMD = 0x06, FW_CAPS_CONFIG_CMD = 0x07, FW_PARAMS_CMD = 0x08, + FW_PFVF_CMD = 0x09, FW_IQ_CMD = 0x10, FW_EQ_ETH_CMD = 0x12, FW_VI_CMD = 0x14, FW_VI_MAC_CMD = 0x15, FW_VI_RXMODE_CMD = 0x16, FW_VI_ENABLE_CMD = 0x17, + FW_VI_STATS_CMD = 0x1a, FW_PORT_CMD = 0x1b, FW_RSS_IND_TBL_CMD = 0x20, + FW_RSS_GLB_CONFIG_CMD = 0x22, FW_RSS_VI_CONFIG_CMD = 0x23, FW_DEBUG_CMD = 0x81, }; +enum fw_cmd_cap { + FW_CMD_CAP_PORT = 0x04, +}; + /* * Generic command header flit0 */ @@ -238,6 +243,94 @@ struct fw_cmd_hdr { #define FW_LEN16(fw_struct) V_FW_CMD_LEN16(sizeof(fw_struct) / 16) +/* address spaces + */ +enum fw_ldst_addrspc { + FW_LDST_ADDRSPC_TP_PIO = 0x0010, +}; + +struct fw_ldst_cmd { + __be32 op_to_addrspace; + __be32 cycles_to_len16; + union fw_ldst { + struct fw_ldst_addrval { + __be32 addr; + __be32 val; + } addrval; + struct fw_ldst_idctxt { + __be32 physid; + __be32 msg_ctxtflush; + __be32 ctxt_data7; + __be32 ctxt_data6; + __be32 ctxt_data5; + __be32 ctxt_data4; + __be32 ctxt_data3; + __be32 ctxt_data2; + __be32 ctxt_data1; + __be32 ctxt_data0; + } idctxt; + struct fw_ldst_mdio { + __be16 paddr_mmd; + __be16 raddr; + __be16 vctl; + __be16 rval; + } mdio; + struct fw_ldst_mps { + __be16 fid_ctl; + __be16 rplcpf_pkd; + __be32 rplc127_96; + __be32 rplc95_64; + __be32 rplc63_32; + __be32 rplc31_0; + __be32 atrb; + __be16 vlan[16]; + } mps; + struct fw_ldst_func { + __u8 access_ctl; + __u8 mod_index; + __be16 ctl_id; + __be32 offset; + __be64 data0; + __be64 data1; + } func; + struct fw_ldst_pcie { + __u8 ctrl_to_fn; + __u8 bnum; + __u8 r; + __u8 ext_r; + __u8 select_naccess; + __u8 pcie_fn; + __be16 nset_pkd; + __be32 data[12]; + } pcie; + struct fw_ldst_i2c_deprecated { + __u8 pid_pkd; + __u8 base; + __u8 boffset; + __u8 data; + __be32 r9; + } i2c_deprecated; + struct fw_ldst_i2c { + __u8 pid; + __u8 did; + __u8 boffset; + __u8 blen; + __be32 r9; + __u8 data[48]; + } i2c; + struct fw_ldst_le { + __be32 index; + __be32 r9; + __u8 val[33]; + __u8 r11[7]; + } le; + } u; +}; + +#define S_FW_LDST_CMD_ADDRSPACE 0 +#define M_FW_LDST_CMD_ADDRSPACE 0xff +#define V_FW_LDST_CMD_ADDRSPACE(x) ((x) << S_FW_LDST_CMD_ADDRSPACE) + struct fw_reset_cmd { __be32 op_to_write; __be32 retval_len16; @@ -386,6 +479,7 @@ struct fw_caps_config_cmd { enum fw_params_mnem { FW_PARAMS_MNEM_DEV = 1, /* device params */ FW_PARAMS_MNEM_PFVF = 2, /* function params */ + FW_PARAMS_MNEM_REG = 3, /* limited register access */ FW_PARAMS_MNEM_DMAQ = 4, /* dma queue params */ }; @@ -395,6 +489,8 @@ enum fw_params_mnem { enum fw_params_param_dev { 
FW_PARAMS_PARAM_DEV_CCLK = 0x00, /* chip core clock in khz */ FW_PARAMS_PARAM_DEV_PORTVEC = 0x01, /* the port vector */ + FW_PARAMS_PARAM_DEV_FWREV = 0x0B, /* fw version */ + FW_PARAMS_PARAM_DEV_TPREV = 0x0C, /* tp version */ FW_PARAMS_PARAM_DEV_ULPTX_MEMWRITE_DSGL = 0x17, }; @@ -402,7 +498,8 @@ enum fw_params_param_dev { * physical and virtual function parameters */ enum fw_params_param_pfvf { - FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP = 0x31 + FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP = 0x31, + FW_PARAMS_PARAM_PFVF_PORT_CAPS32 = 0x3A }; /* @@ -443,6 +540,10 @@ enum fw_params_param_dmaq { #define G_FW_PARAMS_PARAM_YZ(x) \ (((x) >> S_FW_PARAMS_PARAM_YZ) & M_FW_PARAMS_PARAM_YZ) +#define S_FW_PARAMS_PARAM_XYZ 0 +#define M_FW_PARAMS_PARAM_XYZ 0xffffff +#define V_FW_PARAMS_PARAM_XYZ(x) ((x) << S_FW_PARAMS_PARAM_XYZ) + struct fw_params_cmd { __be32 op_to_vfn; __be32 retval_len16; @@ -464,6 +565,68 @@ struct fw_params_cmd { #define G_FW_PARAMS_CMD_VFN(x) \ (((x) >> S_FW_PARAMS_CMD_VFN) & M_FW_PARAMS_CMD_VFN) +struct fw_pfvf_cmd { + __be32 op_to_vfn; + __be32 retval_len16; + __be32 niqflint_niq; + __be32 type_to_neq; + __be32 tc_to_nexactf; + __be32 r_caps_to_nethctrl; + __be16 nricq; + __be16 nriqp; + __be32 r4; +}; + +#define S_FW_PFVF_CMD_NIQFLINT 20 +#define M_FW_PFVF_CMD_NIQFLINT 0xfff +#define G_FW_PFVF_CMD_NIQFLINT(x) \ + (((x) >> S_FW_PFVF_CMD_NIQFLINT) & M_FW_PFVF_CMD_NIQFLINT) + +#define S_FW_PFVF_CMD_NIQ 0 +#define M_FW_PFVF_CMD_NIQ 0xfffff +#define G_FW_PFVF_CMD_NIQ(x) \ + (((x) >> S_FW_PFVF_CMD_NIQ) & M_FW_PFVF_CMD_NIQ) + +#define S_FW_PFVF_CMD_PMASK 20 +#define M_FW_PFVF_CMD_PMASK 0xf +#define G_FW_PFVF_CMD_PMASK(x) \ + (((x) >> S_FW_PFVF_CMD_PMASK) & M_FW_PFVF_CMD_PMASK) + +#define S_FW_PFVF_CMD_NEQ 0 +#define M_FW_PFVF_CMD_NEQ 0xfffff +#define G_FW_PFVF_CMD_NEQ(x) \ + (((x) >> S_FW_PFVF_CMD_NEQ) & M_FW_PFVF_CMD_NEQ) + +#define S_FW_PFVF_CMD_TC 24 +#define M_FW_PFVF_CMD_TC 0xff +#define G_FW_PFVF_CMD_TC(x) \ + (((x) >> S_FW_PFVF_CMD_TC) & M_FW_PFVF_CMD_TC) + +#define S_FW_PFVF_CMD_NVI 16 +#define M_FW_PFVF_CMD_NVI 0xff +#define G_FW_PFVF_CMD_NVI(x) \ + (((x) >> S_FW_PFVF_CMD_NVI) & M_FW_PFVF_CMD_NVI) + +#define S_FW_PFVF_CMD_NEXACTF 0 +#define M_FW_PFVF_CMD_NEXACTF 0xffff +#define G_FW_PFVF_CMD_NEXACTF(x) \ + (((x) >> S_FW_PFVF_CMD_NEXACTF) & M_FW_PFVF_CMD_NEXACTF) + +#define S_FW_PFVF_CMD_R_CAPS 24 +#define M_FW_PFVF_CMD_R_CAPS 0xff +#define G_FW_PFVF_CMD_R_CAPS(x) \ + (((x) >> S_FW_PFVF_CMD_R_CAPS) & M_FW_PFVF_CMD_R_CAPS) + +#define S_FW_PFVF_CMD_WX_CAPS 16 +#define M_FW_PFVF_CMD_WX_CAPS 0xff +#define G_FW_PFVF_CMD_WX_CAPS(x) \ + (((x) >> S_FW_PFVF_CMD_WX_CAPS) & M_FW_PFVF_CMD_WX_CAPS) + +#define S_FW_PFVF_CMD_NETHCTRL 0 +#define M_FW_PFVF_CMD_NETHCTRL 0xffff +#define G_FW_PFVF_CMD_NETHCTRL(x) \ + (((x) >> S_FW_PFVF_CMD_NETHCTRL) & M_FW_PFVF_CMD_NETHCTRL) + /* * ingress queue type; the first 1K ingress queues can have associated 0, * 1 or 2 free lists and an interrupt, all other ingress queues lack these @@ -724,6 +887,11 @@ struct fw_eq_eth_cmd { #define G_FW_EQ_ETH_CMD_EQID(x) \ (((x) >> S_FW_EQ_ETH_CMD_EQID) & M_FW_EQ_ETH_CMD_EQID) +#define S_FW_EQ_ETH_CMD_PHYSEQID 0 +#define M_FW_EQ_ETH_CMD_PHYSEQID 0xfffff +#define G_FW_EQ_ETH_CMD_PHYSEQID(x) \ + (((x) >> S_FW_EQ_ETH_CMD_PHYSEQID) & M_FW_EQ_ETH_CMD_PHYSEQID) + #define S_FW_EQ_ETH_CMD_FETCHRO 22 #define M_FW_EQ_ETH_CMD_FETCHRO 0x1 #define V_FW_EQ_ETH_CMD_FETCHRO(x) ((x) << S_FW_EQ_ETH_CMD_FETCHRO) @@ -988,6 +1156,9 @@ struct fw_vi_enable_cmd { (((x) >> S_FW_VI_ENABLE_CMD_DCB_INFO) & M_FW_VI_ENABLE_CMD_DCB_INFO) #define 
F_FW_VI_ENABLE_CMD_DCB_INFO V_FW_VI_ENABLE_CMD_DCB_INFO(1U) +/* VI VF stats offset definitions */ +#define VI_VF_NUM_STATS 16 + /* VI PF stats offset definitions */ #define VI_PF_NUM_STATS 17 enum fw_vi_stats_pf_index { @@ -1065,7 +1236,16 @@ struct fw_vi_stats_cmd { } u; }; -/* port capabilities bitmap */ +#define S_FW_VI_STATS_CMD_VIID 0 +#define V_FW_VI_STATS_CMD_VIID(x) ((x) << S_FW_VI_STATS_CMD_VIID) + +#define S_FW_VI_STATS_CMD_NSTATS 12 +#define V_FW_VI_STATS_CMD_NSTATS(x) ((x) << S_FW_VI_STATS_CMD_NSTATS) + +#define S_FW_VI_STATS_CMD_IX 0 +#define V_FW_VI_STATS_CMD_IX(x) ((x) << S_FW_VI_STATS_CMD_IX) + +/* old 16-bit port capabilities bitmap */ enum fw_port_cap { FW_PORT_CAP_SPEED_100M = 0x0001, FW_PORT_CAP_SPEED_1G = 0x0002, @@ -1100,9 +1280,45 @@ enum fw_port_mdi { #define V_FW_PORT_CAP_MDI(x) ((x) << S_FW_PORT_CAP_MDI) #define G_FW_PORT_CAP_MDI(x) (((x) >> S_FW_PORT_CAP_MDI) & M_FW_PORT_CAP_MDI) +/* new 32-bit port capabilities bitmap (fw_port_cap32_t) */ +#define FW_PORT_CAP32_SPEED_100M 0x00000001UL +#define FW_PORT_CAP32_SPEED_1G 0x00000002UL +#define FW_PORT_CAP32_SPEED_10G 0x00000004UL +#define FW_PORT_CAP32_SPEED_25G 0x00000008UL +#define FW_PORT_CAP32_SPEED_40G 0x00000010UL +#define FW_PORT_CAP32_SPEED_50G 0x00000020UL +#define FW_PORT_CAP32_SPEED_100G 0x00000040UL +#define FW_PORT_CAP32_FC_RX 0x00010000UL +#define FW_PORT_CAP32_FC_TX 0x00020000UL +#define FW_PORT_CAP32_802_3_PAUSE 0x00040000UL +#define FW_PORT_CAP32_802_3_ASM_DIR 0x00080000UL +#define FW_PORT_CAP32_ANEG 0x00100000UL +#define FW_PORT_CAP32_MDIX 0x00200000UL +#define FW_PORT_CAP32_MDIAUTO 0x00400000UL +#define FW_PORT_CAP32_FEC_RS 0x00800000UL +#define FW_PORT_CAP32_FEC_BASER_RS 0x01000000UL + +#define S_FW_PORT_CAP32_SPEED 0 +#define M_FW_PORT_CAP32_SPEED 0xfff +#define V_FW_PORT_CAP32_SPEED(x) ((x) << S_FW_PORT_CAP32_SPEED) +#define G_FW_PORT_CAP32_SPEED(x) \ + (((x) >> S_FW_PORT_CAP32_SPEED) & M_FW_PORT_CAP32_SPEED) + +enum fw_port_mdi32 { + FW_PORT_CAP32_MDI_AUTO, +}; + +#define S_FW_PORT_CAP32_MDI 21 +#define M_FW_PORT_CAP32_MDI 3 +#define V_FW_PORT_CAP32_MDI(x) ((x) << S_FW_PORT_CAP32_MDI) +#define G_FW_PORT_CAP32_MDI(x) \ + (((x) >> S_FW_PORT_CAP32_MDI) & M_FW_PORT_CAP32_MDI) + enum fw_port_action { FW_PORT_ACTION_L1_CFG = 0x0001, FW_PORT_ACTION_GET_PORT_INFO = 0x0003, + FW_PORT_ACTION_L1_CFG32 = 0x0009, + FW_PORT_ACTION_GET_PORT_INFO32 = 0x000a, }; struct fw_port_cmd { @@ -1191,6 +1407,18 @@ struct fw_port_cmd { __be64 r12; } control; } dcb; + struct fw_port_l1cfg32 { + __be32 rcap32; + __be32 r; + } l1cfg32; + struct fw_port_info32 { + __be32 lstatus32_to_cbllen32; + __be32 auxlinfo32_mtu32; + __be32 linkattr32; + __be32 pcaps32; + __be32 acaps32; + __be32 lpacaps32; + } info32; } u; }; @@ -1264,6 +1492,36 @@ struct fw_port_cmd { #define G_FW_PORT_CMD_MODTYPE(x) \ (((x) >> S_FW_PORT_CMD_MODTYPE) & M_FW_PORT_CMD_MODTYPE) +#define S_FW_PORT_CMD_LSTATUS32 31 +#define M_FW_PORT_CMD_LSTATUS32 0x1 +#define V_FW_PORT_CMD_LSTATUS32(x) ((x) << S_FW_PORT_CMD_LSTATUS32) +#define F_FW_PORT_CMD_LSTATUS32 V_FW_PORT_CMD_LSTATUS32(1U) + +#define S_FW_PORT_CMD_LINKDNRC32 28 +#define M_FW_PORT_CMD_LINKDNRC32 0x7 +#define G_FW_PORT_CMD_LINKDNRC32(x) \ + (((x) >> S_FW_PORT_CMD_LINKDNRC32) & M_FW_PORT_CMD_LINKDNRC32) + +#define S_FW_PORT_CMD_MDIOCAP32 26 +#define M_FW_PORT_CMD_MDIOCAP32 0x1 +#define V_FW_PORT_CMD_MDIOCAP32(x) ((x) << S_FW_PORT_CMD_MDIOCAP32) +#define F_FW_PORT_CMD_MDIOCAP32 V_FW_PORT_CMD_MDIOCAP32(1U) + +#define S_FW_PORT_CMD_MDIOADDR32 21 +#define M_FW_PORT_CMD_MDIOADDR32 0x1f +#define 
G_FW_PORT_CMD_MDIOADDR32(x) \ + (((x) >> S_FW_PORT_CMD_MDIOADDR32) & M_FW_PORT_CMD_MDIOADDR32) + +#define S_FW_PORT_CMD_PORTTYPE32 13 +#define M_FW_PORT_CMD_PORTTYPE32 0xff +#define G_FW_PORT_CMD_PORTTYPE32(x) \ + (((x) >> S_FW_PORT_CMD_PORTTYPE32) & M_FW_PORT_CMD_PORTTYPE32) + +#define S_FW_PORT_CMD_MODTYPE32 8 +#define M_FW_PORT_CMD_MODTYPE32 0x1f +#define G_FW_PORT_CMD_MODTYPE32(x) \ + (((x) >> S_FW_PORT_CMD_MODTYPE32) & M_FW_PORT_CMD_MODTYPE32) + /* * These are configured into the VPD and hence tools that generate * VPD may use this enumeration. @@ -1532,6 +1790,83 @@ struct fw_rss_ind_tbl_cmd { #define G_FW_RSS_IND_TBL_CMD_IQ2(x) \ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ2) & M_FW_RSS_IND_TBL_CMD_IQ2) +struct fw_rss_glb_config_cmd { + __be32 op_to_write; + __be32 retval_len16; + union fw_rss_glb_config { + struct fw_rss_glb_config_manual { + __be32 mode_pkd; + __be32 r3; + __be64 r4; + __be64 r5; + } manual; + struct fw_rss_glb_config_basicvirtual { + __be32 mode_keymode; + __be32 synmapen_to_hashtoeplitz; + __be64 r8; + __be64 r9; + } basicvirtual; + } u; +}; + +#define S_FW_RSS_GLB_CONFIG_CMD_MODE 28 +#define M_FW_RSS_GLB_CONFIG_CMD_MODE 0xf +#define G_FW_RSS_GLB_CONFIG_CMD_MODE(x) \ + (((x) >> S_FW_RSS_GLB_CONFIG_CMD_MODE) & M_FW_RSS_GLB_CONFIG_CMD_MODE) + +#define FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL 1 + +#define S_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN 8 +#define V_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN(x) \ + ((x) << S_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN) +#define F_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN V_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN(1U) + +#define S_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6 7 +#define V_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6(x) \ + ((x) << S_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6) +#define F_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6 \ + V_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6(1U) + +#define S_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6 6 +#define V_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6(x) \ + ((x) << S_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6) +#define F_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6 \ + V_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6(1U) + +#define S_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4 5 +#define V_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4(x) \ + ((x) << S_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4) +#define F_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4 \ + V_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4(1U) + +#define S_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4 4 +#define V_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4(x) \ + ((x) << S_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4) +#define F_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4 \ + V_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4(1U) + +#define S_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN 3 +#define V_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN(x) \ + ((x) << S_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN) +#define F_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN V_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN(1U) + +#define S_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN 2 +#define V_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN(x) \ + ((x) << S_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN) +#define F_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN V_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN(1U) + +#define S_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP 1 +#define V_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP(x) \ + ((x) << S_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP) +#define F_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP \ + V_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP(1U) + +#define S_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ 0 +#define V_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ(x) \ + ((x) << S_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ) +#define F_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ \ + V_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ(1U) + struct fw_rss_vi_config_cmd { __be32 op_to_viid; __be32 retval_len16; diff --git a/drivers/net/cxgbe/base/t4vf_hw.c 
b/drivers/net/cxgbe/base/t4vf_hw.c
new file mode 100644
index 00000000..9fd0b879
--- /dev/null
+++ b/drivers/net/cxgbe/base/t4vf_hw.c
@@ -0,0 +1,874 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#include
+#include
+
+#include "common.h"
+#include "t4_regs.h"
+
+/**
+ * t4vf_wait_dev_ready - wait until reads of registers work
+ *
+ * Wait for the device to become ready (signified by our "who am I" register
+ * returning a value other than all 1's).  Return an error if it doesn't
+ * become ready ...
+ */
+static int t4vf_wait_dev_ready(struct adapter *adapter)
+{
+	const u32 whoami = T4VF_PL_BASE_ADDR + A_PL_VF_WHOAMI;
+	const u32 notready1 = 0xffffffff;
+	const u32 notready2 = 0xeeeeeeee;
+	u32 val;
+
+	val = t4_read_reg(adapter, whoami);
+	if (val != notready1 && val != notready2)
+		return 0;
+
+	msleep(500);
+	val = t4_read_reg(adapter, whoami);
+	if (val != notready1 && val != notready2)
+		return 0;
+
+	dev_err(adapter, "Device didn't become ready for access, whoami = %#x\n",
+		val);
+	return -EIO;
+}
+
+/*
+ * Get the reply to a mailbox command and store it in @rpl in big-endian order.
+ */
+static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
+			 u32 mbox_addr)
+{
+	for ( ; nflit; nflit--, mbox_addr += 8)
+		*rpl++ = htobe64(t4_read_reg64(adap, mbox_addr));
+}
+
+/**
+ * t4vf_wr_mbox_core - send a command to FW through the mailbox
+ * @adapter: the adapter
+ * @cmd: the command to write
+ * @size: command length in bytes
+ * @rpl: where to optionally store the reply
+ * @sleep_ok: if true we may sleep while awaiting command completion
+ *
+ * Sends the given command to FW through the mailbox and waits for the
+ * FW to execute the command.  If @rpl is not %NULL it is used to store
+ * the FW's reply to the command.  The command and its optional reply
+ * are of the same length.  FW can take up to 500 ms to respond.
+ * @sleep_ok determines whether we may sleep while awaiting the response.
+ * If sleeping is allowed we use progressive backoff; otherwise we spin.
+ *
+ * The return value is 0 on success or a negative errno on failure.  A
+ * failure can happen either because we are not able to execute the
+ * command or FW executes it but signals an error.  In the latter case
+ * the return value is the error code indicated by FW (negated).
+ */
+int t4vf_wr_mbox_core(struct adapter *adapter,
+		      const void __attribute__((__may_alias__)) *cmd,
+		      int size, void *rpl, bool sleep_ok)
+{
+	/*
+	 * We delay in small increments at first in an effort to maintain
+	 * responsiveness for simple, fast executing commands but then back
+	 * off to larger delays to a maximum retry delay.
+	 */
+	static const int delay[] = {
+		1, 1, 3, 5, 10, 10, 20, 50, 100
+	};
+
+
+	u32 mbox_ctl = T4VF_CIM_BASE_ADDR + A_CIM_VF_EXT_MAILBOX_CTRL;
+	__be64 cmd_rpl[MBOX_LEN / 8];
+	struct mbox_entry entry;
+	unsigned int delay_idx;
+	u32 v, mbox_data;
+	const __be64 *p;
+	int i, ret;
+	int ms;
+
+	/* In T6, mailbox size is changed to 128 bytes to avoid
+	 * invalidating the entire prefetch buffer.
+	 */
+	if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
+		mbox_data = T4VF_MBDATA_BASE_ADDR;
+	else
+		mbox_data = T6VF_MBDATA_BASE_ADDR;
+
+	/*
+	 * Commands must be multiples of 16 bytes in length and may not be
+	 * larger than the size of the Mailbox Data register array.
+	 */
+	if ((size % 16) != 0 ||
+	    size > NUM_CIM_VF_MAILBOX_DATA_INSTANCES * 4)
+		return -EINVAL;
+
+	/*
+	 * Queue ourselves onto the mailbox access list.
When our entry is at + * the front of the list, we have rights to access the mailbox. So we + * wait [for a while] till we're at the front [or bail out with an + * EBUSY] ... + */ + t4_os_atomic_add_tail(&entry, &adapter->mbox_list, &adapter->mbox_lock); + + delay_idx = 0; + ms = delay[0]; + + for (i = 0; ; i += ms) { + /* + * If we've waited too long, return a busy indication. This + * really ought to be based on our initial position in the + * mailbox access list but this is a start. We very rarely + * contend on access to the mailbox ... + */ + if (i > (2 * FW_CMD_MAX_TIMEOUT)) { + t4_os_atomic_list_del(&entry, &adapter->mbox_list, + &adapter->mbox_lock); + ret = -EBUSY; + return ret; + } + + /* + * If we're at the head, break out and start the mailbox + * protocol. + */ + if (t4_os_list_first_entry(&adapter->mbox_list) == &entry) + break; + + /* + * Delay for a bit before checking again ... + */ + if (sleep_ok) { + ms = delay[delay_idx]; /* last element may repeat */ + if (delay_idx < ARRAY_SIZE(delay) - 1) + delay_idx++; + msleep(ms); + } else { + rte_delay_ms(ms); + } + } + + /* + * Loop trying to get ownership of the mailbox. Return an error + * if we can't gain ownership. + */ + v = G_MBOWNER(t4_read_reg(adapter, mbox_ctl)); + for (i = 0; v == X_MBOWNER_NONE && i < 3; i++) + v = G_MBOWNER(t4_read_reg(adapter, mbox_ctl)); + + if (v != X_MBOWNER_PL) { + t4_os_atomic_list_del(&entry, &adapter->mbox_list, + &adapter->mbox_lock); + ret = (v == X_MBOWNER_FW) ? -EBUSY : -ETIMEDOUT; + return ret; + } + + /* + * Write the command array into the Mailbox Data register array and + * transfer ownership of the mailbox to the firmware. + */ + for (i = 0, p = cmd; i < size; i += 8) + t4_write_reg64(adapter, mbox_data + i, be64_to_cpu(*p++)); + + t4_read_reg(adapter, mbox_data); /* flush write */ + t4_write_reg(adapter, mbox_ctl, + F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW)); + t4_read_reg(adapter, mbox_ctl); /* flush write */ + delay_idx = 0; + ms = delay[0]; + + /* + * Spin waiting for firmware to acknowledge processing our command. + */ + for (i = 0; i < FW_CMD_MAX_TIMEOUT; i++) { + if (sleep_ok) { + ms = delay[delay_idx]; /* last element may repeat */ + if (delay_idx < ARRAY_SIZE(delay) - 1) + delay_idx++; + msleep(ms); + } else { + rte_delay_ms(ms); + } + + /* + * If we're the owner, see if this is the reply we wanted. + */ + v = t4_read_reg(adapter, mbox_ctl); + if (G_MBOWNER(v) == X_MBOWNER_PL) { + /* + * If the Message Valid bit isn't on, revoke ownership + * of the mailbox and continue waiting for our reply. + */ + if ((v & F_MBMSGVALID) == 0) { + t4_write_reg(adapter, mbox_ctl, + V_MBOWNER(X_MBOWNER_NONE)); + continue; + } + + /* + * We now have our reply. Extract the command return + * value, copy the reply back to our caller's buffer + * (if specified) and revoke ownership of the mailbox. + * We return the (negated) firmware command return + * code (this depends on FW_SUCCESS == 0). (Again we + * avoid clogging the log with FW_VI_STATS_CMD + * reply results.) + */ + + /* + * Retrieve the command reply and release the mailbox. 
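+			 * (Illustrative note: the status travels in the first
+			 *  reply flit; after the byte swap below,
+			 *  G_FW_CMD_RETVAL() extracts it and we return it
+			 *  negated, so FW_SUCCESS still maps to 0.)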
+ */ + get_mbox_rpl(adapter, cmd_rpl, size / 8, mbox_data); + t4_write_reg(adapter, mbox_ctl, + V_MBOWNER(X_MBOWNER_NONE)); + t4_os_atomic_list_del(&entry, &adapter->mbox_list, + &adapter->mbox_lock); + + /* return value in high-order host-endian word */ + v = be64_to_cpu(cmd_rpl[0]); + + if (rpl) { + /* request bit in high-order BE word */ + WARN_ON((be32_to_cpu(*(const u32 *)cmd) + & F_FW_CMD_REQUEST) == 0); + memcpy(rpl, cmd_rpl, size); + } + return -((int)G_FW_CMD_RETVAL(v)); + } + } + + /* + * We timed out. Return the error ... + */ + dev_err(adapter, "command %#x timed out\n", + *(const u8 *)cmd); + dev_err(adapter, " Control = %#x\n", t4_read_reg(adapter, mbox_ctl)); + t4_os_atomic_list_del(&entry, &adapter->mbox_list, &adapter->mbox_lock); + ret = -ETIMEDOUT; + return ret; +} + +/** + * t4vf_fw_reset - issue a reset to FW + * @adapter: the adapter + * + * Issues a reset command to FW. For a Physical Function this would + * result in the Firmware resetting all of its state. For a Virtual + * Function this just resets the state associated with the VF. + */ +int t4vf_fw_reset(struct adapter *adapter) +{ + struct fw_reset_cmd cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_RESET_CMD) | + F_FW_CMD_WRITE); + cmd.retval_len16 = cpu_to_be32(V_FW_CMD_LEN16(FW_LEN16(cmd))); + return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); +} + +/** + * t4vf_prep_adapter - prepare SW and HW for operation + * @adapter: the adapter + * + * Initialize adapter SW state for the various HW modules, set initial + * values for some adapter tunables, take PHYs out of reset, and + * initialize the MDIO interface. + */ +int t4vf_prep_adapter(struct adapter *adapter) +{ + u32 pl_vf_rev; + int ret, ver; + + ret = t4vf_wait_dev_ready(adapter); + if (ret < 0) + return ret; + + /* + * Default port and clock for debugging in case we can't reach + * firmware. + */ + adapter->params.nports = 1; + adapter->params.vfres.pmask = 1; + adapter->params.vpd.cclk = 50000; + + pl_vf_rev = G_REV(t4_read_reg(adapter, A_PL_VF_REV)); + adapter->params.pci.device_id = adapter->pdev->id.device_id; + adapter->params.pci.vendor_id = adapter->pdev->id.vendor_id; + + /* + * WE DON'T NEED adapter->params.chip CODE ONCE PL_REV CONTAINS + * ADAPTER (VERSION << 4 | REVISION) + */ + ver = CHELSIO_PCI_ID_VER(adapter->params.pci.device_id); + adapter->params.chip = 0; + switch (ver) { + case CHELSIO_T5: + adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, + pl_vf_rev); + adapter->params.arch.sge_fl_db = F_DBPRIO | F_DBTYPE; + adapter->params.arch.mps_tcam_size = + NUM_MPS_T5_CLS_SRAM_L_INSTANCES; + break; + case CHELSIO_T6: + adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, + pl_vf_rev); + adapter->params.arch.sge_fl_db = 0; + adapter->params.arch.mps_tcam_size = + NUM_MPS_T5_CLS_SRAM_L_INSTANCES; + break; + default: + dev_err(adapter, "%s: Device %d is not supported\n", + __func__, adapter->params.pci.device_id); + return -EINVAL; + } + return 0; +} + +/** + * t4vf_query_params - query FW or device parameters + * @adapter: the adapter + * @nparams: the number of parameters + * @params: the parameter names + * @vals: the parameter values + * + * Reads the values of firmware or device parameters. Up to 7 parameters + * can be queried at once. 
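+ *
+ * A minimal single-parameter read looks like this (an illustrative
+ * sketch mirroring t4vf_get_vpd_params() below):
+ *
+ *	u32 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+ *		    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK);
+ *	u32 val;
+ *	int ret = t4vf_query_params(adapter, 1, &param, &val);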
+ */
+int t4vf_query_params(struct adapter *adapter, unsigned int nparams,
+		      const u32 *params, u32 *vals)
+{
+	struct fw_params_cmd cmd, rpl;
+	struct fw_params_param *p;
+	unsigned int i;
+	size_t len16;
+	int ret;
+
+	if (nparams > 7)
+		return -EINVAL;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
+				    F_FW_CMD_REQUEST |
+				    F_FW_CMD_READ);
+	len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd,
+				      param[nparams]), 16);
+	cmd.retval_len16 = cpu_to_be32(V_FW_CMD_LEN16(len16));
+	for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++)
+		p->mnem = cpu_to_be32(*params++);
+	ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
+	if (ret == 0)
+		for (i = 0, p = &rpl.param[0]; i < nparams; i++, p++)
+			*vals++ = be32_to_cpu(p->val);
+	return ret;
+}
+
+/**
+ * t4vf_get_vpd_params - retrieve device VPD parameters
+ * @adapter: the adapter
+ *
+ * Retrieves various device Vital Product Data parameters.  The parameters
+ * are stored in @adapter->params.vpd.
+ */
+int t4vf_get_vpd_params(struct adapter *adapter)
+{
+	struct vpd_params *vpd_params = &adapter->params.vpd;
+	u32 params[7], vals[7];
+	int v;
+
+	params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+		     V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
+	v = t4vf_query_params(adapter, 1, params, vals);
+	if (v != FW_SUCCESS)
+		return v;
+	vpd_params->cclk = vals[0];
+	dev_debug(adapter, "%s: vpd_params->cclk = %u\n",
+		  __func__, vpd_params->cclk);
+	return 0;
+}
+
+/**
+ * t4vf_get_dev_params - retrieve device parameters
+ * @adapter: the adapter
+ *
+ * Retrieves the firmware and TP microcode versions.
+ */
+int t4vf_get_dev_params(struct adapter *adapter)
+{
+	u32 params[7], vals[7];
+	int v;
+
+	params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+		     V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWREV));
+	params[1] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+		     V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_TPREV));
+	v = t4vf_query_params(adapter, 2, params, vals);
+	if (v != FW_SUCCESS)
+		return v;
+	adapter->params.fw_vers = vals[0];
+	adapter->params.tp_vers = vals[1];
+
+	dev_info(adapter, "Firmware version: %u.%u.%u.%u\n",
+		 G_FW_HDR_FW_VER_MAJOR(adapter->params.fw_vers),
+		 G_FW_HDR_FW_VER_MINOR(adapter->params.fw_vers),
+		 G_FW_HDR_FW_VER_MICRO(adapter->params.fw_vers),
+		 G_FW_HDR_FW_VER_BUILD(adapter->params.fw_vers));
+
+	dev_info(adapter, "TP Microcode version: %u.%u.%u.%u\n",
+		 G_FW_HDR_FW_VER_MAJOR(adapter->params.tp_vers),
+		 G_FW_HDR_FW_VER_MINOR(adapter->params.tp_vers),
+		 G_FW_HDR_FW_VER_MICRO(adapter->params.tp_vers),
+		 G_FW_HDR_FW_VER_BUILD(adapter->params.tp_vers));
+	return 0;
+}
+
+/**
+ * t4vf_set_params - sets FW or device parameters
+ * @adapter: the adapter
+ * @nparams: the number of parameters
+ * @params: the parameter names
+ * @vals: the parameter values
+ *
+ * Sets the values of firmware or device parameters.  Up to 7 parameters
+ * can be specified at once.
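+ *
+ * For example, t4vf_port_init() below advertises 32-bit Port
+ * Capability support to the Firmware with a single parameter write
+ * (illustrative excerpt):
+ *
+ *	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) |
+ *		V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_PORT_CAPS32);
+ *	val = 1;
+ *	ret = t4vf_set_params(adapter, 1, &param, &val);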
+ */ +int t4vf_set_params(struct adapter *adapter, unsigned int nparams, + const u32 *params, const u32 *vals) +{ + struct fw_params_param *p; + struct fw_params_cmd cmd; + unsigned int i; + size_t len16; + + if (nparams > 7) + return -EINVAL; + + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) | + F_FW_CMD_REQUEST | + F_FW_CMD_WRITE); + len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd, + param[nparams]), 16); + cmd.retval_len16 = cpu_to_be32(V_FW_CMD_LEN16(len16)); + for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++) { + p->mnem = cpu_to_be32(*params++); + p->val = cpu_to_be32(*vals++); + } + return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); +} + +/** + * t4vf_fl_pkt_align - return the fl packet alignment + * @adapter: the adapter + * + * T4 has a single field to specify the packing and padding boundary. + * T5 onwards has separate fields for this and hence the alignment for + * next packet offset is maximum of these two. + */ +int t4vf_fl_pkt_align(struct adapter *adapter, u32 sge_control, + u32 sge_control2) +{ + unsigned int ingpadboundary, ingpackboundary, fl_align, ingpad_shift; + + /* T4 uses a single control field to specify both the PCIe Padding and + * Packing Boundary. T5 introduced the ability to specify these + * separately. The actual Ingress Packet Data alignment boundary + * within Packed Buffer Mode is the maximum of these two + * specifications. + */ + if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) + ingpad_shift = X_INGPADBOUNDARY_SHIFT; + else + ingpad_shift = X_T6_INGPADBOUNDARY_SHIFT; + + ingpadboundary = 1 << (G_INGPADBOUNDARY(sge_control) + ingpad_shift); + + fl_align = ingpadboundary; + if (!is_t4(adapter->params.chip)) { + ingpackboundary = G_INGPACKBOUNDARY(sge_control2); + if (ingpackboundary == X_INGPACKBOUNDARY_16B) + ingpackboundary = 16; + else + ingpackboundary = 1 << (ingpackboundary + + X_INGPACKBOUNDARY_SHIFT); + + fl_align = max(ingpadboundary, ingpackboundary); + } + return fl_align; +} + +unsigned int t4vf_get_pf_from_vf(struct adapter *adapter) +{ + u32 whoami; + + whoami = t4_read_reg(adapter, T4VF_PL_BASE_ADDR + A_PL_VF_WHOAMI); + return (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ? + G_SOURCEPF(whoami) : G_T6_SOURCEPF(whoami)); +} + +/** + * t4vf_get_rss_glb_config - retrieve adapter RSS Global Configuration + * @adapter: the adapter + * + * Retrieves global RSS mode and parameters with which we have to live + * and stores them in the @adapter's RSS parameters. + */ +int t4vf_get_rss_glb_config(struct adapter *adapter) +{ + struct rss_params *rss = &adapter->params.rss; + struct fw_rss_glb_config_cmd cmd, rpl; + int v; + + /* + * Execute an RSS Global Configuration read command to retrieve + * our RSS configuration. + */ + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) | + F_FW_CMD_REQUEST | + F_FW_CMD_READ); + cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); + v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); + if (v != FW_SUCCESS) + return v; + + /* + * Translate the big-endian RSS Global Configuration into our + * cpu-endian format based on the RSS mode. We also do first level + * filtering at this point to weed out modes which don't support + * VF Drivers ... 
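+	 * (Only FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL, and then only
+	 *  with Tunnel Map Enable set, survives the checks below; any
+	 *  other mode makes us return -EINVAL.)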
+ */ + rss->mode = G_FW_RSS_GLB_CONFIG_CMD_MODE + (be32_to_cpu(rpl.u.manual.mode_pkd)); + switch (rss->mode) { + case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: { + u32 word = be32_to_cpu + (rpl.u.basicvirtual.synmapen_to_hashtoeplitz); + + rss->u.basicvirtual.synmapen = + ((word & F_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN) != 0); + rss->u.basicvirtual.syn4tupenipv6 = + ((word & F_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6) != 0); + rss->u.basicvirtual.syn2tupenipv6 = + ((word & F_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6) != 0); + rss->u.basicvirtual.syn4tupenipv4 = + ((word & F_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4) != 0); + rss->u.basicvirtual.syn2tupenipv4 = + ((word & F_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4) != 0); + rss->u.basicvirtual.ofdmapen = + ((word & F_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN) != 0); + rss->u.basicvirtual.tnlmapen = + ((word & F_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN) != 0); + rss->u.basicvirtual.tnlalllookup = + ((word & F_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP) != 0); + rss->u.basicvirtual.hashtoeplitz = + ((word & F_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ) != 0); + + /* we need at least Tunnel Map Enable to be set */ + if (!rss->u.basicvirtual.tnlmapen) + return -EINVAL; + break; + } + + default: + /* all unknown/unsupported RSS modes result in an error */ + return -EINVAL; + } + return 0; +} + +/** + * t4vf_get_vfres - retrieve VF resource limits + * @adapter: the adapter + * + * Retrieves configured resource limits and capabilities for a virtual + * function. The results are stored in @adapter->vfres. + */ +int t4vf_get_vfres(struct adapter *adapter) +{ + struct vf_resources *vfres = &adapter->params.vfres; + struct fw_pfvf_cmd cmd, rpl; + u32 word; + int v; + + /* + * Execute PFVF Read command to get VF resource limits; bail out early + * with error on command failure. + */ + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PFVF_CMD) | + F_FW_CMD_REQUEST | + F_FW_CMD_READ); + cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); + v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); + if (v != FW_SUCCESS) + return v; + + /* + * Extract VF resource limits and return success. + */ + word = be32_to_cpu(rpl.niqflint_niq); + vfres->niqflint = G_FW_PFVF_CMD_NIQFLINT(word); + vfres->niq = G_FW_PFVF_CMD_NIQ(word); + + word = be32_to_cpu(rpl.type_to_neq); + vfres->neq = G_FW_PFVF_CMD_NEQ(word); + vfres->pmask = G_FW_PFVF_CMD_PMASK(word); + + word = be32_to_cpu(rpl.tc_to_nexactf); + vfres->tc = G_FW_PFVF_CMD_TC(word); + vfres->nvi = G_FW_PFVF_CMD_NVI(word); + vfres->nexactf = G_FW_PFVF_CMD_NEXACTF(word); + + word = be32_to_cpu(rpl.r_caps_to_nethctrl); + vfres->r_caps = G_FW_PFVF_CMD_R_CAPS(word); + vfres->wx_caps = G_FW_PFVF_CMD_WX_CAPS(word); + vfres->nethctrl = G_FW_PFVF_CMD_NETHCTRL(word); + return 0; +} + +/** + * t4vf_get_port_stats_fw - collect "port" statistics via Firmware + * @adapter: the adapter + * @pidx: the port index + * @s: the stats structure to fill + * + * Collect statistics for the "port"'s Virtual Interface via Firmware + * commands. + */ +static int t4vf_get_port_stats_fw(struct adapter *adapter, int pidx, + struct port_stats *p) +{ + struct port_info *pi = adap2pinfo(adapter, pidx); + unsigned int rem = VI_VF_NUM_STATS; + struct fw_vi_stats_vf fwstats; + __be64 *fwsp = (__be64 *)&fwstats; + + /* + * Grab the Virtual Interface statistics a chunk at a time via mailbox + * commands. We could use a Work Request and get all of them at once + * but that's an asynchronous interface which is awkward to use. 
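+	 * (VI_VF_NUM_STATS is 16 and each FW_VI_STATS_CMD reply carries
+	 *  at most six 64-bit stats, so one full read below costs three
+	 *  mailbox commands: 6 + 6 + 4 statistics.)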
+ */ + while (rem) { + unsigned int ix = VI_VF_NUM_STATS - rem; + unsigned int nstats = min(6U, rem); + struct fw_vi_stats_cmd cmd, rpl; + size_t len = (offsetof(struct fw_vi_stats_cmd, u) + + sizeof(struct fw_vi_stats_ctl)); + size_t len16 = DIV_ROUND_UP(len, 16); + int ret; + + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_STATS_CMD) | + V_FW_VI_STATS_CMD_VIID(pi->viid) | + F_FW_CMD_REQUEST | + F_FW_CMD_READ); + cmd.retval_len16 = cpu_to_be32(V_FW_CMD_LEN16(len16)); + cmd.u.ctl.nstats_ix = + cpu_to_be16(V_FW_VI_STATS_CMD_IX(ix) | + V_FW_VI_STATS_CMD_NSTATS(nstats)); + ret = t4vf_wr_mbox_ns(adapter, &cmd, len, &rpl); + if (ret != FW_SUCCESS) + return ret; + + memcpy(fwsp, &rpl.u.ctl.stat0, sizeof(__be64) * nstats); + + rem -= nstats; + fwsp += nstats; + } + + /* + * Translate firmware statistics into host native statistics. + */ + p->tx_bcast_frames = be64_to_cpu(fwstats.tx_bcast_frames); + p->tx_mcast_frames = be64_to_cpu(fwstats.tx_mcast_frames); + p->tx_ucast_frames = be64_to_cpu(fwstats.tx_ucast_frames); + p->tx_drop = be64_to_cpu(fwstats.tx_drop_frames); + + p->rx_bcast_frames = be64_to_cpu(fwstats.rx_bcast_frames); + p->rx_mcast_frames = be64_to_cpu(fwstats.rx_mcast_frames); + p->rx_ucast_frames = be64_to_cpu(fwstats.rx_ucast_frames); + p->rx_len_err = be64_to_cpu(fwstats.rx_err_frames); + + return 0; +} + +/** + * t4vf_get_port_stats - collect "port" statistics + * @adapter: the adapter + * @pidx: the port index + * @s: the stats structure to fill + * + * Collect statistics for the "port"'s Virtual Interface. + */ +void t4vf_get_port_stats(struct adapter *adapter, int pidx, + struct port_stats *p) +{ + /* + * If this is not the first Virtual Interface for our Virtual + * Function, we need to use Firmware commands to retrieve its + * MPS statistics. + */ + if (pidx != 0) + t4vf_get_port_stats_fw(adapter, pidx, p); + + /* + * But for the first VI, we can grab its statistics via the MPS + * register mapped into the VF register space. + */ +#define GET_STAT(name) \ + t4_read_reg64(adapter, \ + T4VF_MPS_BASE_ADDR + A_MPS_VF_STAT_##name##_L) + p->tx_bcast_frames = GET_STAT(TX_VF_BCAST_FRAMES); + p->tx_mcast_frames = GET_STAT(TX_VF_MCAST_FRAMES); + p->tx_ucast_frames = GET_STAT(TX_VF_UCAST_FRAMES); + p->tx_drop = GET_STAT(TX_VF_DROP_FRAMES); + + p->rx_bcast_frames = GET_STAT(RX_VF_BCAST_FRAMES); + p->rx_mcast_frames = GET_STAT(RX_VF_MCAST_FRAMES); + p->rx_ucast_frames = GET_STAT(RX_VF_UCAST_FRAMES); + + p->rx_len_err = GET_STAT(RX_VF_ERR_FRAMES); +#undef GET_STAT +} + +static int t4vf_alloc_vi(struct adapter *adapter, int port_id) +{ + struct fw_vi_cmd cmd, rpl; + int v; + + /* + * Execute a VI command to allocate Virtual Interface and return its + * VIID. 
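+	 * (That VIID is how later commands address the interface, e.g.
+	 *  via V_FW_VI_CMD_VIID() in t4vf_port_init() and
+	 *  V_FW_VI_STATS_CMD_VIID() in the stats path above.)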
+ */
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) |
+				    F_FW_CMD_REQUEST |
+				    F_FW_CMD_WRITE |
+				    F_FW_CMD_EXEC);
+	cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(cmd) |
+					 F_FW_VI_CMD_ALLOC);
+	cmd.portid_pkd = V_FW_VI_CMD_PORTID(port_id);
+	v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
+	if (v != FW_SUCCESS)
+		return v;
+	return G_FW_VI_CMD_VIID(be16_to_cpu(rpl.type_to_viid));
+}
+
+int t4vf_port_init(struct adapter *adapter)
+{
+	unsigned int fw_caps = adapter->params.fw_caps_support;
+	struct fw_port_cmd port_cmd, port_rpl;
+	struct fw_vi_cmd vi_cmd, vi_rpl;
+	fw_port_cap32_t pcaps, acaps;
+	enum fw_port_type port_type;
+	int mdio_addr;
+	int ret, i;
+
+	for_each_port(adapter, i) {
+		struct port_info *p = adap2pinfo(adapter, i);
+
+		/*
+		 * If we haven't yet determined if we're talking to Firmware
+		 * which knows the new 32-bit Port Caps, it's time to find
+		 * out now.  This will also tell new Firmware to send us Port
+		 * Status Updates using the new 32-bit Port Capabilities
+		 * version of the Port Information message.
+		 */
+		if (fw_caps == FW_CAPS_UNKNOWN) {
+			u32 param, val;
+
+			param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) |
+				 V_FW_PARAMS_PARAM_X
+					(FW_PARAMS_PARAM_PFVF_PORT_CAPS32));
+			val = 1;
+			ret = t4vf_set_params(adapter, 1, &param, &val);
+			fw_caps = (ret == 0 ? FW_CAPS32 : FW_CAPS16);
+			adapter->params.fw_caps_support = fw_caps;
+		}
+
+		ret = t4vf_alloc_vi(adapter, p->port_id);
+		if (ret < 0) {
+			dev_err(adapter, "cannot allocate VI for port %d:"
+				" err=%d\n", p->port_id, ret);
+			return ret;
+		}
+		p->viid = ret;
+
+		/*
+		 * Execute a VI Read command to get our Virtual Interface
+		 * information like MAC address, etc.
+		 */
+		memset(&vi_cmd, 0, sizeof(vi_cmd));
+		vi_cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) |
+					       F_FW_CMD_REQUEST |
+					       F_FW_CMD_READ);
+		vi_cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(vi_cmd));
+		vi_cmd.type_to_viid = cpu_to_be16(V_FW_VI_CMD_VIID(p->viid));
+		ret = t4vf_wr_mbox(adapter, &vi_cmd, sizeof(vi_cmd), &vi_rpl);
+		if (ret != FW_SUCCESS)
+			return ret;
+
+		p->rss_size = G_FW_VI_CMD_RSSSIZE
+				(be16_to_cpu(vi_rpl.norss_rsssize));
+		t4_os_set_hw_addr(adapter, i, vi_rpl.mac);
+
+		/*
+		 * If we don't have read access to our port information, we're
+		 * done now.  Else, execute a PORT Read command to get it ...
+		 */
+		if (!(adapter->params.vfres.r_caps & FW_CMD_CAP_PORT))
+			return 0;
+
+		memset(&port_cmd, 0, sizeof(port_cmd));
+		port_cmd.op_to_portid = cpu_to_be32
+			(V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST |
+			 F_FW_CMD_READ |
+			 V_FW_PORT_CMD_PORTID(p->port_id));
+		port_cmd.action_to_len16 = cpu_to_be32
+			(V_FW_PORT_CMD_ACTION(fw_caps == FW_CAPS16 ?
+					      FW_PORT_ACTION_GET_PORT_INFO :
+					      FW_PORT_ACTION_GET_PORT_INFO32) |
+			 FW_LEN16(port_cmd));
+		ret = t4vf_wr_mbox(adapter, &port_cmd, sizeof(port_cmd),
+				   &port_rpl);
+		if (ret != FW_SUCCESS)
+			return ret;
+
+		/*
+		 * Extract the various fields from the Port Information message.
+		 */
+		if (fw_caps == FW_CAPS16) {
+			u32 lstatus = be32_to_cpu
+				(port_rpl.u.info.lstatus_to_modtype);
+
+			port_type = G_FW_PORT_CMD_PTYPE(lstatus);
+			mdio_addr = ((lstatus & F_FW_PORT_CMD_MDIOCAP) ?
+				     (int)G_FW_PORT_CMD_MDIOADDR(lstatus) :
+				     -1);
+			pcaps = fwcaps16_to_caps32
+				(be16_to_cpu(port_rpl.u.info.pcap));
+			acaps = fwcaps16_to_caps32
+				(be16_to_cpu(port_rpl.u.info.acap));
+		} else {
+			u32 lstatus32 = be32_to_cpu
+				(port_rpl.u.info32.lstatus32_to_cbllen32);
+
+			port_type = G_FW_PORT_CMD_PORTTYPE32(lstatus32);
+			mdio_addr = ((lstatus32 & F_FW_PORT_CMD_MDIOCAP32) ?
+ (int)G_FW_PORT_CMD_MDIOADDR32(lstatus32) : + -1); + pcaps = be32_to_cpu(port_rpl.u.info32.pcaps32); + acaps = be32_to_cpu(port_rpl.u.info32.acaps32); + } + + p->port_type = port_type; + p->mdio_addr = mdio_addr; + p->mod_type = FW_PORT_MOD_TYPE_NA; + init_link_config(&p->link_cfg, pcaps, acaps); + } + return 0; +} diff --git a/drivers/net/cxgbe/base/t4vf_hw.h b/drivers/net/cxgbe/base/t4vf_hw.h new file mode 100644 index 00000000..55e436e7 --- /dev/null +++ b/drivers/net/cxgbe/base/t4vf_hw.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Chelsio Communications. + * All rights reserved. + */ + +#ifndef __T4VF_HW_H +#define __T4VF_HW_H + +#define T4VF_PL_BASE_ADDR 0x0200 +#define T4VF_CIM_BASE_ADDR 0x0300 +#define T4VF_MBDATA_BASE_ADDR 0x0240 +#define T6VF_MBDATA_BASE_ADDR 0x0280 + +#define NUM_CIM_VF_MAILBOX_DATA_INSTANCES NUM_CIM_PF_MAILBOX_DATA_INSTANCES +#endif /* __T4VF_HW_H */ diff --git a/drivers/net/cxgbe/cxgbe.h b/drivers/net/cxgbe/cxgbe.h index f9891545..e4a52560 100644 --- a/drivers/net/cxgbe/cxgbe.h +++ b/drivers/net/cxgbe/cxgbe.h @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2014-2017 Chelsio Communications. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Chelsio Communications nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Chelsio Communications. + * All rights reserved. 
*/ #ifndef _CXGBE_H_ @@ -46,12 +18,25 @@ #define CXGBE_MIN_RX_BUFSIZE ETHER_MIN_MTU /* min buf size */ #define CXGBE_MAX_RX_PKTLEN (9000 + ETHER_HDR_LEN + ETHER_CRC_LEN) /* max pkt */ +#define CXGBE_DEFAULT_RSS_KEY_LEN 40 /* 320-bits */ +#define CXGBE_RSS_HF_ALL (ETH_RSS_IPV4 | ETH_RSS_IPV6 | \ + ETH_RSS_NONFRAG_IPV4_TCP | \ + ETH_RSS_NONFRAG_IPV4_UDP | \ + ETH_RSS_NONFRAG_IPV6_TCP | \ + ETH_RSS_NONFRAG_IPV6_UDP) + +#define CXGBE_DEVARG_KEEP_OVLAN "keep_ovlan" +#define CXGBE_DEVARG_FORCE_LINK_UP "force_link_up" + +bool force_linkup(struct adapter *adap); int cxgbe_probe(struct adapter *adapter); +int cxgbevf_probe(struct adapter *adapter); void cxgbe_get_speed_caps(struct port_info *pi, u32 *speed_caps); int cxgbe_up(struct adapter *adap); int cxgbe_down(struct port_info *pi); void cxgbe_close(struct adapter *adapter); void cxgbe_stats_get(struct port_info *pi, struct port_stats *stats); +void cxgbevf_stats_get(struct port_info *pi, struct port_stats *stats); void cxgbe_stats_reset(struct port_info *pi); int link_start(struct port_info *pi); void init_rspq(struct adapter *adap, struct sge_rspq *q, unsigned int us, @@ -59,7 +44,11 @@ void init_rspq(struct adapter *adap, struct sge_rspq *q, unsigned int us, int setup_sge_fwevtq(struct adapter *adapter); void cfg_queues(struct rte_eth_dev *eth_dev); int cfg_queue_count(struct rte_eth_dev *eth_dev); +int init_rss(struct adapter *adap); int setup_rss(struct port_info *pi); void cxgbe_enable_rx_queues(struct port_info *pi); +void print_port_info(struct adapter *adap); +void print_adapter_info(struct adapter *adap); +int cxgbe_get_devargs(struct rte_devargs *devargs, const char *key); #endif /* _CXGBE_H_ */ diff --git a/drivers/net/cxgbe/cxgbe_compat.h b/drivers/net/cxgbe/cxgbe_compat.h index 03bba9fe..779bcf16 100644 --- a/drivers/net/cxgbe/cxgbe_compat.h +++ b/drivers/net/cxgbe/cxgbe_compat.h @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2014-2017 Chelsio Communications. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Chelsio Communications nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Chelsio Communications. + * All rights reserved. */ #ifndef _CXGBE_COMPAT_H_ diff --git a/drivers/net/cxgbe/cxgbe_ethdev.c b/drivers/net/cxgbe/cxgbe_ethdev.c index 5cd260f4..61115e26 100644 --- a/drivers/net/cxgbe/cxgbe_ethdev.c +++ b/drivers/net/cxgbe/cxgbe_ethdev.c @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2014-2017 Chelsio Communications. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Chelsio Communications nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Chelsio Communications. + * All rights reserved. */ #include @@ -63,6 +35,7 @@ #include #include "cxgbe.h" +#include "cxgbe_pfvf.h" /* * Macros needed to support the PCI Device ID Table ... 
@@ -85,8 +58,21 @@ */ #include "t4_pci_id_tbl.h" -static uint16_t cxgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, - uint16_t nb_pkts) +#define CXGBE_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT |\ + DEV_TX_OFFLOAD_IPV4_CKSUM |\ + DEV_TX_OFFLOAD_UDP_CKSUM |\ + DEV_TX_OFFLOAD_TCP_CKSUM |\ + DEV_TX_OFFLOAD_TCP_TSO) + +#define CXGBE_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP |\ + DEV_RX_OFFLOAD_CRC_STRIP |\ + DEV_RX_OFFLOAD_IPV4_CKSUM |\ + DEV_RX_OFFLOAD_JUMBO_FRAME |\ + DEV_RX_OFFLOAD_UDP_CKSUM |\ + DEV_RX_OFFLOAD_TCP_CKSUM) + +uint16_t cxgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) { struct sge_eth_txq *txq = (struct sge_eth_txq *)tx_queue; uint16_t pkts_sent, pkts_remain; @@ -119,8 +105,8 @@ static uint16_t cxgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, return total_sent; } -static uint16_t cxgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, - uint16_t nb_pkts) +uint16_t cxgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) { struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)rx_queue; unsigned int work_done; @@ -135,8 +121,8 @@ static uint16_t cxgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, return work_done; } -static void cxgbe_dev_info_get(struct rte_eth_dev *eth_dev, - struct rte_eth_dev_info *device_info) +void cxgbe_dev_info_get(struct rte_eth_dev *eth_dev, + struct rte_eth_dev_info *device_info) { struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); struct adapter *adapter = pi->adapter; @@ -148,8 +134,6 @@ static void cxgbe_dev_info_get(struct rte_eth_dev *eth_dev, .nb_align = 1, }; - device_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); - device_info->min_rx_bufsize = CXGBE_MIN_RX_BUFSIZE; device_info->max_rx_pktlen = CXGBE_MAX_RX_PKTLEN; device_info->max_rx_queues = max_queues; @@ -159,25 +143,22 @@ static void cxgbe_dev_info_get(struct rte_eth_dev *eth_dev, device_info->max_vfs = adapter->params.arch.vfcount; device_info->max_vmdq_pools = 0; /* XXX: For now no support for VMDQ */ - device_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP | - DEV_RX_OFFLOAD_IPV4_CKSUM | - DEV_RX_OFFLOAD_UDP_CKSUM | - DEV_RX_OFFLOAD_TCP_CKSUM; + device_info->rx_queue_offload_capa = 0UL; + device_info->rx_offload_capa = CXGBE_RX_OFFLOADS; - device_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT | - DEV_TX_OFFLOAD_IPV4_CKSUM | - DEV_TX_OFFLOAD_UDP_CKSUM | - DEV_TX_OFFLOAD_TCP_CKSUM | - DEV_TX_OFFLOAD_TCP_TSO; + device_info->tx_queue_offload_capa = 0UL; + device_info->tx_offload_capa = CXGBE_TX_OFFLOADS; device_info->reta_size = pi->rss_size; + device_info->hash_key_size = CXGBE_DEFAULT_RSS_KEY_LEN; + device_info->flow_type_rss_offloads = CXGBE_RSS_HF_ALL; device_info->rx_desc_lim = cxgbe_desc_lim; device_info->tx_desc_lim = cxgbe_desc_lim; cxgbe_get_speed_caps(pi, &device_info->speed_capa); } -static void cxgbe_dev_promiscuous_enable(struct rte_eth_dev *eth_dev) +void cxgbe_dev_promiscuous_enable(struct rte_eth_dev *eth_dev) { struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); struct adapter *adapter = pi->adapter; @@ -186,7 +167,7 @@ static void cxgbe_dev_promiscuous_enable(struct rte_eth_dev *eth_dev) 1, -1, 1, -1, false); } -static void cxgbe_dev_promiscuous_disable(struct rte_eth_dev *eth_dev) +void cxgbe_dev_promiscuous_disable(struct rte_eth_dev *eth_dev) { struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); struct adapter *adapter = pi->adapter; @@ -195,7 +176,7 @@ static void cxgbe_dev_promiscuous_disable(struct rte_eth_dev *eth_dev) 0, -1, 1, 
-1, false); } -static void cxgbe_dev_allmulticast_enable(struct rte_eth_dev *eth_dev) +void cxgbe_dev_allmulticast_enable(struct rte_eth_dev *eth_dev) { struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); struct adapter *adapter = pi->adapter; @@ -206,7 +187,7 @@ static void cxgbe_dev_allmulticast_enable(struct rte_eth_dev *eth_dev) -1, 1, 1, -1, false); } -static void cxgbe_dev_allmulticast_disable(struct rte_eth_dev *eth_dev) +void cxgbe_dev_allmulticast_disable(struct rte_eth_dev *eth_dev) { struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); struct adapter *adapter = pi->adapter; @@ -217,28 +198,26 @@ static void cxgbe_dev_allmulticast_disable(struct rte_eth_dev *eth_dev) -1, 0, 1, -1, false); } -static int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev, - __rte_unused int wait_to_complete) +int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev, + __rte_unused int wait_to_complete) { struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); struct adapter *adapter = pi->adapter; struct sge *s = &adapter->sge; - struct rte_eth_link *old_link = ð_dev->data->dev_link; + struct rte_eth_link new_link; unsigned int work_done, budget = 4; cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done); - if (old_link->link_status == pi->link_cfg.link_ok) - return -1; /* link not changed */ - eth_dev->data->dev_link.link_status = pi->link_cfg.link_ok; - eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX; - eth_dev->data->dev_link.link_speed = pi->link_cfg.speed; + new_link.link_status = force_linkup(adapter) ? + ETH_LINK_UP : pi->link_cfg.link_ok; + new_link.link_duplex = ETH_LINK_FULL_DUPLEX; + new_link.link_speed = pi->link_cfg.speed; - /* link has changed */ - return 0; + return rte_eth_linkstatus_set(eth_dev, &new_link); } -static int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu) +int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu) { struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); struct adapter *adapter = pi->adapter; @@ -254,9 +233,11 @@ static int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu) /* set to jumbo mode if needed */ if (new_mtu > ETHER_MAX_LEN) - eth_dev->data->dev_conf.rxmode.jumbo_frame = 1; + eth_dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; else - eth_dev->data->dev_conf.rxmode.jumbo_frame = 0; + eth_dev->data->dev_conf.rxmode.offloads &= + ~DEV_RX_OFFLOAD_JUMBO_FRAME; err = t4_set_rxmode(adapter, adapter->mbox, pi->viid, new_mtu, -1, -1, -1, -1, true); @@ -266,21 +247,13 @@ static int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu) return err; } -static int cxgbe_dev_tx_queue_start(struct rte_eth_dev *eth_dev, - uint16_t tx_queue_id); -static int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev, - uint16_t tx_queue_id); -static void cxgbe_dev_tx_queue_release(void *q); -static void cxgbe_dev_rx_queue_release(void *q); - /* * Stop device. 
*/ -static void cxgbe_dev_close(struct rte_eth_dev *eth_dev) +void cxgbe_dev_close(struct rte_eth_dev *eth_dev) { struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); struct adapter *adapter = pi->adapter; - int i, dev_down = 0; CXGBE_FUNC_TRACE(); @@ -294,28 +267,12 @@ static void cxgbe_dev_close(struct rte_eth_dev *eth_dev) * have been disabled */ t4_sge_eth_clear_queues(pi); - - /* See if all ports are down */ - for_each_port(adapter, i) { - pi = adap2pinfo(adapter, i); - /* - * Skip first port of the adapter since it will be closed - * by DPDK - */ - if (i == 0) - continue; - dev_down += (pi->eth_dev->data->dev_started == 0) ? 1 : 0; - } - - /* If rest of the ports are stopped, then free up resources */ - if (dev_down == (adapter->params.nports - 1)) - cxgbe_close(adapter); } /* Start the device. * It returns 0 on success. */ -static int cxgbe_dev_start(struct rte_eth_dev *eth_dev) +int cxgbe_dev_start(struct rte_eth_dev *eth_dev) { struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); struct adapter *adapter = pi->adapter; @@ -367,7 +324,7 @@ out: /* * Stop device: disable rx and tx functions to allow for reconfiguring. */ -static void cxgbe_dev_stop(struct rte_eth_dev *eth_dev) +void cxgbe_dev_stop(struct rte_eth_dev *eth_dev) { struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); struct adapter *adapter = pi->adapter; @@ -386,13 +343,20 @@ static void cxgbe_dev_stop(struct rte_eth_dev *eth_dev) t4_sge_eth_clear_queues(pi); } -static int cxgbe_dev_configure(struct rte_eth_dev *eth_dev) +int cxgbe_dev_configure(struct rte_eth_dev *eth_dev) { struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); struct adapter *adapter = pi->adapter; + uint64_t configured_offloads; int err; CXGBE_FUNC_TRACE(); + configured_offloads = eth_dev->data->dev_conf.rxmode.offloads; + if (!(configured_offloads & DEV_RX_OFFLOAD_CRC_STRIP)) { + dev_info(adapter, "can't disable hw crc strip\n"); + eth_dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_CRC_STRIP; + } if (!(adapter->flags & FW_QUEUE_BOUND)) { err = setup_sge_fwevtq(adapter); @@ -408,8 +372,7 @@ static int cxgbe_dev_configure(struct rte_eth_dev *eth_dev) return 0; } -static int cxgbe_dev_tx_queue_start(struct rte_eth_dev *eth_dev, - uint16_t tx_queue_id) +int cxgbe_dev_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id) { int ret; struct sge_eth_txq *txq = (struct sge_eth_txq *) @@ -424,8 +387,7 @@ static int cxgbe_dev_tx_queue_start(struct rte_eth_dev *eth_dev, return ret; } -static int cxgbe_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, - uint16_t tx_queue_id) +int cxgbe_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id) { int ret; struct sge_eth_txq *txq = (struct sge_eth_txq *) @@ -440,10 +402,10 @@ static int cxgbe_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, return ret; } -static int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, - uint16_t queue_idx, uint16_t nb_desc, - unsigned int socket_id, - const struct rte_eth_txconf *tx_conf) +int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, + uint16_t queue_idx, uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_txconf *tx_conf __rte_unused) { struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); struct adapter *adapter = pi->adapter; @@ -452,8 +414,6 @@ static int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, int err = 0; unsigned int temp_nb_desc; - RTE_SET_USED(tx_conf); - dev_debug(adapter, "%s: eth_dev->data->nb_tx_queues = 
%d; queue_idx = %d; nb_desc = %d; socket_id = %d; pi->first_qset = %u\n", __func__, eth_dev->data->nb_tx_queues, queue_idx, nb_desc, socket_id, pi->first_qset); @@ -488,13 +448,12 @@ static int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, err = t4_sge_alloc_eth_txq(adapter, txq, eth_dev, queue_idx, s->fw_evtq.cntxt_id, socket_id); - dev_debug(adapter, "%s: txq->q.cntxt_id= %d err = %d\n", - __func__, txq->q.cntxt_id, err); - + dev_debug(adapter, "%s: txq->q.cntxt_id= %u txq->q.abs_id= %u err = %d\n", + __func__, txq->q.cntxt_id, txq->q.abs_id, err); return err; } -static void cxgbe_dev_tx_queue_release(void *q) +void cxgbe_dev_tx_queue_release(void *q) { struct sge_eth_txq *txq = (struct sge_eth_txq *)q; @@ -510,8 +469,7 @@ static void cxgbe_dev_tx_queue_release(void *q) } } -static int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev, - uint16_t rx_queue_id) +int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id) { int ret; struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); @@ -530,8 +488,7 @@ static int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev, return ret; } -static int cxgbe_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, - uint16_t rx_queue_id) +int cxgbe_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id) { int ret; struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); @@ -549,11 +506,11 @@ static int cxgbe_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, return ret; } -static int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, - uint16_t queue_idx, uint16_t nb_desc, - unsigned int socket_id, - const struct rte_eth_rxconf *rx_conf, - struct rte_mempool *mp) +int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, + uint16_t queue_idx, uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf __rte_unused, + struct rte_mempool *mp) { struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); struct adapter *adapter = pi->adapter; @@ -565,8 +522,6 @@ static int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info dev_info; unsigned int pkt_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len; - RTE_SET_USED(rx_conf); - dev_debug(adapter, "%s: eth_dev->data->nb_rx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; mp = %p\n", __func__, eth_dev->data->nb_rx_queues, queue_idx, nb_desc, socket_id, mp); @@ -613,21 +568,25 @@ static int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, /* Set to jumbo mode if necessary */ if (pkt_len > ETHER_MAX_LEN) - eth_dev->data->dev_conf.rxmode.jumbo_frame = 1; + eth_dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; else - eth_dev->data->dev_conf.rxmode.jumbo_frame = 0; + eth_dev->data->dev_conf.rxmode.offloads &= + ~DEV_RX_OFFLOAD_JUMBO_FRAME; err = t4_sge_alloc_rxq(adapter, &rxq->rspq, false, eth_dev, msi_idx, &rxq->fl, t4_ethrx_handler, - t4_get_tp_ch_map(adapter, pi->tx_chan), mp, + is_pf4(adapter) ? 
+ t4_get_tp_ch_map(adapter, pi->tx_chan) : 0, mp, queue_idx, socket_id); - dev_debug(adapter, "%s: err = %d; port_id = %d; cntxt_id = %u\n", - __func__, err, pi->port_id, rxq->rspq.cntxt_id); + dev_debug(adapter, "%s: err = %d; port_id = %d; cntxt_id = %u; abs_id = %u\n", + __func__, err, pi->port_id, rxq->rspq.cntxt_id, + rxq->rspq.abs_id); return err; } -static void cxgbe_dev_rx_queue_release(void *q) +void cxgbe_dev_rx_queue_release(void *q) { struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)q; struct sge_rspq *rq = &rxq->rspq; @@ -750,7 +709,7 @@ static int cxgbe_flow_ctrl_set(struct rte_eth_dev *eth_dev, struct adapter *adapter = pi->adapter; struct link_config *lc = &pi->link_cfg; - if (lc->supported & FW_PORT_CAP_ANEG) { + if (lc->pcaps & FW_PORT_CAP32_ANEG) { if (fc_conf->autoneg) lc->requested_fc |= PAUSE_AUTONEG; else @@ -773,7 +732,7 @@ static int cxgbe_flow_ctrl_set(struct rte_eth_dev *eth_dev, &pi->link_cfg); } -static const uint32_t * +const uint32_t * cxgbe_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev) { static const uint32_t ptypes[] = { @@ -787,6 +746,88 @@ cxgbe_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev) return NULL; } +/* Update RSS hash configuration + */ +static int cxgbe_dev_rss_hash_update(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct port_info *pi = (struct port_info *)(dev->data->dev_private); + struct adapter *adapter = pi->adapter; + int err; + + err = cxgbe_write_rss_conf(pi, rss_conf->rss_hf); + if (err) + return err; + + pi->rss_hf = rss_conf->rss_hf; + + if (rss_conf->rss_key) { + u32 key[10], mod_key[10]; + int i, j; + + memcpy(key, rss_conf->rss_key, CXGBE_DEFAULT_RSS_KEY_LEN); + + for (i = 9, j = 0; i >= 0; i--, j++) + mod_key[j] = cpu_to_be32(key[i]); + + t4_write_rss_key(adapter, mod_key, -1); + } + + return 0; +} + +/* Get RSS hash configuration + */ +static int cxgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct port_info *pi = (struct port_info *)(dev->data->dev_private); + struct adapter *adapter = pi->adapter; + u64 rss_hf = 0; + u64 flags = 0; + int err; + + err = t4_read_config_vi_rss(adapter, adapter->mbox, pi->viid, + &flags, NULL); + + if (err) + return err; + + if (flags & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) { + rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP; + if (flags & F_FW_RSS_VI_CONFIG_CMD_UDPEN) + rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP; + } + + if (flags & F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) + rss_hf |= ETH_RSS_IPV6; + + if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) { + rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP; + if (flags & F_FW_RSS_VI_CONFIG_CMD_UDPEN) + rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP; + } + + if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) + rss_hf |= ETH_RSS_IPV4; + + rss_conf->rss_hf = rss_hf; + + if (rss_conf->rss_key) { + u32 key[10], mod_key[10]; + int i, j; + + t4_read_rss_key(adapter, key); + + for (i = 9, j = 0; i >= 0; i--, j++) + mod_key[j] = be32_to_cpu(key[i]); + + memcpy(rss_conf->rss_key, mod_key, CXGBE_DEFAULT_RSS_KEY_LEN); + } + + return 0; +} + static int cxgbe_get_eeprom_length(struct rte_eth_dev *dev) { RTE_SET_USED(dev); @@ -956,6 +997,23 @@ static int cxgbe_get_regs(struct rte_eth_dev *eth_dev, return 0; } +int cxgbe_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *addr) +{ + struct port_info *pi = (struct port_info *)(dev->data->dev_private); + struct adapter *adapter = pi->adapter; + int ret; + + ret = t4_change_mac(adapter, adapter->mbox, pi->viid, + pi->xact_addr_filt, (u8 *)addr, true, true); + if (ret < 0) { + 
dev_err(adapter, "failed to set mac addr; err = %d\n", + ret); + return ret; + } + pi->xact_addr_filt = ret; + return 0; +} + static const struct eth_dev_ops cxgbe_eth_dev_ops = { .dev_start = cxgbe_dev_start, .dev_stop = cxgbe_dev_stop, @@ -985,6 +1043,9 @@ static const struct eth_dev_ops cxgbe_eth_dev_ops = { .get_eeprom = cxgbe_get_eeprom, .set_eeprom = cxgbe_set_eeprom, .get_reg = cxgbe_get_regs, + .rss_hash_update = cxgbe_dev_rss_hash_update, + .rss_hash_conf_get = cxgbe_dev_rss_hash_conf_get, + .mac_addr_set = cxgbe_mac_addr_set, }; /* @@ -1004,14 +1065,34 @@ static int eth_cxgbe_dev_init(struct rte_eth_dev *eth_dev) eth_dev->dev_ops = &cxgbe_eth_dev_ops; eth_dev->rx_pkt_burst = &cxgbe_recv_pkts; eth_dev->tx_pkt_burst = &cxgbe_xmit_pkts; + pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); - /* for secondary processes, we don't initialise any further as primary - * has already done this work. + /* for secondary processes, we attach to ethdevs allocated by primary + * and do minimal initialization. */ - if (rte_eal_process_type() != RTE_PROC_PRIMARY) + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { + int i; + + for (i = 1; i < MAX_NPORTS; i++) { + struct rte_eth_dev *rest_eth_dev; + char namei[RTE_ETH_NAME_MAX_LEN]; + + snprintf(namei, sizeof(namei), "%s_%d", + pci_dev->device.name, i); + rest_eth_dev = rte_eth_dev_attach_secondary(namei); + if (rest_eth_dev) { + rest_eth_dev->device = &pci_dev->device; + rest_eth_dev->dev_ops = + eth_dev->dev_ops; + rest_eth_dev->rx_pkt_burst = + eth_dev->rx_pkt_burst; + rest_eth_dev->tx_pkt_burst = + eth_dev->tx_pkt_burst; + rte_eth_dev_probing_finish(rest_eth_dev); + } + } return 0; - - pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + } snprintf(name, sizeof(name), "cxgbeadapter%d", eth_dev->data->port_id); adapter = rte_zmalloc(name, sizeof(*adapter), 0); @@ -1043,6 +1124,16 @@ out_free_adapter: return err; } +static int eth_cxgbe_dev_uninit(struct rte_eth_dev *eth_dev) +{ + struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); + struct adapter *adap = pi->adapter; + + /* Free up other ports and all resources */ + cxgbe_close(adap); + return 0; +} + static int eth_cxgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, struct rte_pci_device *pci_dev) { @@ -1052,7 +1143,7 @@ static int eth_cxgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, static int eth_cxgbe_pci_remove(struct rte_pci_device *pci_dev) { - return rte_eth_dev_pci_generic_remove(pci_dev, NULL); + return rte_eth_dev_pci_generic_remove(pci_dev, eth_cxgbe_dev_uninit); } static struct rte_pci_driver rte_cxgbe_pmd = { @@ -1065,3 +1156,6 @@ static struct rte_pci_driver rte_cxgbe_pmd = { RTE_PMD_REGISTER_PCI(net_cxgbe, rte_cxgbe_pmd); RTE_PMD_REGISTER_PCI_TABLE(net_cxgbe, cxgb4_pci_tbl); RTE_PMD_REGISTER_KMOD_DEP(net_cxgbe, "* igb_uio | uio_pci_generic | vfio-pci"); +RTE_PMD_REGISTER_PARAM_STRING(net_cxgbe, + CXGBE_DEVARG_KEEP_OVLAN "=<0|1> " + CXGBE_DEVARG_FORCE_LINK_UP "=<0|1> "); diff --git a/drivers/net/cxgbe/cxgbe_main.c b/drivers/net/cxgbe/cxgbe_main.c index 28db6c06..54eb23df 100644 --- a/drivers/net/cxgbe/cxgbe_main.c +++ b/drivers/net/cxgbe/cxgbe_main.c @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2014-2017 Chelsio Communications. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Chelsio Communications nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Chelsio Communications. + * All rights reserved. */ #include @@ -57,9 +29,9 @@ #include #include #include -#include #include #include +#include #include "common.h" #include "t4_regs.h" @@ -199,15 +171,16 @@ int cxgb4_set_rspq_intr_params(struct sge_rspq *q, unsigned int us, static inline bool is_x_1g_port(const struct link_config *lc) { - return (lc->supported & FW_PORT_CAP_SPEED_1G) != 0; + return (lc->pcaps & FW_PORT_CAP32_SPEED_1G) != 0; } static inline bool is_x_10g_port(const struct link_config *lc) { unsigned int speeds, high_speeds; - speeds = V_FW_PORT_CAP_SPEED(G_FW_PORT_CAP_SPEED(lc->supported)); - high_speeds = speeds & ~(FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G); + speeds = V_FW_PORT_CAP32_SPEED(G_FW_PORT_CAP32_SPEED(lc->pcaps)); + high_speeds = speeds & + ~(FW_PORT_CAP32_SPEED_100M | FW_PORT_CAP32_SPEED_1G); return high_speeds != 0; } @@ -345,14 +318,17 @@ static void setup_memwin(struct adapter *adap) MEMWIN_NIC)); } -static int init_rss(struct adapter *adap) +int init_rss(struct adapter *adap) { unsigned int i; - int err; - err = t4_init_rss_mode(adap, adap->mbox); - if (err) - return err; + if (is_pf4(adap)) { + int err; + + err = t4_init_rss_mode(adap, adap->mbox); + if (err) + return err; + } for_each_port(adap, i) { struct port_info *pi = adap2pinfo(adap, i); @@ -360,6 +336,8 @@ static int init_rss(struct adapter *adap) pi->rss = rte_zmalloc(NULL, pi->rss_size * sizeof(u16), 0); if (!pi->rss) return -ENOMEM; + + pi->rss_hf = CXGBE_RSS_HF_ALL; } return 0; } @@ -367,7 +345,7 @@ static int init_rss(struct adapter *adap) /** * Dump basic information about the adapter. */ -static void print_adapter_info(struct adapter *adap) +void print_adapter_info(struct adapter *adap) { /** * Hardware/Firmware/etc. Version/Revision IDs. 
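
The reworked is_x_10g_port() above reads as plain bit arithmetic once the speed field is extracted from the 32-bit capabilities: mask off the sub-10G bits and test what remains. A self-contained restatement with illustrative one-hot values (the real FW_PORT_CAP32_SPEED_* encoding and the G_/V_ field macros live in the Chelsio firmware headers):

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative one-hot stand-ins for FW_PORT_CAP32_SPEED_*. */
    #define CAP32_SPEED_100M (1u << 0)
    #define CAP32_SPEED_1G   (1u << 1)
    #define CAP32_SPEED_10G  (1u << 2)

    static bool is_high_speed(uint32_t speed_caps)
    {
            /* drop the sub-10G bits; anything left means 10G or faster */
            return (speed_caps & ~(CAP32_SPEED_100M | CAP32_SPEED_1G)) != 0;
    }
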
@@ -375,27 +353,29 @@ static void print_adapter_info(struct adapter *adap) t4_dump_version_info(adap); } -static void print_port_info(struct adapter *adap) +void print_port_info(struct adapter *adap) { int i; char buf[80]; struct rte_pci_addr *loc = &adap->pdev->addr; for_each_port(adap, i) { - const struct port_info *pi = &adap->port[i]; + const struct port_info *pi = adap2pinfo(adap, i); char *bufp = buf; - if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M) + if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100M) bufp += sprintf(bufp, "100M/"); - if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G) + if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_1G) bufp += sprintf(bufp, "1G/"); - if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G) + if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_10G) bufp += sprintf(bufp, "10G/"); - if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_25G) + if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_25G) bufp += sprintf(bufp, "25G/"); - if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G) + if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_40G) bufp += sprintf(bufp, "40G/"); - if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100G) + if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_50G) + bufp += sprintf(bufp, "50G/"); + if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100G) bufp += sprintf(bufp, "100G/"); if (bufp != buf) --bufp; @@ -412,6 +392,84 @@ static void print_port_info(struct adapter *adap) } } +static int +check_devargs_handler(__rte_unused const char *key, const char *value, + __rte_unused void *opaque) +{ + if (strcmp(value, "1")) + return -1; + + return 0; +} + +int cxgbe_get_devargs(struct rte_devargs *devargs, const char *key) +{ + struct rte_kvargs *kvlist; + + if (!devargs) + return 0; + + kvlist = rte_kvargs_parse(devargs->args, NULL); + if (!kvlist) + return 0; + + if (!rte_kvargs_count(kvlist, key)) { + rte_kvargs_free(kvlist); + return 0; + } + + if (rte_kvargs_process(kvlist, key, + check_devargs_handler, NULL) < 0) { + rte_kvargs_free(kvlist); + return 0; + } + rte_kvargs_free(kvlist); + + return 1; +} + +static void configure_vlan_types(struct adapter *adapter) +{ + struct rte_pci_device *pdev = adapter->pdev; + int i; + + for_each_port(adapter, i) { + /* OVLAN Type 0x88a8 */ + t4_set_reg_field(adapter, MPS_PORT_RX_OVLAN_REG(i, A_RX_OVLAN0), + V_OVLAN_MASK(M_OVLAN_MASK) | + V_OVLAN_ETYPE(M_OVLAN_ETYPE), + V_OVLAN_MASK(M_OVLAN_MASK) | + V_OVLAN_ETYPE(0x88a8)); + /* OVLAN Type 0x9100 */ + t4_set_reg_field(adapter, MPS_PORT_RX_OVLAN_REG(i, A_RX_OVLAN1), + V_OVLAN_MASK(M_OVLAN_MASK) | + V_OVLAN_ETYPE(M_OVLAN_ETYPE), + V_OVLAN_MASK(M_OVLAN_MASK) | + V_OVLAN_ETYPE(0x9100)); + /* OVLAN Type 0x8100 */ + t4_set_reg_field(adapter, MPS_PORT_RX_OVLAN_REG(i, A_RX_OVLAN2), + V_OVLAN_MASK(M_OVLAN_MASK) | + V_OVLAN_ETYPE(M_OVLAN_ETYPE), + V_OVLAN_MASK(M_OVLAN_MASK) | + V_OVLAN_ETYPE(0x8100)); + + /* IVLAN 0X8100 */ + t4_set_reg_field(adapter, MPS_PORT_RX_IVLAN(i), + V_IVLAN_ETYPE(M_IVLAN_ETYPE), + V_IVLAN_ETYPE(0x8100)); + + t4_set_reg_field(adapter, MPS_PORT_RX_CTL(i), + F_OVLAN_EN0 | F_OVLAN_EN1 | + F_OVLAN_EN2 | F_IVLAN_EN, + F_OVLAN_EN0 | F_OVLAN_EN1 | + F_OVLAN_EN2 | F_IVLAN_EN); + } + + if (cxgbe_get_devargs(pdev->device.devargs, CXGBE_DEVARG_KEEP_OVLAN)) + t4_tp_wr_bits_indirect(adapter, A_TP_INGRESS_CONFIG, + V_RM_OVLAN(1), V_RM_OVLAN(0)); +} + static void configure_pcie_ext_tag(struct adapter *adapter) { u16 v; @@ -828,6 +886,7 @@ static int adap_init0(struct adapter *adap) t4_init_sge_params(adap); t4_init_tp_params(adap); configure_pcie_ext_tag(adap); + 
configure_vlan_types(adap); adap->params.drv_memwin = MEMWIN_NIC; adap->flags |= FW_OK; @@ -860,7 +919,7 @@ void t4_os_portmod_changed(const struct adapter *adap, int port_id) NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM" }; - const struct port_info *pi = &adap->port[port_id]; + const struct port_info *pi = adap2pinfo(adap, port_id); if (pi->mod_type == FW_PORT_MOD_TYPE_NONE) dev_info(adap, "Port%d: port module unplugged\n", pi->port_id); @@ -881,6 +940,18 @@ void t4_os_portmod_changed(const struct adapter *adap, int port_id) pi->port_id, pi->mod_type); } +inline bool force_linkup(struct adapter *adap) +{ + struct rte_pci_device *pdev = adap->pdev; + + if (is_pf4(adap)) + return false; /* force_linkup not required for pf driver*/ + if (!cxgbe_get_devargs(pdev->device.devargs, + CXGBE_DEVARG_FORCE_LINK_UP)) + return false; + return true; +} + /** * link_start - enable a port * @dev: the port to enable @@ -912,7 +983,7 @@ int link_start(struct port_info *pi) ret = 0; } } - if (ret == 0) + if (ret == 0 && is_pf4(adapter)) ret = t4_link_l1cfg(adapter, adapter->mbox, pi->tx_chan, &pi->link_cfg); if (ret == 0) { @@ -926,18 +997,78 @@ int link_start(struct port_info *pi) ret = t4_enable_vi_params(adapter, adapter->mbox, pi->viid, true, true, false); } + + if (ret == 0 && force_linkup(adapter)) + pi->eth_dev->data->dev_link.link_status = ETH_LINK_UP; return ret; } /** - * cxgb4_write_rss - write the RSS table for a given port + * cxgbe_write_rss_conf - flash the RSS configuration for a given port + * @pi: the port + * @rss_hf: Hash configuration to apply + */ +int cxgbe_write_rss_conf(const struct port_info *pi, uint64_t rss_hf) +{ + struct adapter *adapter = pi->adapter; + const struct sge_eth_rxq *rxq; + u64 flags = 0; + u16 rss; + int err; + + /* Should never be called before setting up sge eth rx queues */ + if (!(adapter->flags & FULL_INIT_DONE)) { + dev_err(adap, "%s No RXQs available on port %d\n", + __func__, pi->port_id); + return -EINVAL; + } + + /* Don't allow unsupported hash functions */ + if (rss_hf & ~CXGBE_RSS_HF_ALL) + return -EINVAL; + + if (rss_hf & ETH_RSS_IPV4) + flags |= F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN; + + if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) + flags |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN; + + if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) + flags |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN | + F_FW_RSS_VI_CONFIG_CMD_UDPEN; + + if (rss_hf & ETH_RSS_IPV6) + flags |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN; + + if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) + flags |= F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN; + + if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) + flags |= F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN | + F_FW_RSS_VI_CONFIG_CMD_UDPEN; + + rxq = &adapter->sge.ethrxq[pi->first_qset]; + rss = rxq[0].rspq.abs_id; + + /* If Tunnel All Lookup isn't specified in the global RSS + * Configuration, then we need to specify a default Ingress + * Queue for any ingress packets which aren't hashed. We'll + * use our first ingress queue ... + */ + err = t4_config_vi_rss(adapter, adapter->mbox, pi->viid, + flags, rss); + return err; +} + +/** + * cxgbe_write_rss - write the RSS table for a given port * @pi: the port * @queues: array of queue indices for RSS * * Sets up the portion of the HW RSS table for the port's VI to distribute * packets to the Rx queues in @queues. 
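
cxgbe_write_rss_conf() above translates DPDK's ETH_RSS_* hash flags into the firmware's per-VI config bits, and the translation is not one-to-one: UDP hashing needs the matching four-tuple enable plus a single UDPEN bit shared between IPv4 and IPv6. A reduced IPv4-only sketch with stand-in flag values (neither the DPDK nor the firmware constants):

    #include <stdint.h>

    /* Illustrative stand-ins; not the DPDK or firmware values. */
    #define HF_IPV4          (1ULL << 0)  /* two-tuple IPv4 */
    #define HF_IPV4_TCP      (1ULL << 1)
    #define HF_IPV4_UDP      (1ULL << 2)

    #define FW_IP4TWOTUPEN   (1u << 0)
    #define FW_IP4FOURTUPEN  (1u << 1)
    #define FW_UDPEN         (1u << 2)    /* shared with IPv6 UDP */

    static uint32_t rss_hf_to_fw_flags(uint64_t rss_hf)
    {
            uint32_t flags = 0;

            if (rss_hf & HF_IPV4)
                    flags |= FW_IP4TWOTUPEN;
            if (rss_hf & HF_IPV4_TCP)
                    flags |= FW_IP4FOURTUPEN;
            if (rss_hf & HF_IPV4_UDP)
                    flags |= FW_IP4FOURTUPEN | FW_UDPEN;
            return flags;
    }

The IPv6 half of the real function follows the same shape with the IP6 counterparts.
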
*/ -int cxgb4_write_rss(const struct port_info *pi, const u16 *queues) +int cxgbe_write_rss(const struct port_info *pi, const u16 *queues) { u16 *rss; int i, err; @@ -958,20 +1089,6 @@ int cxgb4_write_rss(const struct port_info *pi, const u16 *queues) err = t4_config_rss_range(adapter, adapter->pf, pi->viid, 0, pi->rss_size, rss, pi->rss_size); - /* - * If Tunnel All Lookup isn't specified in the global RSS - * Configuration, then we need to specify a default Ingress - * Queue for any ingress packets which aren't hashed. We'll - * use our first ingress queue ... - */ - if (!err) - err = t4_config_vi_rss(adapter, adapter->mbox, pi->viid, - F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN | - F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN | - F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN | - F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN | - F_FW_RSS_VI_CONFIG_CMD_UDPEN, - rss[0]); rte_free(rss); return err; } @@ -1001,7 +1118,11 @@ int setup_rss(struct port_info *pi) for (j = 0; j < pi->rss_size; j++) pi->rss[j] = j % pi->n_rx_qsets; - err = cxgb4_write_rss(pi, pi->rss); + err = cxgbe_write_rss(pi, pi->rss); + if (err) + return err; + + err = cxgbe_write_rss_conf(pi, pi->rss_hf); if (err) return err; pi->flags |= PORT_RSS_DONE; @@ -1016,7 +1137,8 @@ int setup_rss(struct port_info *pi) static void enable_rx(struct adapter *adap, struct sge_rspq *q) { /* 0-increment GTS to start the timer and enable interrupts */ - t4_write_reg(adap, MYPF_REG(A_SGE_PF_GTS), + t4_write_reg(adap, is_pf4(adap) ? MYPF_REG(A_SGE_PF_GTS) : + T4VF_SGE_BASE_ADDR + A_SGE_VF_GTS, V_SEINTARM(q->intr_params) | V_INGRESSQID(q->cntxt_id)); } @@ -1051,7 +1173,7 @@ static void fw_caps_to_speed_caps(enum fw_port_type port_type, #define FW_CAPS_TO_SPEED(__fw_name) \ do { \ - if (fw_caps & FW_PORT_CAP_ ## __fw_name) \ + if (fw_caps & FW_PORT_CAP32_ ## __fw_name) \ SET_SPEED(__fw_name); \ } while (0) @@ -1106,6 +1228,7 @@ static void fw_caps_to_speed_caps(enum fw_port_type port_type, case FW_PORT_TYPE_CR4_QSFP: FW_CAPS_TO_SPEED(SPEED_25G); FW_CAPS_TO_SPEED(SPEED_40G); + FW_CAPS_TO_SPEED(SPEED_50G); FW_CAPS_TO_SPEED(SPEED_100G); break; @@ -1128,10 +1251,10 @@ void cxgbe_get_speed_caps(struct port_info *pi, u32 *speed_caps) { *speed_caps = 0; - fw_caps_to_speed_caps(pi->port_type, pi->link_cfg.supported, + fw_caps_to_speed_caps(pi->port_type, pi->link_cfg.pcaps, speed_caps); - if (!(pi->link_cfg.supported & FW_PORT_CAP_ANEG)) + if (!(pi->link_cfg.pcaps & FW_PORT_CAP32_ANEG)) *speed_caps |= ETH_LINK_SPEED_FIXED; } @@ -1147,7 +1270,8 @@ int cxgbe_up(struct adapter *adap) { enable_rx(adap, &adap->sge.fw_evtq); t4_sge_tx_monitor_start(adap); - t4_intr_enable(adap); + if (is_pf4(adap)) + t4_intr_enable(adap); adap->flags |= FULL_INIT_DONE; /* TODO: deadman watchdog ?? 
*/ @@ -1168,7 +1292,7 @@ int cxgbe_down(struct port_info *pi) return err; } - t4_reset_link_config(adapter, pi->port_id); + t4_reset_link_config(adapter, pi->pidx); return 0; } @@ -1181,7 +1305,8 @@ void cxgbe_close(struct adapter *adapter) int i; if (adapter->flags & FULL_INIT_DONE) { - t4_intr_disable(adapter); + if (is_pf4(adapter)) + t4_intr_disable(adapter); t4_sge_tx_monitor_stop(adapter); t4_free_sge_resources(adapter); for_each_port(adapter, i) { @@ -1190,11 +1315,16 @@ void cxgbe_close(struct adapter *adapter) t4_free_vi(adapter, adapter->mbox, adapter->pf, 0, pi->viid); rte_free(pi->eth_dev->data->mac_addrs); + /* Skip first port since it'll be freed by DPDK stack */ + if (i) { + rte_free(pi->eth_dev->data->dev_private); + rte_eth_dev_release_port(pi->eth_dev); + } } adapter->flags &= ~FULL_INIT_DONE; } - if (adapter->flags & FW_OK) + if (is_pf4(adapter) && (adapter->flags & FW_OK)) t4_fw_bye(adapter, adapter->mbox); } @@ -1265,21 +1395,16 @@ int cxgbe_probe(struct adapter *adapter) } for_each_port(adapter, i) { - char name[RTE_ETH_NAME_MAX_LEN]; - struct rte_eth_dev_data *data = NULL; const unsigned int numa_node = rte_socket_id(); + char name[RTE_ETH_NAME_MAX_LEN]; + struct rte_eth_dev *eth_dev; - pi = &adapter->port[i]; - pi->adapter = adapter; - pi->xact_addr_filt = -1; - pi->port_id = i; - - snprintf(name, sizeof(name), "cxgbe%d", - adapter->eth_dev->data->port_id + i); + snprintf(name, sizeof(name), "%s_%d", + adapter->pdev->device.name, i); if (i == 0) { /* First port is already allocated by DPDK */ - pi->eth_dev = adapter->eth_dev; + eth_dev = adapter->eth_dev; goto allocate_mac; } @@ -1289,21 +1414,26 @@ int cxgbe_probe(struct adapter *adapter) */ /* reserve an ethdev entry */ - pi->eth_dev = rte_eth_dev_allocate(name); - if (!pi->eth_dev) + eth_dev = rte_eth_dev_allocate(name); + if (!eth_dev) goto out_free; - data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node); - if (!data) + eth_dev->data->dev_private = + rte_zmalloc_socket(name, sizeof(struct port_info), + RTE_CACHE_LINE_SIZE, numa_node); + if (!eth_dev->data->dev_private) goto out_free; - data->port_id = adapter->eth_dev->data->port_id + i; - - pi->eth_dev->data = data; - allocate_mac: + pi = (struct port_info *)eth_dev->data->dev_private; + adapter->port[i] = pi; + pi->eth_dev = eth_dev; + pi->adapter = adapter; + pi->xact_addr_filt = -1; + pi->port_id = i; + pi->pidx = i; + pi->eth_dev->device = &adapter->pdev->device; - pi->eth_dev->data->dev_private = pi; pi->eth_dev->dev_ops = adapter->eth_dev->dev_ops; pi->eth_dev->tx_pkt_burst = adapter->eth_dev->tx_pkt_burst; pi->eth_dev->rx_pkt_burst = adapter->eth_dev->rx_pkt_burst; @@ -1318,6 +1448,11 @@ allocate_mac: err = -1; goto out_free; } + + if (i > 0) { + /* First port will be notified by upper layer */ + rte_eth_dev_probing_finish(eth_dev); + } } if (adapter->flags & FW_OK) { @@ -1349,8 +1484,11 @@ out_free: /* Skip first port since it'll be de-allocated by DPDK */ if (i == 0) continue; - if (pi->eth_dev->data) - rte_free(pi->eth_dev->data); + if (pi->eth_dev) { + if (pi->eth_dev->data->dev_private) + rte_free(pi->eth_dev->data->dev_private); + rte_eth_dev_release_port(pi->eth_dev); + } } if (adapter->flags & FW_OK) diff --git a/drivers/net/cxgbe/cxgbe_pfvf.h b/drivers/net/cxgbe/cxgbe_pfvf.h new file mode 100644 index 00000000..2bba9742 --- /dev/null +++ b/drivers/net/cxgbe/cxgbe_pfvf.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Chelsio Communications. + * All rights reserved. 
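
Port allocation in cxgbe_probe() now builds each extra ethdev explicitly instead of borrowing a static port array: reserve a named entry, then attach cache-aligned private data on the local NUMA node. A condensed sketch of just that allocation step (alloc_extra_port is an illustrative helper, not a driver function):

    #include <stdio.h>
    #include <rte_ethdev_driver.h>
    #include <rte_lcore.h>
    #include <rte_malloc.h>

    static struct rte_eth_dev *alloc_extra_port(const char *pci_name, int idx,
                                                size_t priv_size)
    {
            char name[RTE_ETH_NAME_MAX_LEN];
            struct rte_eth_dev *dev;

            snprintf(name, sizeof(name), "%s_%d", pci_name, idx);
            dev = rte_eth_dev_allocate(name);      /* reserve an ethdev slot */
            if (!dev)
                    return NULL;

            dev->data->dev_private = rte_zmalloc_socket(name, priv_size,
                                                        RTE_CACHE_LINE_SIZE,
                                                        rte_socket_id());
            if (!dev->data->dev_private) {
                    rte_eth_dev_release_port(dev);
                    return NULL;
            }
            return dev;
    }
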
+ */ + +#ifndef _CXGBE_PFVF_H_ +#define _CXGBE_PFVF_H_ + +void cxgbe_dev_rx_queue_release(void *q); +void cxgbe_dev_tx_queue_release(void *q); +void cxgbe_dev_stop(struct rte_eth_dev *eth_dev); +void cxgbe_dev_close(struct rte_eth_dev *eth_dev); +void cxgbe_dev_info_get(struct rte_eth_dev *eth_dev, + struct rte_eth_dev_info *device_info); +void cxgbe_dev_promiscuous_enable(struct rte_eth_dev *eth_dev); +void cxgbe_dev_promiscuous_disable(struct rte_eth_dev *eth_dev); +void cxgbe_dev_allmulticast_enable(struct rte_eth_dev *eth_dev); +void cxgbe_dev_allmulticast_disable(struct rte_eth_dev *eth_dev); +int cxgbe_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *addr); +int cxgbe_dev_configure(struct rte_eth_dev *eth_dev); +int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t queue_idx, + uint16_t nb_desc, unsigned int socket_id, + const struct rte_eth_txconf *tx_conf); +int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t queue_idx, + uint16_t nb_desc, unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mp); +int cxgbe_dev_tx_queue_start(struct rte_eth_dev *eth_dev, + uint16_t tx_queue_id); +int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev, + uint16_t tx_queue_id); +int cxgbe_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id); +int cxgbe_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id); +int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu); +int cxgbe_dev_start(struct rte_eth_dev *eth_dev); +int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev, + int wait_to_complete); +uint16_t cxgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); +uint16_t cxgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); +const uint32_t *cxgbe_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev); +#endif /* _CXGBE_PFVF_H_ */ diff --git a/drivers/net/cxgbe/cxgbevf_ethdev.c b/drivers/net/cxgbe/cxgbevf_ethdev.c new file mode 100644 index 00000000..a942ba6b --- /dev/null +++ b/drivers/net/cxgbe/cxgbevf_ethdev.c @@ -0,0 +1,198 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Chelsio Communications. + * All rights reserved. + */ + +#include +#include + +#include "cxgbe.h" +#include "cxgbe_pfvf.h" + +/* + * Macros needed to support the PCI Device ID Table ... + */ +#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \ + static const struct rte_pci_id cxgb4vf_pci_tbl[] = { +#define CH_PCI_DEVICE_ID_FUNCTION 0x8 + +#define PCI_VENDOR_ID_CHELSIO 0x1425 + +#define CH_PCI_ID_TABLE_ENTRY(devid) \ + { RTE_PCI_DEVICE(PCI_VENDOR_ID_CHELSIO, (devid)) } + +#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \ + { .vendor_id = 0, } \ + } + +/* + *... and the PCI ID Table itself ... + */ +#include "t4_pci_id_tbl.h" + +/* + * Get port statistics. 
+ */ +static int cxgbevf_dev_stats_get(struct rte_eth_dev *eth_dev, + struct rte_eth_stats *eth_stats) +{ + struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); + struct adapter *adapter = pi->adapter; + struct sge *s = &adapter->sge; + struct port_stats ps; + unsigned int i; + + cxgbevf_stats_get(pi, &ps); + + /* RX Stats */ + eth_stats->ierrors = ps.rx_len_err; + + /* TX Stats */ + eth_stats->opackets = ps.tx_bcast_frames + ps.tx_mcast_frames + + ps.tx_ucast_frames; + eth_stats->oerrors = ps.tx_drop; + + for (i = 0; i < pi->n_rx_qsets; i++) { + struct sge_eth_rxq *rxq = + &s->ethrxq[pi->first_qset + i]; + + eth_stats->q_ipackets[i] = rxq->stats.pkts; + eth_stats->q_ibytes[i] = rxq->stats.rx_bytes; + eth_stats->ipackets += eth_stats->q_ipackets[i]; + eth_stats->ibytes += eth_stats->q_ibytes[i]; + } + + for (i = 0; i < pi->n_tx_qsets; i++) { + struct sge_eth_txq *txq = + &s->ethtxq[pi->first_qset + i]; + + eth_stats->q_opackets[i] = txq->stats.pkts; + eth_stats->q_obytes[i] = txq->stats.tx_bytes; + eth_stats->q_errors[i] = txq->stats.mapping_err; + } + return 0; +} + +static const struct eth_dev_ops cxgbevf_eth_dev_ops = { + .dev_start = cxgbe_dev_start, + .dev_stop = cxgbe_dev_stop, + .dev_close = cxgbe_dev_close, + .promiscuous_enable = cxgbe_dev_promiscuous_enable, + .promiscuous_disable = cxgbe_dev_promiscuous_disable, + .allmulticast_enable = cxgbe_dev_allmulticast_enable, + .allmulticast_disable = cxgbe_dev_allmulticast_disable, + .dev_configure = cxgbe_dev_configure, + .dev_infos_get = cxgbe_dev_info_get, + .dev_supported_ptypes_get = cxgbe_dev_supported_ptypes_get, + .link_update = cxgbe_dev_link_update, + .mtu_set = cxgbe_dev_mtu_set, + .tx_queue_setup = cxgbe_dev_tx_queue_setup, + .tx_queue_start = cxgbe_dev_tx_queue_start, + .tx_queue_stop = cxgbe_dev_tx_queue_stop, + .tx_queue_release = cxgbe_dev_tx_queue_release, + .rx_queue_setup = cxgbe_dev_rx_queue_setup, + .rx_queue_start = cxgbe_dev_rx_queue_start, + .rx_queue_stop = cxgbe_dev_rx_queue_stop, + .rx_queue_release = cxgbe_dev_rx_queue_release, + .stats_get = cxgbevf_dev_stats_get, + .mac_addr_set = cxgbe_mac_addr_set, +}; + +/* + * Initialize driver + * It returns 0 on success. + */ +static int eth_cxgbevf_dev_init(struct rte_eth_dev *eth_dev) +{ + struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); + struct rte_pci_device *pci_dev; + char name[RTE_ETH_NAME_MAX_LEN]; + struct adapter *adapter = NULL; + int err = 0; + + CXGBE_FUNC_TRACE(); + + eth_dev->dev_ops = &cxgbevf_eth_dev_ops; + eth_dev->rx_pkt_burst = &cxgbe_recv_pkts; + eth_dev->tx_pkt_burst = &cxgbe_xmit_pkts; + pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + + /* for secondary processes, we attach to ethdevs allocated by primary + * and do minimal initialization. 
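
The per-queue loops in cxgbevf_dev_stats_get() above do double duty: they fill DPDK's per-queue counters and accumulate the device totals in the same pass. A stripped-down sketch of the RX side with stand-in types (the real code reads the driver's sge_eth_rxq/sge_eth_txq counters):

    #include <stdint.h>

    struct q_stats { uint64_t pkts; uint64_t bytes; };

    struct rx_totals {
            uint64_t ipackets, ibytes;
            uint64_t q_ipackets[16], q_ibytes[16];
    };

    static void fill_rx_stats(struct rx_totals *out,
                              const struct q_stats *rxq, unsigned int nq)
    {
            unsigned int i;

            for (i = 0; i < nq; i++) {
                    out->q_ipackets[i] = rxq[i].pkts;    /* per-queue view */
                    out->q_ibytes[i] = rxq[i].bytes;
                    out->ipackets += out->q_ipackets[i]; /* running total */
                    out->ibytes += out->q_ibytes[i];
            }
    }
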
+ */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { + int i; + + for (i = 1; i < MAX_NPORTS; i++) { + struct rte_eth_dev *rest_eth_dev; + char namei[RTE_ETH_NAME_MAX_LEN]; + + snprintf(namei, sizeof(namei), "%s_%d", + pci_dev->device.name, i); + rest_eth_dev = rte_eth_dev_attach_secondary(namei); + if (rest_eth_dev) { + rest_eth_dev->device = &pci_dev->device; + rest_eth_dev->dev_ops = + eth_dev->dev_ops; + rest_eth_dev->rx_pkt_burst = + eth_dev->rx_pkt_burst; + rest_eth_dev->tx_pkt_burst = + eth_dev->tx_pkt_burst; + rte_eth_dev_probing_finish(rest_eth_dev); + } + } + return 0; + } + + snprintf(name, sizeof(name), "cxgbevfadapter%d", + eth_dev->data->port_id); + adapter = rte_zmalloc(name, sizeof(*adapter), 0); + if (!adapter) + return -1; + + adapter->use_unpacked_mode = 1; + adapter->regs = (void *)pci_dev->mem_resource[0].addr; + if (!adapter->regs) { + dev_err(adapter, "%s: cannot map device registers\n", __func__); + err = -ENOMEM; + goto out_free_adapter; + } + adapter->pdev = pci_dev; + adapter->eth_dev = eth_dev; + pi->adapter = adapter; + err = cxgbevf_probe(adapter); + if (err) { + dev_err(adapter, "%s: cxgbevf probe failed with err %d\n", + __func__, err); + goto out_free_adapter; + } + + return 0; + +out_free_adapter: + rte_free(adapter); + return err; +} + +static int eth_cxgbevf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct port_info), + eth_cxgbevf_dev_init); +} + +static int eth_cxgbevf_pci_remove(struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_remove(pci_dev, NULL); +} + +static struct rte_pci_driver rte_cxgbevf_pmd = { + .id_table = cxgb4vf_pci_tbl, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING, + .probe = eth_cxgbevf_pci_probe, + .remove = eth_cxgbevf_pci_remove, +}; + +RTE_PMD_REGISTER_PCI(net_cxgbevf, rte_cxgbevf_pmd); +RTE_PMD_REGISTER_PCI_TABLE(net_cxgbevf, cxgb4vf_pci_tbl); +RTE_PMD_REGISTER_KMOD_DEP(net_cxgbevf, "* igb_uio | vfio-pci"); diff --git a/drivers/net/cxgbe/cxgbevf_main.c b/drivers/net/cxgbe/cxgbevf_main.c new file mode 100644 index 00000000..5b3fb539 --- /dev/null +++ b/drivers/net/cxgbe/cxgbevf_main.c @@ -0,0 +1,311 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Chelsio Communications. + * All rights reserved. + */ + +#include +#include +#include + +#include "common.h" +#include "t4_regs.h" +#include "t4_msg.h" +#include "cxgbe.h" + +/* + * Figure out how many Ports and Queue Sets we can support. This depends on + * knowing our Virtual Function Resources and may be called a second time if + * we fall back from MSI-X to MSI Interrupt Mode. + */ +static void size_nports_qsets(struct adapter *adapter) +{ + struct vf_resources *vfres = &adapter->params.vfres; + unsigned int ethqsets, pmask_nports; + + /* + * The number of "ports" which we support is equal to the number of + * Virtual Interfaces with which we've been provisioned. + */ + adapter->params.nports = vfres->nvi; + if (adapter->params.nports > MAX_NPORTS) { + dev_warn(adapter->pdev_dev, "only using %d of %d maximum" + " allowed virtual interfaces\n", MAX_NPORTS, + adapter->params.nports); + adapter->params.nports = MAX_NPORTS; + } + + /* + * We may have been provisioned with more VIs than the number of + * ports we're allowed to access (our Port Access Rights Mask). + * This is obviously a configuration conflict but we don't want to + * do anything silly just because of that. 
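
The Port Access Rights clamp that follows this comment boils down to a population count: each set bit in pmask is one port this VF may touch, and hweight32() counts them. A minimal equivalent using the GCC builtin (clamp_nports is an illustrative name):

    #include <stdint.h>

    static unsigned int clamp_nports(unsigned int nvi, uint32_t pmask)
    {
            /* hweight32() in the driver == population count */
            unsigned int pmask_nports =
                    (unsigned int)__builtin_popcount(pmask);

            return pmask_nports < nvi ? pmask_nports : nvi;
    }

For example, pmask = 0x5 grants ports 0 and 2, so four provisioned VIs would be clamped to two usable ports.
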
+ */ + pmask_nports = hweight32(adapter->params.vfres.pmask); + if (pmask_nports < adapter->params.nports) { + dev_warn(adapter->pdev_dev, "only using %d of %d provisioned" + " virtual interfaces; limited by Port Access Rights" + " mask %#x\n", pmask_nports, adapter->params.nports, + adapter->params.vfres.pmask); + adapter->params.nports = pmask_nports; + } + + /* + * We need to reserve an Ingress Queue for the Asynchronous Firmware + * Event Queue. + * + * For each Queue Set, we'll need the ability to allocate two Egress + * Contexts -- one for the Ingress Queue Free List and one for the TX + * Ethernet Queue. + */ + ethqsets = vfres->niqflint - 1; + if (vfres->nethctrl != ethqsets) + ethqsets = min(vfres->nethctrl, ethqsets); + if (vfres->neq < ethqsets * 2) + ethqsets = vfres->neq / 2; + if (ethqsets > MAX_ETH_QSETS) + ethqsets = MAX_ETH_QSETS; + adapter->sge.max_ethqsets = ethqsets; + + if (adapter->sge.max_ethqsets < adapter->params.nports) { + dev_warn(adapter->pdev_dev, "only using %d of %d available" + " virtual interfaces (too few Queue Sets)\n", + adapter->sge.max_ethqsets, adapter->params.nports); + adapter->params.nports = adapter->sge.max_ethqsets; + } +} + +void cxgbevf_stats_get(struct port_info *pi, struct port_stats *stats) +{ + t4vf_get_port_stats(pi->adapter, pi->pidx, stats); +} + +static int adap_init0vf(struct adapter *adapter) +{ + u32 param, val = 0; + int err; + + err = t4vf_fw_reset(adapter); + if (err < 0) { + dev_err(adapter->pdev_dev, "FW reset failed: err=%d\n", err); + return err; + } + + /* + * Grab basic operational parameters. These will predominantly have + * been set up by the Physical Function Driver or will be hard coded + * into the adapter. We just have to live with them ... Note that + * we _must_ get our VPD parameters before our SGE parameters because + * we need to know the adapter's core clock from the VPD in order to + * properly decode the SGE Timer Values. + */ + err = t4vf_get_dev_params(adapter); + if (err) { + dev_err(adapter->pdev_dev, "unable to retrieve adapter" + " device parameters: err=%d\n", err); + return err; + } + + err = t4vf_get_vpd_params(adapter); + if (err) { + dev_err(adapter->pdev_dev, "unable to retrieve adapter" + " VPD parameters: err=%d\n", err); + return err; + } + + adapter->pf = t4vf_get_pf_from_vf(adapter); + err = t4vf_sge_init(adapter); + if (err) { + dev_err(adapter->pdev_dev, "error in sge init\n"); + return err; + } + + err = t4vf_get_rss_glb_config(adapter); + if (err) { + dev_err(adapter->pdev_dev, "unable to retrieve adapter" + " RSS parameters: err=%d\n", err); + return err; + } + if (adapter->params.rss.mode != + FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) { + dev_err(adapter->pdev_dev, "unable to operate with global RSS" + " mode %d\n", adapter->params.rss.mode); + return -EINVAL; + } + + /* If we're running on newer firmware, let it know that we're + * prepared to deal with encapsulated CPL messages. Older + * firmware won't understand this and we'll just get + * unencapsulated messages ... + */ + param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | + V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP); + val = 1; + t4vf_set_params(adapter, 1, &param, &val); + + /* + * Grab our Virtual Interface resource allocation, extract the + * features that we're interested in and do a bit of sanity testing on + * what we discover.
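
The queue-set sizing in size_nports_qsets() above is a chain of minima over the VF's resource grants: one ingress queue is held back for the firmware event queue, and every queue set costs one ingress queue with free list plus two egress contexts. A self-contained restatement (budget_qsets is an illustrative name):

    static unsigned int budget_qsets(unsigned int niqflint,
                                     unsigned int nethctrl,
                                     unsigned int neq, unsigned int hw_max)
    {
            unsigned int qsets = niqflint - 1; /* keep one for FW events */

            if (nethctrl < qsets)              /* TX control capacity */
                    qsets = nethctrl;
            if (neq < qsets * 2)               /* 2 egress contexts per set */
                    qsets = neq / 2;
            if (qsets > hw_max)
                    qsets = hw_max;
            return qsets;
    }

With, say, niqflint = 10, nethctrl = 8 and neq = 12, the budget works out to min(9, 8) = 8 and then 12 / 2 = 6 queue sets.
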
+ */ + err = t4vf_get_vfres(adapter); + if (err) { + dev_err(adapter->pdev_dev, "unable to get virtual interface" + " resources: err=%d\n", err); + return err; + } + + /* + * Check for various parameter sanity issues. + */ + if (adapter->params.vfres.pmask == 0) { + dev_err(adapter->pdev_dev, "no port access configured\n" + "usable!\n"); + return -EINVAL; + } + if (adapter->params.vfres.nvi == 0) { + dev_err(adapter->pdev_dev, "no virtual interfaces configured/" + "usable!\n"); + return -EINVAL; + } + + /* + * Initialize nports and max_ethqsets now that we have our Virtual + * Function Resources. + */ + size_nports_qsets(adapter); + adapter->flags |= FW_OK; + return 0; +} + +int cxgbevf_probe(struct adapter *adapter) +{ + struct port_info *pi; + unsigned int pmask; + int err = 0; + int i; + + t4_os_lock_init(&adapter->mbox_lock); + TAILQ_INIT(&adapter->mbox_list); + err = t4vf_prep_adapter(adapter); + if (err) + return err; + + if (!is_t4(adapter->params.chip)) { + adapter->bar2 = (void *)adapter->pdev->mem_resource[2].addr; + if (!adapter->bar2) { + dev_err(adapter, "cannot map device bar2 region\n"); + err = -ENOMEM; + return err; + } + } + + err = adap_init0vf(adapter); + if (err) { + dev_err(adapter, "%s: Adapter initialization failed, error %d\n", + __func__, err); + goto out_free; + } + + pmask = adapter->params.vfres.pmask; + for_each_port(adapter, i) { + const unsigned int numa_node = rte_socket_id(); + char name[RTE_ETH_NAME_MAX_LEN]; + struct rte_eth_dev *eth_dev; + int port_id; + + if (pmask == 0) + break; + port_id = ffs(pmask) - 1; + pmask &= ~(1 << port_id); + + snprintf(name, sizeof(name), "%s_%d", + adapter->pdev->device.name, i); + + if (i == 0) { + /* First port is already allocated by DPDK */ + eth_dev = adapter->eth_dev; + goto allocate_mac; + } + + /* + * now do all data allocation - for eth_dev structure, + * and internal (private) data for the remaining ports + */ + + /* reserve an ethdev entry */ + eth_dev = rte_eth_dev_allocate(name); + if (!eth_dev) { + err = -ENOMEM; + goto out_free; + } + eth_dev->data->dev_private = + rte_zmalloc_socket(name, sizeof(struct port_info), + RTE_CACHE_LINE_SIZE, numa_node); + if (!eth_dev->data->dev_private) + goto out_free; + +allocate_mac: + pi = (struct port_info *)eth_dev->data->dev_private; + adapter->port[i] = pi; + pi->eth_dev = eth_dev; + pi->adapter = adapter; + pi->xact_addr_filt = -1; + pi->port_id = port_id; + pi->pidx = i; + + pi->eth_dev->device = &adapter->pdev->device; + pi->eth_dev->dev_ops = adapter->eth_dev->dev_ops; + pi->eth_dev->tx_pkt_burst = adapter->eth_dev->tx_pkt_burst; + pi->eth_dev->rx_pkt_burst = adapter->eth_dev->rx_pkt_burst; + + rte_eth_copy_pci_info(pi->eth_dev, adapter->pdev); + pi->eth_dev->data->mac_addrs = rte_zmalloc(name, + ETHER_ADDR_LEN, 0); + if (!pi->eth_dev->data->mac_addrs) { + dev_err(adapter, "%s: Mem allocation failed for storing mac addr, aborting\n", + __func__); + err = -ENOMEM; + goto out_free; + } + + if (i > 0) { + /* First port will be notified by upper layer */ + rte_eth_dev_probing_finish(eth_dev); + } + } + + if (adapter->flags & FW_OK) { + err = t4vf_port_init(adapter); + if (err) { + dev_err(adapter, "%s: t4_port_init failed with err %d\n", + __func__, err); + goto out_free; + } + } + + cfg_queues(adapter->eth_dev); + print_adapter_info(adapter); + print_port_info(adapter); + + err = init_rss(adapter); + if (err) + goto out_free; + return 0; + +out_free: + for_each_port(adapter, i) { + pi = adap2pinfo(adapter, i); + if (pi->viid != 0) + t4_free_vi(adapter, adapter->mbox, 
adapter->pf,
+				   0, pi->viid);
+		/* Skip first port since it'll be de-allocated by DPDK */
+		if (i == 0)
+			continue;
+		if (pi->eth_dev) {
+			if (pi->eth_dev->data->dev_private)
+				rte_free(pi->eth_dev->data->dev_private);
+			rte_eth_dev_release_port(pi->eth_dev);
+		}
+	}
+	return -err;
+}
diff --git a/drivers/net/cxgbe/sge.c b/drivers/net/cxgbe/sge.c
index 3d5aa596..b5d3611d 100644
--- a/drivers/net/cxgbe/sge.c
+++ b/drivers/net/cxgbe/sge.c
@@ -1,34 +1,6 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2014-2015 Chelsio Communications.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Chelsio Communications nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
  */
 
 #include
@@ -337,7 +309,11 @@ static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
	 * mechanism.
	 */
	if (unlikely(!q->bar2_addr)) {
-		t4_write_reg_relaxed(adap, MYPF_REG(A_SGE_PF_KDOORBELL),
+		u32 reg = is_pf4(adap) ? MYPF_REG(A_SGE_PF_KDOORBELL) :
+					 T4VF_SGE_BASE_ADDR +
+					 A_SGE_VF_KDOORBELL;
+
+		t4_write_reg_relaxed(adap, reg,
				     val | V_QID(q->cntxt_id));
	} else {
		writel_relaxed(val | V_QID(q->bar2_qid),
@@ -385,7 +361,8 @@ static unsigned int refill_fl_usembufs(struct adapter *adap, struct sge_fl *q,
	struct rte_mbuf *buf_bulk[n];
	int ret, i;
	struct rte_pktmbuf_pool_private *mbp_priv;
-	u8 jumbo_en = rxq->rspq.eth_dev->data->dev_conf.rxmode.jumbo_frame;
+	u8 jumbo_en = !!(rxq->rspq.eth_dev->data->dev_conf.rxmode.offloads &
+			 DEV_RX_OFFLOAD_JUMBO_FRAME);
 
	/* Use jumbo mtu buffers if mbuf data room size can fit jumbo data. */
	mbp_priv = rte_mempool_get_priv(rxq->rspq.mb_pool);
@@ -570,12 +547,16 @@ static inline int is_eth_imm(const struct rte_mbuf *m)
 
 /**
  * calc_tx_flits - calculate the number of flits for a packet Tx WR
  * @m: the packet
+ * @adap: adapter structure pointer
  *
  * Returns the number of flits needed for a Tx WR for the given Ethernet
  * packet, including the needed WR and CPL headers.
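+ *
+ * (Editorial illustration: a flit is 8 bytes of descriptor space, as the
+ * sizeof(__be64) divisions below reflect. A non-TSO packet too large to
+ * go out as immediate data costs sgl_len(nb_segs) flits for its
+ * scatter/gather list plus the WR and CPL headers rounded into flits;
+ * the VF path differs only in substituting the larger
+ * fw_eth_tx_pkt_vm_wr header in that sum.)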
*/ -static inline unsigned int calc_tx_flits(const struct rte_mbuf *m) +static inline unsigned int calc_tx_flits(const struct rte_mbuf *m, + struct adapter *adap) { + size_t wr_size = is_pf4(adap) ? sizeof(struct fw_eth_tx_pkt_wr) : + sizeof(struct fw_eth_tx_pkt_vm_wr); unsigned int flits; int hdrlen; @@ -600,11 +581,10 @@ static inline unsigned int calc_tx_flits(const struct rte_mbuf *m) */ flits = sgl_len(m->nb_segs); if (m->tso_segsz) - flits += (sizeof(struct fw_eth_tx_pkt_wr) + - sizeof(struct cpl_tx_pkt_lso_core) + + flits += (wr_size + sizeof(struct cpl_tx_pkt_lso_core) + sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64); else - flits += (sizeof(struct fw_eth_tx_pkt_wr) + + flits += (wr_size + sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64); return flits; } @@ -848,14 +828,20 @@ static void tx_timer_cb(void *data) static inline void ship_tx_pkt_coalesce_wr(struct adapter *adap, struct sge_eth_txq *txq) { - u32 wr_mid; - struct sge_txq *q = &txq->q; + struct fw_eth_tx_pkts_vm_wr *vmwr; + const size_t fw_hdr_copy_len = (sizeof(vmwr->ethmacdst) + + sizeof(vmwr->ethmacsrc) + + sizeof(vmwr->ethtype) + + sizeof(vmwr->vlantci)); struct fw_eth_tx_pkts_wr *wr; + struct sge_txq *q = &txq->q; unsigned int ndesc; + u32 wr_mid; /* fill the pkts WR header */ wr = (void *)&q->desc[q->pidx]; wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS2_WR)); + vmwr = (void *)&q->desc[q->pidx]; wr_mid = V_FW_WR_LEN16(DIV_ROUND_UP(q->coalesce.flits, 2)); ndesc = flits_to_desc(q->coalesce.flits); @@ -863,12 +849,18 @@ static inline void ship_tx_pkt_coalesce_wr(struct adapter *adap, wr->plen = cpu_to_be16(q->coalesce.len); wr->npkt = q->coalesce.idx; wr->r3 = 0; - wr->type = q->coalesce.type; + if (is_pf4(adap)) { + wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS2_WR)); + wr->type = q->coalesce.type; + } else { + wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS_VM_WR)); + vmwr->r4 = 0; + memcpy((void *)vmwr->ethmacdst, (void *)q->coalesce.ethmacdst, + fw_hdr_copy_len); + } /* zero out coalesce structure members */ - q->coalesce.idx = 0; - q->coalesce.flits = 0; - q->coalesce.len = 0; + memset((void *)&q->coalesce, 0, sizeof(struct eth_coalesce)); txq_advance(q, ndesc); txq->stats.coal_wr++; @@ -896,13 +888,27 @@ static inline int should_tx_packet_coalesce(struct sge_eth_txq *txq, unsigned int *nflits, struct adapter *adap) { + struct fw_eth_tx_pkts_vm_wr *wr; + const size_t fw_hdr_copy_len = (sizeof(wr->ethmacdst) + + sizeof(wr->ethmacsrc) + + sizeof(wr->ethtype) + + sizeof(wr->vlantci)); struct sge_txq *q = &txq->q; unsigned int flits, ndesc; unsigned char type = 0; - int credits; + int credits, wr_size; /* use coal WR type 1 when no frags are present */ type = (mbuf->nb_segs == 1) ? 1 : 0; + if (!is_pf4(adap)) { + if (!type) + return 0; + + if (q->coalesce.idx && memcmp((void *)q->coalesce.ethmacdst, + rte_pktmbuf_mtod(mbuf, void *), + fw_hdr_copy_len)) + ship_tx_pkt_coalesce_wr(adap, txq); + } if (unlikely(type != q->coalesce.type && q->coalesce.idx)) ship_tx_pkt_coalesce_wr(adap, txq); @@ -948,16 +954,21 @@ static inline int should_tx_packet_coalesce(struct sge_eth_txq *txq, new: /* start a new pkts WR, the WR header is not filled below */ - flits += sizeof(struct fw_eth_tx_pkts_wr) / sizeof(__be64); + wr_size = is_pf4(adap) ? 
			 sizeof(struct fw_eth_tx_pkts_wr) :
+			 sizeof(struct fw_eth_tx_pkts_vm_wr);
+	flits += wr_size / sizeof(__be64);
	ndesc = flits_to_desc(q->coalesce.flits + flits);
	credits = txq_avail(q) - ndesc;
	if (unlikely(credits < 0 || wraps_around(q, ndesc)))
		return 0;
-	q->coalesce.flits += 2;
+	q->coalesce.flits += wr_size / sizeof(__be64);
	q->coalesce.type = type;
	q->coalesce.ptr = (unsigned char *)&q->desc[q->pidx] +
-			   2 * sizeof(__be64);
+			   q->coalesce.flits * sizeof(__be64);
+	if (!is_pf4(adap))
+		memcpy((void *)q->coalesce.ethmacdst,
+		       rte_pktmbuf_mtod(mbuf, void *), fw_hdr_copy_len);
	return 1;
 }
 
@@ -987,6 +998,8 @@ static inline int tx_do_packet_coalesce(struct sge_eth_txq *txq,
	struct cpl_tx_pkt_core *cpl;
	struct tx_sw_desc *sd;
	unsigned int idx = q->coalesce.idx, len = mbuf->pkt_len;
+	unsigned int max_coal_pkt_num = is_pf4(adap) ? ETH_COALESCE_PKT_NUM :
+						       ETH_COALESCE_VF_PKT_NUM;
 
 #ifdef RTE_LIBRTE_CXGBE_TPUT
	RTE_SET_USED(nb_pkts);
@@ -1030,9 +1043,12 @@ static inline int tx_do_packet_coalesce(struct sge_eth_txq *txq,
			cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(mbuf->vlan_tci);
	}
 
-	cpl->ctrl0 = htonl(V_TXPKT_OPCODE(CPL_TX_PKT_XT) |
-			   V_TXPKT_INTF(pi->tx_chan) |
-			   V_TXPKT_PF(adap->pf));
+	cpl->ctrl0 = htonl(V_TXPKT_OPCODE(CPL_TX_PKT_XT));
+	if (is_pf4(adap))
+		cpl->ctrl0 |= htonl(V_TXPKT_INTF(pi->tx_chan) |
+				    V_TXPKT_PF(adap->pf));
+	else
+		cpl->ctrl0 |= htonl(V_TXPKT_INTF(pi->port_id));
	cpl->pack = htons(0);
	cpl->len = htons(len);
	cpl->ctrl1 = cpu_to_be64(cntrl);
@@ -1061,7 +1077,7 @@ static inline int tx_do_packet_coalesce(struct sge_eth_txq *txq,
	sd->coalesce.idx = (idx & 1) + 1;
 
	/* send the coalesced work request if max reached */
-	if (++q->coalesce.idx == ETH_COALESCE_PKT_NUM
+	if (++q->coalesce.idx == max_coal_pkt_num
 #ifndef RTE_LIBRTE_CXGBE_TPUT
	    || q->coalesce.idx >= nb_pkts
 #endif
@@ -1085,6 +1101,7 @@ int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf,
	struct adapter *adap;
	struct rte_mbuf *m = mbuf;
	struct fw_eth_tx_pkt_wr *wr;
+	struct fw_eth_tx_pkt_vm_wr *vmwr;
	struct cpl_tx_pkt_core *cpl;
	struct tx_sw_desc *d;
	dma_addr_t addr[m->nb_segs];
@@ -1095,7 +1112,7 @@ int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf,
	u32 wr_mid;
	u64 cntrl, *end;
	bool v6;
-	u32 max_pkt_len = txq->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
+	u32 max_pkt_len = txq->data->dev_conf.rxmode.max_rx_pkt_len;
 
	/* Reject xmit if queue is stopped */
	if (unlikely(txq->flags & EQ_STOPPED))
@@ -1115,7 +1132,7 @@ out_free:
	    (unlikely(m->pkt_len > max_pkt_len)))
		goto out_free;
 
-	pi = (struct port_info *)txq->eth_dev->data->dev_private;
+	pi = (struct port_info *)txq->data->dev_private;
	adap = pi->adapter;
 
	cntrl = F_TXPKT_L4CSUM_DIS | F_TXPKT_IPCSUM_DIS;
@@ -1141,7 +1158,7 @@ out_free:
	if (txq->q.coalesce.idx)
		ship_tx_pkt_coalesce_wr(adap, txq);
 
-	flits = calc_tx_flits(m);
+	flits = calc_tx_flits(m, adap);
	ndesc = flits_to_desc(flits);
	credits = txq_avail(&txq->q) - ndesc;
 
@@ -1163,31 +1180,55 @@ out_free:
	}
 
	wr = (void *)&txq->q.desc[txq->q.pidx];
+	vmwr = (void *)&txq->q.desc[txq->q.pidx];
	wr->equiq_to_len16 = htonl(wr_mid);
-	wr->r3 = rte_cpu_to_be_64(0);
-	end = (u64 *)wr + flits;
+	if (is_pf4(adap)) {
+		wr->r3 = rte_cpu_to_be_64(0);
+		end = (u64 *)wr + flits;
+	} else {
+		const size_t fw_hdr_copy_len = (sizeof(vmwr->ethmacdst) +
						sizeof(vmwr->ethmacsrc) +
						sizeof(vmwr->ethtype) +
						sizeof(vmwr->vlantci));
+
+		vmwr->r3[0] = rte_cpu_to_be_32(0);
+		vmwr->r3[1] = rte_cpu_to_be_32(0);
+		memcpy((void *)vmwr->ethmacdst, rte_pktmbuf_mtod(m, void *),
+		       fw_hdr_copy_len);
+		end = (u64 *)vmwr + flits;
+
} len = 0; len += sizeof(*cpl); /* Coalescing skipped and we send through normal path */ if (!(m->ol_flags & PKT_TX_TCP_SEG)) { - wr->op_immdlen = htonl(V_FW_WR_OP(FW_ETH_TX_PKT_WR) | + wr->op_immdlen = htonl(V_FW_WR_OP(is_pf4(adap) ? + FW_ETH_TX_PKT_WR : + FW_ETH_TX_PKT_VM_WR) | V_FW_WR_IMMDLEN(len)); - cpl = (void *)(wr + 1); + if (is_pf4(adap)) + cpl = (void *)(wr + 1); + else + cpl = (void *)(vmwr + 1); if (m->ol_flags & PKT_TX_IP_CKSUM) { cntrl = hwcsum(adap->params.chip, m) | F_TXPKT_IPCSUM_DIS; txq->stats.tx_cso++; } } else { - lso = (void *)(wr + 1); + if (is_pf4(adap)) + lso = (void *)(wr + 1); + else + lso = (void *)(vmwr + 1); v6 = (m->ol_flags & PKT_TX_IPV6) != 0; l3hdr_len = m->l3_len; l4hdr_len = m->l4_len; eth_xtra_len = m->l2_len - ETHER_HDR_LEN; len += sizeof(*lso); - wr->op_immdlen = htonl(V_FW_WR_OP(FW_ETH_TX_PKT_WR) | + wr->op_immdlen = htonl(V_FW_WR_OP(is_pf4(adap) ? + FW_ETH_TX_PKT_WR : + FW_ETH_TX_PKT_VM_WR) | V_FW_WR_IMMDLEN(len)); lso->lso_ctrl = htonl(V_LSO_OPCODE(CPL_TX_PKT_LSO) | F_LSO_FIRST_SLICE | F_LSO_LAST_SLICE | @@ -1221,9 +1262,14 @@ out_free: cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m->vlan_tci); } - cpl->ctrl0 = htonl(V_TXPKT_OPCODE(CPL_TX_PKT_XT) | - V_TXPKT_INTF(pi->tx_chan) | - V_TXPKT_PF(adap->pf)); + cpl->ctrl0 = htonl(V_TXPKT_OPCODE(CPL_TX_PKT_XT)); + if (is_pf4(adap)) + cpl->ctrl0 |= htonl(V_TXPKT_INTF(pi->tx_chan) | + V_TXPKT_PF(adap->pf)); + else + cpl->ctrl0 |= htonl(V_TXPKT_INTF(pi->port_id) | + V_TXPKT_PF(0)); + cpl->pack = htons(0); cpl->len = htons(m->pkt_len); cpl->ctrl1 = cpu_to_be64(cntrl); @@ -1299,7 +1345,8 @@ static void *alloc_ring(size_t nelem, size_t elem_size, * handle the maximum ring size is allocated in order to allow for * resizing in later calls to the queue setup function. */ - tz = rte_memzone_reserve_aligned(z_name, len, socket_id, 0, 4096); + tz = rte_memzone_reserve_aligned(z_name, len, socket_id, + RTE_MEMZONE_IOVA_CONTIG, 4096); if (!tz) return NULL; @@ -1468,6 +1515,7 @@ static int process_responses(struct sge_rspq *q, int budget, rsp_type = G_RSPD_TYPE(rc->u.type_gen); if (likely(rsp_type == X_RSPD_TYPE_FLBUF)) { + struct sge *s = &q->adapter->sge; unsigned int stat_pidx; int stat_pidx_diff; @@ -1550,10 +1598,12 @@ static int process_responses(struct sge_rspq *q, int budget, } if (cpl->vlan_ex) { - pkt->ol_flags |= PKT_RX_VLAN; + pkt->ol_flags |= PKT_RX_VLAN | + PKT_RX_VLAN_STRIPPED; pkt->vlan_tci = ntohs(cpl->vlan); } + rte_pktmbuf_adj(pkt, s->pktshift); rxq->stats.pkts++; rxq->stats.rx_bytes += pkt->pkt_len; rx_pkts[budget - budget_left] = pkt; @@ -1612,7 +1662,11 @@ int cxgbe_poll(struct sge_rspq *q, struct rte_mbuf **rx_pkts, val = V_CIDXINC(cidx_inc) | V_SEINTARM(params); if (unlikely(!q->bar2_addr)) { - t4_write_reg(q->adapter, MYPF_REG(A_SGE_PF_GTS), + u32 reg = is_pf4(q->adapter) ? MYPF_REG(A_SGE_PF_GTS) : + T4VF_SGE_BASE_ADDR + + A_SGE_VF_GTS; + + t4_write_reg(q->adapter, reg, val | V_INGRESSQID((u32)q->cntxt_id)); } else { writel(val | V_INGRESSQID(q->bar2_qid), @@ -1689,6 +1743,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, char z_name[RTE_MEMZONE_NAMESIZE]; char z_name_sw[RTE_MEMZONE_NAMESIZE]; unsigned int nb_refill; + u8 pciechan; /* Size needs to be multiple of 16, including status entry. 
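+	 * (For instance, a requested size of 1000 entries is rounded up to
+	 * 1008 by the cxgbe_roundup() call below.)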
*/ iq->size = cxgbe_roundup(iq->size, 16); @@ -1706,8 +1761,19 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, memset(&c, 0, sizeof(c)); c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST | - F_FW_CMD_WRITE | F_FW_CMD_EXEC | - V_FW_IQ_CMD_PFN(adap->pf) | V_FW_IQ_CMD_VFN(0)); + F_FW_CMD_WRITE | F_FW_CMD_EXEC); + + if (is_pf4(adap)) { + pciechan = cong > 0 ? cxgbe_ffs(cong) - 1 : pi->tx_chan; + c.op_to_vfn |= htonl(V_FW_IQ_CMD_PFN(adap->pf) | + V_FW_IQ_CMD_VFN(0)); + if (cong >= 0) + c.iqns_to_fl0congen = htonl(F_FW_IQ_CMD_IQFLINTCONGEN | + F_FW_IQ_CMD_IQRO); + } else { + pciechan = pi->port_id; + } + c.alloc_to_len16 = htonl(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART | (sizeof(c) / 16)); c.type_to_iqandstindex = @@ -1719,16 +1785,12 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, V_FW_IQ_CMD_IQANDSTINDEX(intr_idx >= 0 ? intr_idx : -intr_idx - 1)); c.iqdroprss_to_iqesize = - htons(V_FW_IQ_CMD_IQPCIECH(cong > 0 ? cxgbe_ffs(cong) - 1 : - pi->tx_chan) | + htons(V_FW_IQ_CMD_IQPCIECH(pciechan) | F_FW_IQ_CMD_IQGTSMODE | V_FW_IQ_CMD_IQINTCNTTHRESH(iq->pktcnt_idx) | V_FW_IQ_CMD_IQESIZE(ilog2(iq->iqe_len) - 4)); c.iqsize = htons(iq->size); c.iqaddr = cpu_to_be64(iq->phys_addr); - if (cong >= 0) - c.iqns_to_fl0congen = htonl(F_FW_IQ_CMD_IQFLINTCONGEN | - F_FW_IQ_CMD_IQRO); if (fl) { struct sge_eth_rxq *rxq = container_of(fl, struct sge_eth_rxq, @@ -1768,7 +1830,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, 0 : F_FW_IQ_CMD_FL0PACKEN) | F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO | F_FW_IQ_CMD_FL0PADEN); - if (cong >= 0) + if (is_pf4(adap) && cong >= 0) c.iqns_to_fl0congen |= htonl(V_FW_IQ_CMD_FL0CNGCHMAP(cong) | F_FW_IQ_CMD_FL0CONGCIF | @@ -1789,7 +1851,10 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, c.fl0addr = cpu_to_be64(fl->addr); } - ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); + if (is_pf4(adap)) + ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); + else + ret = t4vf_wr_mbox(adap, &c, sizeof(c), &c); if (ret) goto err; @@ -1806,7 +1871,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, iq->stat = (void *)&iq->desc[iq->size * 8]; iq->eth_dev = eth_dev; iq->handler = hnd; - iq->port_id = pi->port_id; + iq->port_id = pi->pidx; iq->mb_pool = mp; /* set offset to -1 to distinguish ingress queues without FL */ @@ -1846,7 +1911,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, * a lot easier to fix in one place ... For now we do something very * simple (and hopefully less wrong). 
*/ - if (!is_t4(adap->params.chip) && cong >= 0) { + if (is_pf4(adap) && !is_t4(adap->params.chip) && cong >= 0) { u32 param, val; int i; @@ -1893,9 +1958,11 @@ err: return ret; } -static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id) +static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id, + unsigned int abs_id) { q->cntxt_id = id; + q->abs_id = abs_id; q->bar2_addr = bar2_address(adap, q->cntxt_id, T4_BAR2_QTYPE_EGRESS, &q->bar2_qid); q->cidx = 0; @@ -1943,6 +2010,7 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); char z_name[RTE_MEMZONE_NAMESIZE]; char z_name_sw[RTE_MEMZONE_NAMESIZE]; + u8 pciechan; /* Add status entries */ nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); @@ -1961,16 +2029,22 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, memset(&c, 0, sizeof(c)); c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST | - F_FW_CMD_WRITE | F_FW_CMD_EXEC | - V_FW_EQ_ETH_CMD_PFN(adap->pf) | - V_FW_EQ_ETH_CMD_VFN(0)); + F_FW_CMD_WRITE | F_FW_CMD_EXEC); + if (is_pf4(adap)) { + pciechan = pi->tx_chan; + c.op_to_vfn |= htonl(V_FW_EQ_ETH_CMD_PFN(adap->pf) | + V_FW_EQ_ETH_CMD_VFN(0)); + } else { + pciechan = pi->port_id; + } + c.alloc_to_len16 = htonl(F_FW_EQ_ETH_CMD_ALLOC | F_FW_EQ_ETH_CMD_EQSTART | (sizeof(c) / 16)); c.autoequiqe_to_viid = htonl(F_FW_EQ_ETH_CMD_AUTOEQUEQE | V_FW_EQ_ETH_CMD_VIID(pi->viid)); c.fetchszm_to_iqid = htonl(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) | - V_FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) | + V_FW_EQ_ETH_CMD_PCIECHN(pciechan) | F_FW_EQ_ETH_CMD_FETCHRO | V_FW_EQ_ETH_CMD_IQID(iqid)); c.dcaen_to_eqsize = htonl(V_FW_EQ_ETH_CMD_FBMIN(X_FETCHBURSTMIN_64B) | @@ -1978,7 +2052,10 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, V_FW_EQ_ETH_CMD_EQSIZE(nentries)); c.eqaddr = rte_cpu_to_be_64(txq->q.phys_addr); - ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); + if (is_pf4(adap)) + ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); + else + ret = t4vf_wr_mbox(adap, &c, sizeof(c), &c); if (ret) { rte_free(txq->q.sdesc); txq->q.sdesc = NULL; @@ -1986,7 +2063,8 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, return ret; } - init_txq(adap, &txq->q, G_FW_EQ_ETH_CMD_EQID(ntohl(c.eqid_pkd))); + init_txq(adap, &txq->q, G_FW_EQ_ETH_CMD_EQID(ntohl(c.eqid_pkd)), + G_FW_EQ_ETH_CMD_PHYSEQID(ntohl(c.physeqid_pkd))); txq->stats.tso = 0; txq->stats.pkts = 0; txq->stats.tx_cso = 0; @@ -1997,6 +2075,7 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, txq->stats.mapping_err = 0; txq->flags |= EQ_STOPPED; txq->eth_dev = eth_dev; + txq->data = eth_dev->data; t4_os_lock_init(&txq->txq_lock); return 0; } @@ -2280,3 +2359,182 @@ int t4_sge_init(struct adapter *adap) return 0; } + +int t4vf_sge_init(struct adapter *adap) +{ + struct sge_params *sge_params = &adap->params.sge; + u32 sge_ingress_queues_per_page; + u32 sge_egress_queues_per_page; + u32 sge_control, sge_control2; + u32 fl_small_pg, fl_large_pg; + u32 sge_ingress_rx_threshold; + u32 sge_timer_value_0_and_1; + u32 sge_timer_value_2_and_3; + u32 sge_timer_value_4_and_5; + u32 sge_congestion_control; + struct sge *s = &adap->sge; + unsigned int s_hps, s_qpp; + u32 sge_host_page_size; + u32 params[7], vals[7]; + int v; + + /* query basic params from fw */ + params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | + V_FW_PARAMS_PARAM_XYZ(A_SGE_CONTROL)); + 
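+	/*
+	 * (Each of these parameters uses the REG mnemonic: the VF generally
+	 * cannot read these SGE registers directly, so it asks the firmware
+	 * to read them on its behalf, with the register offset carried in
+	 * the PARAM_XYZ field of the encoded parameter.)
+	 */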
	params[1] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+		     V_FW_PARAMS_PARAM_XYZ(A_SGE_HOST_PAGE_SIZE));
+	params[2] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+		     V_FW_PARAMS_PARAM_XYZ(A_SGE_FL_BUFFER_SIZE0));
+	params[3] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+		     V_FW_PARAMS_PARAM_XYZ(A_SGE_FL_BUFFER_SIZE1));
+	params[4] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+		     V_FW_PARAMS_PARAM_XYZ(A_SGE_TIMER_VALUE_0_AND_1));
+	params[5] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+		     V_FW_PARAMS_PARAM_XYZ(A_SGE_TIMER_VALUE_2_AND_3));
+	params[6] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+		     V_FW_PARAMS_PARAM_XYZ(A_SGE_TIMER_VALUE_4_AND_5));
+	v = t4vf_query_params(adap, 7, params, vals);
+	if (v != FW_SUCCESS)
+		return v;
+
+	sge_control = vals[0];
+	sge_host_page_size = vals[1];
+	fl_small_pg = vals[2];
+	fl_large_pg = vals[3];
+	sge_timer_value_0_and_1 = vals[4];
+	sge_timer_value_2_and_3 = vals[5];
+	sge_timer_value_4_and_5 = vals[6];
+
+	/*
+	 * Start by vetting the basic SGE parameters which have been set up by
+	 * the Physical Function Driver.
+	 */
+
+	/* We only bother using the Large Page logic if the Large Page Buffer
+	 * is larger than our Page Size Buffer.
+	 */
+	if (fl_large_pg <= fl_small_pg)
+		fl_large_pg = 0;
+
+	/* The Page Size Buffer must be exactly equal to our Page Size and the
+	 * Large Page Size Buffer should be 0 (per above) or a power of 2.
+	 */
+	if (fl_small_pg != CXGBE_PAGE_SIZE ||
+	    (fl_large_pg & (fl_large_pg - 1)) != 0) {
+		dev_err(adap, "bad SGE FL buffer sizes [%d, %d]\n",
+			fl_small_pg, fl_large_pg);
+		return -EINVAL;
+	}
+
+	if ((sge_control & F_RXPKTCPLMODE) !=
+	    V_RXPKTCPLMODE(X_RXPKTCPLMODE_SPLIT)) {
+		dev_err(adap, "bad SGE CPL MODE\n");
+		return -EINVAL;
+	}
+
+	/* Grab the ingress packing boundary from SGE_CONTROL2 (T5 and later). */
+	params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+		     V_FW_PARAMS_PARAM_XYZ(A_SGE_CONTROL2));
+	v = t4vf_query_params(adap, 1, params, vals);
+	if (v != FW_SUCCESS) {
+		dev_err(adap, "Unable to get SGE Control2; "
+			"probably old firmware.\n");
+		return v;
+	}
+	sge_control2 = vals[0];
+
+	params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+		     V_FW_PARAMS_PARAM_XYZ(A_SGE_INGRESS_RX_THRESHOLD));
+	params[1] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+		     V_FW_PARAMS_PARAM_XYZ(A_SGE_CONM_CTRL));
+	v = t4vf_query_params(adap, 2, params, vals);
+	if (v != FW_SUCCESS)
+		return v;
+	sge_ingress_rx_threshold = vals[0];
+	sge_congestion_control = vals[1];
+	params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+		     V_FW_PARAMS_PARAM_XYZ(A_SGE_EGRESS_QUEUES_PER_PAGE_VF));
+	params[1] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+		     V_FW_PARAMS_PARAM_XYZ(A_SGE_INGRESS_QUEUES_PER_PAGE_VF));
+	v = t4vf_query_params(adap, 2, params, vals);
+	if (v != FW_SUCCESS) {
+		dev_warn(adap, "Unable to get VF SGE Queues/Page; "
+			 "probably old firmware.\n");
+		return v;
+	}
+	sge_egress_queues_per_page = vals[0];
+	sge_ingress_queues_per_page = vals[1];
+
+	/*
+	 * We need the Queues/Page for our VF. This is based on the
+	 * PF from which we're instantiated and is indexed in the
+	 * register we just read.
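+	 *
+	 * (Editorial illustration, assuming the conventional 4-bit-wide
+	 * per-PF fields in these registers: a VF instantiated from PF 2
+	 * shifts the queried value right by S_QUEUESPERPAGEPF0 + 2 * 4
+	 * bits and masks it with M_QUEUESPERPAGEPF0 to recover its own
+	 * Queues/Page setting, which is what the s_hps/s_qpp computations
+	 * below do.)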
+ */ + s_hps = (S_HOSTPAGESIZEPF0 + + (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) * adap->pf); + sge_params->hps = + ((sge_host_page_size >> s_hps) & M_HOSTPAGESIZEPF0); + + s_qpp = (S_QUEUESPERPAGEPF0 + + (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adap->pf); + sge_params->eq_qpp = + ((sge_egress_queues_per_page >> s_qpp) + & M_QUEUESPERPAGEPF0); + sge_params->iq_qpp = + ((sge_ingress_queues_per_page >> s_qpp) + & M_QUEUESPERPAGEPF0); + + /* + * Now translate the queried parameters into our internal forms. + */ + if (fl_large_pg) + s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT; + s->stat_len = ((sge_control & F_EGRSTATUSPAGESIZE) + ? 128 : 64); + s->pktshift = G_PKTSHIFT(sge_control); + s->fl_align = t4vf_fl_pkt_align(adap, sge_control, sge_control2); + + /* + * A FL with <= fl_starve_thres buffers is starving and a periodic + * timer will attempt to refill it. This needs to be larger than the + * SGE's Egress Congestion Threshold. If it isn't, then we can get + * stuck waiting for new packets while the SGE is waiting for us to + * give it more Free List entries. (Note that the SGE's Egress + * Congestion Threshold is in units of 2 Free List pointers.) + */ + switch (CHELSIO_CHIP_VERSION(adap->params.chip)) { + case CHELSIO_T5: + s->fl_starve_thres = + G_EGRTHRESHOLDPACKING(sge_congestion_control); + break; + case CHELSIO_T6: + default: + s->fl_starve_thres = + G_T6_EGRTHRESHOLDPACKING(sge_congestion_control); + break; + } + s->fl_starve_thres = s->fl_starve_thres * 2 + 1; + + /* + * Save RX interrupt holdoff timer values and counter + * threshold values from the SGE parameters. + */ + s->timer_val[0] = core_ticks_to_us(adap, + G_TIMERVALUE0(sge_timer_value_0_and_1)); + s->timer_val[1] = core_ticks_to_us(adap, + G_TIMERVALUE1(sge_timer_value_0_and_1)); + s->timer_val[2] = core_ticks_to_us(adap, + G_TIMERVALUE2(sge_timer_value_2_and_3)); + s->timer_val[3] = core_ticks_to_us(adap, + G_TIMERVALUE3(sge_timer_value_2_and_3)); + s->timer_val[4] = core_ticks_to_us(adap, + G_TIMERVALUE4(sge_timer_value_4_and_5)); + s->timer_val[5] = core_ticks_to_us(adap, + G_TIMERVALUE5(sge_timer_value_4_and_5)); + s->counter_val[0] = G_THRESHOLD_0(sge_ingress_rx_threshold); + s->counter_val[1] = G_THRESHOLD_1(sge_ingress_rx_threshold); + s->counter_val[2] = G_THRESHOLD_2(sge_ingress_rx_threshold); + s->counter_val[3] = G_THRESHOLD_3(sge_ingress_rx_threshold); + return 0; +} diff --git a/drivers/net/dpaa/Makefile b/drivers/net/dpaa/Makefile index 9c2a5ea8..d7a0a50c 100644 --- a/drivers/net/dpaa/Makefile +++ b/drivers/net/dpaa/Makefile @@ -27,6 +27,9 @@ EXPORT_MAP := rte_pmd_dpaa_version.map LIBABIVER := 1 +# depends on dpaa bus which uses experimental API +CFLAGS += -DALLOW_EXPERIMENTAL_API + # Interfaces with DPDK SRCS-$(CONFIG_RTE_LIBRTE_DPAA_PMD) += dpaa_ethdev.c SRCS-$(CONFIG_RTE_LIBRTE_DPAA_PMD) += dpaa_rxtx.c diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c index 9b69ef45..d014a11a 100644 --- a/drivers/net/dpaa/dpaa_ethdev.c +++ b/drivers/net/dpaa/dpaa_ethdev.c @@ -45,14 +45,42 @@ #include #include +/* Supported Rx offloads */ +static uint64_t dev_rx_offloads_sup = + DEV_RX_OFFLOAD_JUMBO_FRAME; + +/* Rx offloads which cannot be disabled */ +static uint64_t dev_rx_offloads_nodis = + DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM | + DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | + DEV_RX_OFFLOAD_CRC_STRIP | + DEV_RX_OFFLOAD_SCATTER; + +/* Supported Tx offloads */ +static uint64_t dev_tx_offloads_sup; + +/* Tx offloads which cannot be disabled */ +static 
uint64_t dev_tx_offloads_nodis =
+		DEV_TX_OFFLOAD_IPV4_CKSUM |
+		DEV_TX_OFFLOAD_UDP_CKSUM |
+		DEV_TX_OFFLOAD_TCP_CKSUM |
+		DEV_TX_OFFLOAD_SCTP_CKSUM |
+		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		DEV_TX_OFFLOAD_MULTI_SEGS |
+		DEV_TX_OFFLOAD_MT_LOCKFREE |
+		DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+
 /* Keep track of whether QMAN and BMAN have been globally initialized */
 static int is_global_init;
 
-/* At present we only allow up to 4 push mode queues
- * as each of this queue need dedicated portal and we are short of portals.
+/* At present we only allow up to 4 push mode queues as default - as each of
+ * these queues needs a dedicated portal and we are short of portals.
  */
-#define DPAA_MAX_PUSH_MODE_QUEUE       4
+#define DPAA_MAX_PUSH_MODE_QUEUE       8
+#define DPAA_DEFAULT_PUSH_MODE_QUEUE   4
 
-static int dpaa_push_mode_max_queue = DPAA_MAX_PUSH_MODE_QUEUE;
+static int dpaa_push_mode_max_queue = DPAA_DEFAULT_PUSH_MODE_QUEUE;
 static int dpaa_push_queue_idx;	/* Queue index which are in push mode*/
 
@@ -95,6 +123,9 @@ static const struct rte_dpaa_xstats_name_off dpaa_xstats_strings[] = {
 
 static struct rte_dpaa_driver rte_dpaa_pmd;
 
+static void
+dpaa_eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info);
+
 static inline void
 dpaa_poll_queue_default_config(struct qm_mcc_initfq *opts)
 {
@@ -122,9 +153,11 @@ dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
	if (mtu < ETHER_MIN_MTU || frame_size > DPAA_MAX_RX_PKT_LEN)
		return -EINVAL;
	if (frame_size > ETHER_MAX_LEN)
-		dev->data->dev_conf.rxmode.jumbo_frame = 1;
+		dev->data->dev_conf.rxmode.offloads |=
+						DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
-		dev->data->dev_conf.rxmode.jumbo_frame = 0;
+		dev->data->dev_conf.rxmode.offloads &=
+						~DEV_RX_OFFLOAD_JUMBO_FRAME;
 
	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
 
@@ -134,13 +167,32 @@ dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 }
 
 static int
-dpaa_eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
+dpaa_eth_dev_configure(struct rte_eth_dev *dev)
 {
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
+	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
+	uint64_t rx_offloads = eth_conf->rxmode.offloads;
+	uint64_t tx_offloads = eth_conf->txmode.offloads;
 
	PMD_INIT_FUNC_TRACE();
 
-	if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
+	/* Rx offloads validation */
+	if (dev_rx_offloads_nodis & ~rx_offloads) {
+		DPAA_PMD_WARN(
+		"Rx offloads non configurable - requested 0x%" PRIx64
+		" ignored 0x%" PRIx64,
+			rx_offloads, dev_rx_offloads_nodis);
+	}
+
+	/* Tx offloads validation */
+	if (dev_tx_offloads_nodis & ~tx_offloads) {
+		DPAA_PMD_WARN(
+		"Tx offloads non configurable - requested 0x%" PRIx64
+		" ignored 0x%" PRIx64,
+			tx_offloads, dev_tx_offloads_nodis);
+	}
+
+	if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
		    DPAA_MAX_RX_PKT_LEN) {
			fman_if_set_maxfrm(dpaa_intf->fif,
@@ -256,14 +308,12 @@ static void dpaa_eth_dev_info(struct rte_eth_dev *dev,
	dev_info->flow_type_rss_offloads = DPAA_RSS_OFFLOAD_ALL;
	dev_info->speed_capa = (ETH_LINK_SPEED_1G |
				ETH_LINK_SPEED_10G);
-	dev_info->rx_offload_capa =
-		(DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM);
-	dev_info->tx_offload_capa =
-		(DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM);
+	dev_info->rx_offload_capa = dev_rx_offloads_sup |
+					dev_rx_offloads_nodis;
+	dev_info->tx_offload_capa = dev_tx_offloads_sup |
+					dev_tx_offloads_nodis;
+	dev_info->default_rxportconf.burst_size = DPAA_DEF_RX_BURST_SIZE;
+	dev_info->default_txportconf.burst_size
= DPAA_DEF_TX_BURST_SIZE; } static int dpaa_eth_link_update(struct rte_eth_dev *dev, @@ -275,9 +325,9 @@ static int dpaa_eth_link_update(struct rte_eth_dev *dev, PMD_INIT_FUNC_TRACE(); if (dpaa_intf->fif->mac_type == fman_mac_1g) - link->link_speed = 1000; + link->link_speed = ETH_SPEED_NUM_1G; else if (dpaa_intf->fif->mac_type == fman_mac_10g) - link->link_speed = 10000; + link->link_speed = ETH_SPEED_NUM_10G; else DPAA_PMD_ERR("invalid link_speed: %s, %d", dpaa_intf->name, dpaa_intf->fif->mac_type); @@ -316,12 +366,12 @@ dpaa_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, unsigned int i = 0, num = RTE_DIM(dpaa_xstats_strings); uint64_t values[sizeof(struct dpaa_if_stats) / 8]; - if (xstats == NULL) - return 0; - if (n < num) return num; + if (xstats == NULL) + return 0; + fman_if_stats_get_all(dpaa_intf->fif, values, sizeof(struct dpaa_if_stats) / 8); @@ -335,10 +385,13 @@ dpaa_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, static int dpaa_xstats_get_names(__rte_unused struct rte_eth_dev *dev, struct rte_eth_xstat_name *xstats_names, - __rte_unused unsigned int limit) + unsigned int limit) { unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings); + if (limit < stat_cnt) + return stat_cnt; + if (xstats_names != NULL) for (i = 0; i < stat_cnt; i++) snprintf(xstats_names[i].name, @@ -366,7 +419,7 @@ dpaa_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, return 0; fman_if_stats_get_all(dpaa_intf->fif, values_copy, - sizeof(struct dpaa_if_stats)); + sizeof(struct dpaa_if_stats) / 8); for (i = 0; i < stat_cnt; i++) values[i] = @@ -813,7 +866,7 @@ dpaa_dev_remove_mac_addr(struct rte_eth_dev *dev, fman_if_clear_mac_addr(dpaa_intf->fif, index); } -static void +static int dpaa_dev_set_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr) { @@ -825,6 +878,8 @@ dpaa_dev_set_mac_addr(struct rte_eth_dev *dev, ret = fman_if_add_mac_addr(dpaa_intf->fif, addr->addr_bytes, 0); if (ret) RTE_LOG(ERR, PMD, "error: Setting the MAC ADDR failed %d", ret); + + return ret; } static struct eth_dev_ops dpaa_devops = { @@ -1105,10 +1160,10 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev) dpaa_push_mode_max_queue = DPAA_MAX_PUSH_MODE_QUEUE; } - /* Each device can not have more than DPAA_PCD_FQID_MULTIPLIER RX + /* Each device can not have more than DPAA_MAX_NUM_PCD_QUEUES RX * queues. 
*/ - if (num_rx_fqs <= 0 || num_rx_fqs > DPAA_PCD_FQID_MULTIPLIER) { + if (num_rx_fqs <= 0 || num_rx_fqs > DPAA_MAX_NUM_PCD_QUEUES) { DPAA_PMD_ERR("Invalid number of RX queues\n"); return -EINVAL; } @@ -1317,6 +1372,7 @@ rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv, eth_dev = rte_eth_dev_attach_secondary(dpaa_dev->name); if (!eth_dev) return -ENOMEM; + rte_eth_dev_probing_finish(eth_dev); return 0; } @@ -1366,8 +1422,10 @@ rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv, /* Invoke PMD device initialization function */ diag = dpaa_dev_init(eth_dev); - if (diag == 0) + if (diag == 0) { + rte_eth_dev_probing_finish(eth_dev); return 0; + } if (rte_eal_process_type() == RTE_PROC_PRIMARY) rte_free(eth_dev->data->dev_private); diff --git a/drivers/net/dpaa/dpaa_ethdev.h b/drivers/net/dpaa/dpaa_ethdev.h index c051ae32..1897b9e4 100644 --- a/drivers/net/dpaa/dpaa_ethdev.h +++ b/drivers/net/dpaa/dpaa_ethdev.h @@ -51,6 +51,10 @@ /*Maximum number of slots available in TX ring*/ #define DPAA_TX_BURST_SIZE 7 +/* Optimal burst size for RX and TX as default */ +#define DPAA_DEF_RX_BURST_SIZE 7 +#define DPAA_DEF_TX_BURST_SIZE DPAA_TX_BURST_SIZE + #ifndef VLAN_TAG_SIZE #define VLAN_TAG_SIZE 4 /** < Vlan Header Length */ #endif @@ -74,14 +78,10 @@ #define DPAA_DEBUG_FQ_TX_ERROR 1 #define DPAA_RSS_OFFLOAD_ALL ( \ - ETH_RSS_FRAG_IPV4 | \ - ETH_RSS_NONFRAG_IPV4_TCP | \ - ETH_RSS_NONFRAG_IPV4_UDP | \ - ETH_RSS_NONFRAG_IPV4_SCTP | \ - ETH_RSS_FRAG_IPV6 | \ - ETH_RSS_NONFRAG_IPV6_TCP | \ - ETH_RSS_NONFRAG_IPV6_UDP | \ - ETH_RSS_NONFRAG_IPV6_SCTP) + ETH_RSS_IP | \ + ETH_RSS_UDP | \ + ETH_RSS_TCP | \ + ETH_RSS_SCTP) #define DPAA_TX_CKSUM_OFFLOAD_MASK ( \ PKT_TX_IP_CKSUM | \ diff --git a/drivers/net/dpaa/dpaa_rxtx.c b/drivers/net/dpaa/dpaa_rxtx.c index 0dea8e79..1316d2ad 100644 --- a/drivers/net/dpaa/dpaa_rxtx.c +++ b/drivers/net/dpaa/dpaa_rxtx.c @@ -59,7 +59,7 @@ } while (0) #if (defined RTE_LIBRTE_DPAA_DEBUG_DRIVER) -void dpaa_display_frame(const struct qm_fd *fd) +static void dpaa_display_frame(const struct qm_fd *fd) { int ii; char *ptr; @@ -90,11 +90,10 @@ static inline void dpaa_slow_parsing(struct rte_mbuf *m __rte_unused, /*TBD:XXX: to be implemented*/ } -static inline void dpaa_eth_packet_info(struct rte_mbuf *m, - uint64_t fd_virt_addr) +static inline void dpaa_eth_packet_info(struct rte_mbuf *m, void *fd_virt_addr) { struct annotations_t *annot = GET_ANNOTATIONS(fd_virt_addr); - uint64_t prs = *((uint64_t *)(&annot->parse)) & DPAA_PARSE_MASK; + uint64_t prs = *((uintptr_t *)(&annot->parse)) & DPAA_PARSE_MASK; DPAA_DP_LOG(DEBUG, " Parsing mbuf: %p with annotations: %p", m, annot); @@ -351,7 +350,7 @@ dpaa_eth_sg_to_mbuf(const struct qm_fd *fd, uint32_t ifid) prev_seg = cur_seg; } - dpaa_eth_packet_info(first_seg, (uint64_t)vaddr); + dpaa_eth_packet_info(first_seg, vaddr); rte_pktmbuf_free_seg(temp); return first_seg; @@ -394,7 +393,7 @@ dpaa_eth_fd_to_mbuf(const struct qm_fd *fd, uint32_t ifid) mbuf->ol_flags = 0; mbuf->next = NULL; rte_mbuf_refcnt_set(mbuf, 1); - dpaa_eth_packet_info(mbuf, (uint64_t)mbuf->buf_addr); + dpaa_eth_packet_info(mbuf, mbuf->buf_addr); return mbuf; } @@ -455,7 +454,7 @@ dpaa_rx_cb(struct qman_fq **fq, struct qm_dqrr_entry **dqrr, mbuf->ol_flags = 0; mbuf->next = NULL; rte_mbuf_refcnt_set(mbuf, 1); - dpaa_eth_packet_info(mbuf, (uint64_t)mbuf->buf_addr); + dpaa_eth_packet_info(mbuf, mbuf->buf_addr); } } @@ -593,7 +592,7 @@ uint16_t dpaa_eth_queue_rx(void *q, static void *dpaa_get_pktbuf(struct dpaa_bp_info *bp_info) { int ret; - uint64_t buf = 0; + size_t buf = 0; struct 
bm_buffer bufs; ret = bman_acquire(bp_info->bp, &bufs, 1, 0); @@ -602,10 +601,10 @@ static void *dpaa_get_pktbuf(struct dpaa_bp_info *bp_info) return (void *)buf; } - DPAA_DP_LOG(DEBUG, "got buffer 0x%lx from pool %d", + DPAA_DP_LOG(DEBUG, "got buffer 0x%" PRIx64 " from pool %d", (uint64_t)bufs.addr, bufs.bpid); - buf = (uint64_t)DPAA_MEMPOOL_PTOV(bp_info, bufs.addr) + buf = (size_t)DPAA_MEMPOOL_PTOV(bp_info, bufs.addr) - bp_info->meta_data_size; if (!buf) goto out; @@ -826,6 +825,8 @@ tx_on_external_pool(struct qman_fq *txq, struct rte_mbuf *mbuf, } DPAA_MBUF_TO_CONTIG_FD(dmable_mbuf, fd_arr, dpaa_intf->bp_info->bpid); + if (mbuf->ol_flags & DPAA_TX_CKSUM_OFFLOAD_MASK) + dpaa_unsegmented_checksum(mbuf, fd_arr); return 0; } diff --git a/drivers/net/dpaa/meson.build b/drivers/net/dpaa/meson.build new file mode 100644 index 00000000..62dec7b0 --- /dev/null +++ b/drivers/net/dpaa/meson.build @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright 2018 NXP + +if host_machine.system() != 'linux' + build = false +endif +deps += ['mempool_dpaa'] + +sources = files('dpaa_ethdev.c', + 'dpaa_rxtx.c') + +allow_experimental_apis = true + +install_headers('rte_pmd_dpaa.h') diff --git a/drivers/net/dpaa/rte_pmd_dpaa_version.map b/drivers/net/dpaa/rte_pmd_dpaa_version.map index 3b937b10..c7ad4030 100644 --- a/drivers/net/dpaa/rte_pmd_dpaa_version.map +++ b/drivers/net/dpaa/rte_pmd_dpaa_version.map @@ -9,6 +9,4 @@ EXPERIMENTAL { dpaa_eth_eventq_attach; dpaa_eth_eventq_detach; rte_pmd_dpaa_set_tx_loopback; - - local: *; -} DPDK_17.11; +}; diff --git a/drivers/net/dpaa2/Makefile b/drivers/net/dpaa2/Makefile index 5a93a0b9..9b0b1433 100644 --- a/drivers/net/dpaa2/Makefile +++ b/drivers/net/dpaa2/Makefile @@ -10,14 +10,8 @@ include $(RTE_SDK)/mk/rte.vars.mk # LIB = librte_pmd_dpaa2.a -ifeq ($(CONFIG_RTE_LIBRTE_DPAA2_DEBUG_INIT),y) -CFLAGS += -O0 -g -CFLAGS += "-Wno-error" -else CFLAGS += -O3 CFLAGS += $(WERROR_FLAGS) -endif - CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2 CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2/mc CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc @@ -25,7 +19,6 @@ CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/qbman/include CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/mc CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/portal CFLAGS += -I$(RTE_SDK)/drivers/mempool/dpaa2 -CFLAGS += -I$(RTE_SDK)/drivers/event/dpaa2 CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal # versioning export map @@ -34,6 +27,9 @@ EXPORT_MAP := rte_pmd_dpaa2_version.map # library version LIBABIVER := 1 +# depends on fslmc bus which uses experimental API +CFLAGS += -DALLOW_EXPERIMENTAL_API + SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += base/dpaa2_hw_dpni.c SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += dpaa2_rxtx.c SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += dpaa2_ethdev.c diff --git a/drivers/net/dpaa2/base/dpaa2_hw_dpni.c b/drivers/net/dpaa2/base/dpaa2_hw_dpni.c index b93376de..713a41bf 100644 --- a/drivers/net/dpaa2/base/dpaa2_hw_dpni.c +++ b/drivers/net/dpaa2/base/dpaa2_hw_dpni.c @@ -17,7 +17,7 @@ #include #include -#include +#include #include #include @@ -42,7 +42,7 @@ dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev, p_params = rte_malloc( NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE); if (!p_params) { - PMD_INIT_LOG(ERR, "Memory unavailable"); + DPAA2_PMD_ERR("Unable to allocate flow-dist parameters"); return -ENOMEM; } memset(p_params, 0, DIST_PARAM_IOVA_SIZE); @@ -50,8 +50,8 @@ dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev, ret = dpaa2_distset_to_dpkg_profile_cfg(req_dist_set, &kg_cfg); if (ret) { - PMD_INIT_LOG(ERR, "given rss_hf (%lx) 
not supported", - req_dist_set); + DPAA2_PMD_ERR("Given RSS Hash (%" PRIx64 ") not supported", + req_dist_set); rte_free(p_params); return ret; } @@ -61,7 +61,7 @@ dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev, ret = dpkg_prepare_key_cfg(&kg_cfg, p_params); if (ret) { - PMD_INIT_LOG(ERR, "Unable to prepare extract parameters"); + DPAA2_PMD_ERR("Unable to prepare extract parameters"); rte_free(p_params); return ret; } @@ -70,7 +70,7 @@ dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev, &tc_cfg); rte_free(p_params); if (ret) { - PMD_INIT_LOG(ERR, + DPAA2_PMD_ERR( "Setting distribution for Rx failed with err: %d", ret); return ret; @@ -93,7 +93,7 @@ int dpaa2_remove_flow_dist( p_params = rte_malloc( NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE); if (!p_params) { - PMD_INIT_LOG(ERR, "Memory unavailable"); + DPAA2_PMD_ERR("Unable to allocate flow-dist parameters"); return -ENOMEM; } memset(p_params, 0, DIST_PARAM_IOVA_SIZE); @@ -105,7 +105,7 @@ int dpaa2_remove_flow_dist( ret = dpkg_prepare_key_cfg(&kg_cfg, p_params); if (ret) { - PMD_INIT_LOG(ERR, "Unable to prepare extract parameters"); + DPAA2_PMD_ERR("Unable to prepare extract parameters"); rte_free(p_params); return ret; } @@ -114,8 +114,8 @@ int dpaa2_remove_flow_dist( &tc_cfg); rte_free(p_params); if (ret) - PMD_INIT_LOG(ERR, - "Setting distribution for Rx failed with err:%d", + DPAA2_PMD_ERR( + "Setting distribution for Rx failed with err: %d", ret); return ret; } @@ -256,7 +256,7 @@ dpaa2_distset_to_dpkg_profile_cfg( break; default: - PMD_INIT_LOG(WARNING, + DPAA2_PMD_WARN( "Unsupported flow dist option %x", dist_field); return -EINVAL; @@ -307,7 +307,7 @@ dpaa2_attach_bp_list(struct dpaa2_dev_priv *priv, retcode = dpni_set_buffer_layout(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_RX, &layout); if (retcode) { - PMD_INIT_LOG(ERR, "Err(%d) in setting rx buffer layout\n", + DPAA2_PMD_ERR("Error configuring buffer pool Rx layout (%d)", retcode); return retcode; } @@ -322,9 +322,9 @@ dpaa2_attach_bp_list(struct dpaa2_dev_priv *priv, retcode = dpni_set_pools(dpni, CMD_PRI_LOW, priv->token, &bpool_cfg); if (retcode != 0) { - PMD_INIT_LOG(ERR, "Error in attaching the buffer pool list" - " bpid = %d Error code = %d\n", - bpool_cfg.pools[0].dpbp_id, retcode); + DPAA2_PMD_ERR("Error configuring buffer pool on interface." 
+ " bpid = %d error code = %d", + bpool_cfg.pools[0].dpbp_id, retcode); return retcode; } diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c index 09a11d65..9297725d 100644 --- a/drivers/net/dpaa2/dpaa2_ethdev.c +++ b/drivers/net/dpaa2/dpaa2_ethdev.c @@ -18,7 +18,7 @@ #include #include -#include +#include "dpaa2_pmd_logs.h" #include #include #include @@ -27,6 +27,36 @@ #include "dpaa2_ethdev.h" #include +/* Supported Rx offloads */ +static uint64_t dev_rx_offloads_sup = + DEV_RX_OFFLOAD_VLAN_STRIP | + DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM | + DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | + DEV_RX_OFFLOAD_VLAN_FILTER | + DEV_RX_OFFLOAD_JUMBO_FRAME; + +/* Rx offloads which cannot be disabled */ +static uint64_t dev_rx_offloads_nodis = + DEV_RX_OFFLOAD_CRC_STRIP | + DEV_RX_OFFLOAD_SCATTER; + +/* Supported Tx offloads */ +static uint64_t dev_tx_offloads_sup = + DEV_TX_OFFLOAD_VLAN_INSERT | + DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM | + DEV_TX_OFFLOAD_SCTP_CKSUM | + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM; + +/* Tx offloads which cannot be disabled */ +static uint64_t dev_tx_offloads_nodis = + DEV_TX_OFFLOAD_MULTI_SEGS | + DEV_TX_OFFLOAD_MT_LOCKFREE | + DEV_TX_OFFLOAD_MBUF_FAST_FREE; + struct rte_dpaa2_xstats_name_off { char name[RTE_ETH_XSTATS_NAME_SIZE]; uint8_t page_id; /* dpni statistics page id */ @@ -57,57 +87,7 @@ static int dpaa2_dev_set_link_up(struct rte_eth_dev *dev); static int dpaa2_dev_set_link_down(struct rte_eth_dev *dev); static int dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); -/** - * Atomically reads the link status information from global - * structure rte_eth_dev. - * - * @param dev - * - Pointer to the structure rte_eth_dev to read from. - * - Pointer to the buffer to be saved with the link status. - * - * @return - * - On success, zero. - * - On failure, negative value. - */ -static inline int -dpaa2_dev_atomic_read_link_status(struct rte_eth_dev *dev, - struct rte_eth_link *link) -{ - struct rte_eth_link *dst = link; - struct rte_eth_link *src = &dev->data->dev_link; - - if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, - *(uint64_t *)src) == 0) - return -1; - - return 0; -} - -/** - * Atomically writes the link status information into global - * structure rte_eth_dev. - * - * @param dev - * - Pointer to the structure rte_eth_dev to read from. - * - Pointer to the buffer to be saved with the link status. - * - * @return - * - On success, zero. - * - On failure, negative value. 
- */ -static inline int -dpaa2_dev_atomic_write_link_status(struct rte_eth_dev *dev, - struct rte_eth_link *link) -{ - struct rte_eth_link *dst = &dev->data->dev_link; - struct rte_eth_link *src = link; - - if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, - *(uint64_t *)src) == 0) - return -1; - - return 0; -} +int dpaa2_logtype_pmd; static int dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) @@ -119,7 +99,7 @@ dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) PMD_INIT_FUNC_TRACE(); if (dpni == NULL) { - RTE_LOG(ERR, PMD, "dpni is NULL\n"); + DPAA2_PMD_ERR("dpni is NULL"); return -1; } @@ -131,8 +111,8 @@ dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) priv->token, vlan_id); if (ret < 0) - PMD_DRV_LOG(ERR, "ret = %d Unable to add/rem vlan %d hwid =%d", - ret, vlan_id, priv->hw_id); + DPAA2_PMD_ERR("ret = %d Unable to add/rem vlan %d hwid =%d", + ret, vlan_id, priv->hw_id); return ret; } @@ -149,25 +129,25 @@ dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask) if (mask & ETH_VLAN_FILTER_MASK) { /* VLAN Filter not avaialble */ if (!priv->max_vlan_filters) { - RTE_LOG(INFO, PMD, "VLAN filter not available\n"); + DPAA2_PMD_INFO("VLAN filter not available"); goto next_mask; } - if (dev->data->dev_conf.rxmode.hw_vlan_filter) + if (dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_VLAN_FILTER) ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW, priv->token, true); else ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW, priv->token, false); if (ret < 0) - RTE_LOG(ERR, PMD, "Unable to set vlan filter = %d\n", - ret); + DPAA2_PMD_INFO("Unable to set vlan filter = %d", ret); } next_mask: if (mask & ETH_VLAN_EXTEND_MASK) { - if (dev->data->dev_conf.rxmode.hw_vlan_extend) - RTE_LOG(INFO, PMD, - "VLAN extend offload not supported\n"); + if (dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_VLAN_EXTEND) + DPAA2_PMD_INFO("VLAN extend offload not supported"); } return 0; @@ -187,10 +167,10 @@ dpaa2_fw_version_get(struct rte_eth_dev *dev, PMD_INIT_FUNC_TRACE(); if (mc_get_soc_version(dpni, CMD_PRI_LOW, &mc_plat_info)) - RTE_LOG(WARNING, PMD, "\tmc_get_soc_version failed\n"); + DPAA2_PMD_WARN("\tmc_get_soc_version failed"); if (mc_get_version(dpni, CMD_PRI_LOW, &mc_ver_info)) - RTE_LOG(WARNING, PMD, "\tmc_get_version failed\n"); + DPAA2_PMD_WARN("\tmc_get_version failed"); ret = snprintf(fw_version, fw_size, "%x-%d.%d.%d", @@ -220,20 +200,18 @@ dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->min_rx_bufsize = DPAA2_MIN_RX_BUF_SIZE; dev_info->max_rx_queues = (uint16_t)priv->nb_rx_queues; dev_info->max_tx_queues = (uint16_t)priv->nb_tx_queues; - dev_info->rx_offload_capa = - DEV_RX_OFFLOAD_IPV4_CKSUM | - DEV_RX_OFFLOAD_UDP_CKSUM | - DEV_RX_OFFLOAD_TCP_CKSUM | - DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM; - dev_info->tx_offload_capa = - DEV_TX_OFFLOAD_IPV4_CKSUM | - DEV_TX_OFFLOAD_UDP_CKSUM | - DEV_TX_OFFLOAD_TCP_CKSUM | - DEV_TX_OFFLOAD_SCTP_CKSUM | - DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM; + dev_info->rx_offload_capa = dev_rx_offloads_sup | + dev_rx_offloads_nodis; + dev_info->tx_offload_capa = dev_tx_offloads_sup | + dev_tx_offloads_nodis; dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | ETH_LINK_SPEED_10G; + + dev_info->max_hash_mac_addrs = 0; + dev_info->max_vfs = 0; + dev_info->max_vmdq_pools = ETH_16_POOLS; + dev_info->flow_type_rss_offloads = DPAA2_RSS_OFFLOAD_ALL; } static int @@ -253,7 +231,7 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev) mc_q = rte_malloc(NULL, 
sizeof(struct dpaa2_queue) * tot_queues, RTE_CACHE_LINE_SIZE); if (!mc_q) { - PMD_INIT_LOG(ERR, "malloc failed for rx/tx queues\n"); + DPAA2_PMD_ERR("Memory allocation failed for rx/tx queues"); return -1; } @@ -320,18 +298,39 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev) struct dpaa2_dev_priv *priv = dev->data->dev_private; struct fsl_mc_io *dpni = priv->hw; struct rte_eth_conf *eth_conf = &dev->data->dev_conf; - int rx_ip_csum_offload = false; + uint64_t rx_offloads = eth_conf->rxmode.offloads; + uint64_t tx_offloads = eth_conf->txmode.offloads; + int rx_l3_csum_offload = false; + int rx_l4_csum_offload = false; + int tx_l3_csum_offload = false; + int tx_l4_csum_offload = false; int ret; PMD_INIT_FUNC_TRACE(); - if (eth_conf->rxmode.jumbo_frame == 1) { + /* Rx offloads validation */ + if (dev_rx_offloads_nodis & ~rx_offloads) { + DPAA2_PMD_WARN( + "Rx offloads non configurable - requested 0x%" PRIx64 + " ignored 0x%" PRIx64, + rx_offloads, dev_rx_offloads_nodis); + } + + /* Tx offloads validation */ + if (dev_tx_offloads_nodis & ~tx_offloads) { + DPAA2_PMD_WARN( + "Tx offloads non configurable - requested 0x%" PRIx64 + " ignored 0x%" PRIx64, + tx_offloads, dev_tx_offloads_nodis); + } + + if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) { if (eth_conf->rxmode.max_rx_pkt_len <= DPAA2_MAX_RX_PKT_LEN) { ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, priv->token, eth_conf->rxmode.max_rx_pkt_len); if (ret) { - PMD_INIT_LOG(ERR, - "unable to set mtu. check config\n"); + DPAA2_PMD_ERR( + "Unable to set mtu. check config"); return ret; } } else { @@ -343,40 +342,52 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev) ret = dpaa2_setup_flow_dist(dev, eth_conf->rx_adv_conf.rss_conf.rss_hf); if (ret) { - PMD_INIT_LOG(ERR, "unable to set flow distribution." - "please check queue config\n"); + DPAA2_PMD_ERR("Unable to set flow distribution." 
+ "Check queue config"); return ret; } } - if (eth_conf->rxmode.hw_ip_checksum) - rx_ip_csum_offload = true; + if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) + rx_l3_csum_offload = true; + + if ((rx_offloads & DEV_RX_OFFLOAD_UDP_CKSUM) || + (rx_offloads & DEV_RX_OFFLOAD_TCP_CKSUM)) + rx_l4_csum_offload = true; ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token, - DPNI_OFF_RX_L3_CSUM, rx_ip_csum_offload); + DPNI_OFF_RX_L3_CSUM, rx_l3_csum_offload); if (ret) { - PMD_INIT_LOG(ERR, "Error to set RX l3 csum:Error = %d\n", ret); + DPAA2_PMD_ERR("Error to set RX l3 csum:Error = %d", ret); return ret; } ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token, - DPNI_OFF_RX_L4_CSUM, rx_ip_csum_offload); + DPNI_OFF_RX_L4_CSUM, rx_l4_csum_offload); if (ret) { - PMD_INIT_LOG(ERR, "Error to get RX l4 csum:Error = %d\n", ret); + DPAA2_PMD_ERR("Error to get RX l4 csum:Error = %d", ret); return ret; } + if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) + tx_l3_csum_offload = true; + + if ((tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) || + (tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) || + (tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM)) + tx_l4_csum_offload = true; + ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token, - DPNI_OFF_TX_L3_CSUM, true); + DPNI_OFF_TX_L3_CSUM, tx_l3_csum_offload); if (ret) { - PMD_INIT_LOG(ERR, "Error to set TX l3 csum:Error = %d\n", ret); + DPAA2_PMD_ERR("Error to set TX l3 csum:Error = %d", ret); return ret; } ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token, - DPNI_OFF_TX_L4_CSUM, true); + DPNI_OFF_TX_L4_CSUM, tx_l4_csum_offload); if (ret) { - PMD_INIT_LOG(ERR, "Error to get TX l4 csum:Error = %d\n", ret); + DPAA2_PMD_ERR("Error to get TX l4 csum:Error = %d", ret); return ret; } @@ -390,14 +401,12 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev) ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token, DPNI_FLCTYPE_HASH, true); if (ret) { - PMD_INIT_LOG(ERR, "Error setting FLCTYPE: Err = %d\n", - ret); + DPAA2_PMD_ERR("Error setting FLCTYPE: Err = %d", ret); return ret; } } - if (eth_conf->rxmode.hw_vlan_filter) - dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK); + dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK); /* update the current status */ dpaa2_dev_link_update(dev, 0); @@ -427,8 +436,8 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev, PMD_INIT_FUNC_TRACE(); - PMD_DRV_LOG(DEBUG, "dev =%p, queue =%d, pool = %p, conf =%p", - dev, rx_queue_id, mb_pool, rx_conf); + DPAA2_PMD_DEBUG("dev =%p, queue =%d, pool = %p, conf =%p", + dev, rx_queue_id, mb_pool, rx_conf); if (!priv->bp_list || priv->bp_list->mp != mb_pool) { bpid = mempool_to_bpid(mb_pool); @@ -445,7 +454,7 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev, memset(&cfg, 0, sizeof(struct dpni_queue)); options = options | DPNI_QUEUE_OPT_USER_CTX; - cfg.user_context = (uint64_t)(dpaa2_q); + cfg.user_context = (size_t)(dpaa2_q); /*if ls2088 or rev2 device, enable the stashing */ @@ -467,7 +476,7 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev, ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_RX, dpaa2_q->tc_index, flow_id, options, &cfg); if (ret) { - PMD_INIT_LOG(ERR, "Error in setting the rx flow: = %d\n", ret); + DPAA2_PMD_ERR("Error in setting the rx flow: = %d", ret); return -1; } @@ -479,14 +488,14 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev, taildrop.threshold = CONG_THRESHOLD_RX_Q; taildrop.units = DPNI_CONGESTION_UNIT_BYTES; taildrop.oal = CONG_RX_OAL; - PMD_DRV_LOG(DEBUG, "Enabling Early Drop on queue = %d", - rx_queue_id); + DPAA2_PMD_DEBUG("Enabling Early Drop on queue = %d", + 
rx_queue_id); ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token, DPNI_CP_QUEUE, DPNI_QUEUE_RX, dpaa2_q->tc_index, flow_id, &taildrop); if (ret) { - PMD_INIT_LOG(ERR, "Error in setting the rx flow" - " err : = %d\n", ret); + DPAA2_PMD_ERR("Error in setting taildrop. err=(%d)", + ret); return -1; } } @@ -529,9 +538,9 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev, ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX, tc_id, flow_id, options, &tx_flow_cfg); if (ret) { - PMD_INIT_LOG(ERR, "Error in setting the tx flow: " - "tc_id=%d, flow =%d ErrorCode = %x\n", - tc_id, flow_id, -ret); + DPAA2_PMD_ERR("Error in setting the tx flow: " + "tc_id=%d, flow=%d err=%d", + tc_id, flow_id, ret); return -1; } @@ -543,8 +552,8 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev, priv->token, DPNI_CONF_DISABLE); if (ret) { - PMD_INIT_LOG(ERR, "Error in set tx conf mode settings" - " ErrorCode = %x", ret); + DPAA2_PMD_ERR("Error in set tx conf mode settings: " + "err=%d", ret); return -1; } } @@ -560,7 +569,7 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev, */ cong_notif_cfg.threshold_exit = CONG_EXIT_TX_THRESHOLD; cong_notif_cfg.message_ctx = 0; - cong_notif_cfg.message_iova = (uint64_t)dpaa2_q->cscn; + cong_notif_cfg.message_iova = (size_t)dpaa2_q->cscn; cong_notif_cfg.dest_cfg.dest_type = DPNI_DEST_NONE; cong_notif_cfg.notification_mode = DPNI_CONG_OPT_WRITE_MEM_ON_ENTER | @@ -573,9 +582,9 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev, tc_id, &cong_notif_cfg); if (ret) { - PMD_INIT_LOG(ERR, - "Error in setting tx congestion notification: = %d", - -ret); + DPAA2_PMD_ERR( + "Error in setting tx congestion notification: " + "err=%d", ret); return -ret; } } @@ -610,7 +619,7 @@ dpaa2_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id) if (unlikely(!DPAA2_PER_LCORE_DPIO)) { ret = dpaa2_affine_qbman_swp(); if (ret) { - RTE_LOG(ERR, PMD, "Failure in affining portal\n"); + DPAA2_PMD_ERR("Failure in affining portal"); return -EINVAL; } } @@ -620,8 +629,8 @@ dpaa2_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id) if (qbman_fq_query_state(swp, dpaa2_q->fqid, &state) == 0) { frame_cnt = qbman_fq_state_frame_count(&state); - RTE_LOG(DEBUG, PMD, "RX frame count for q(%d) is %u\n", - rx_queue_id, frame_cnt); + DPAA2_PMD_DEBUG("RX frame count for q(%d) is %u", + rx_queue_id, frame_cnt); } return frame_cnt; } @@ -670,14 +679,14 @@ dpaa2_interrupt_handler(void *param) PMD_INIT_FUNC_TRACE(); if (dpni == NULL) { - RTE_LOG(ERR, PMD, "dpni is NULL"); + DPAA2_PMD_ERR("dpni is NULL"); return; } ret = dpni_get_irq_status(dpni, CMD_PRI_LOW, priv->token, irq_index, &status); if (unlikely(ret)) { - RTE_LOG(ERR, PMD, "Can't get irq status (err %d)", ret); + DPAA2_PMD_ERR("Can't get irq status (err %d)", ret); clear = 0xffffffff; goto out; } @@ -693,7 +702,7 @@ out: ret = dpni_clear_irq_status(dpni, CMD_PRI_LOW, priv->token, irq_index, clear); if (unlikely(ret)) - RTE_LOG(ERR, PMD, "Can't clear irq status (err %d)", ret); + DPAA2_PMD_ERR("Can't clear irq status (err %d)", ret); } static int @@ -710,16 +719,16 @@ dpaa2_eth_setup_irqs(struct rte_eth_dev *dev, int enable) err = dpni_set_irq_mask(dpni, CMD_PRI_LOW, priv->token, irq_index, mask); if (err < 0) { - PMD_INIT_LOG(ERR, "Error: dpni_set_irq_mask():%d (%s)", err, - strerror(-err)); + DPAA2_PMD_ERR("Error: dpni_set_irq_mask():%d (%s)", err, + strerror(-err)); return err; } err = dpni_set_irq_enable(dpni, CMD_PRI_LOW, priv->token, irq_index, enable); if (err < 0) - PMD_INIT_LOG(ERR, "Error: dpni_set_irq_enable():%d 
(%s)", err, - strerror(-err)); + DPAA2_PMD_ERR("Error: dpni_set_irq_enable():%d (%s)", err, + strerror(-err)); return err; } @@ -747,8 +756,8 @@ dpaa2_dev_start(struct rte_eth_dev *dev) ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token); if (ret) { - PMD_INIT_LOG(ERR, "Failure %d in enabling dpni %d device\n", - ret, priv->hw_id); + DPAA2_PMD_ERR("Failure in enabling dpni %d device: err=%d", + priv->hw_id, ret); return ret; } @@ -758,7 +767,7 @@ dpaa2_dev_start(struct rte_eth_dev *dev) ret = dpni_get_qdid(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX, &qdid); if (ret) { - PMD_INIT_LOG(ERR, "Error to get qdid:ErrorCode = %d\n", ret); + DPAA2_PMD_ERR("Error in getting qdid: err=%d", ret); return ret; } priv->qdid = qdid; @@ -769,8 +778,8 @@ dpaa2_dev_start(struct rte_eth_dev *dev) DPNI_QUEUE_RX, dpaa2_q->tc_index, dpaa2_q->flow_id, &cfg, &qid); if (ret) { - PMD_INIT_LOG(ERR, "Error to get flow " - "information Error code = %d\n", ret); + DPAA2_PMD_ERR("Error in getting flow information: " + "err=%d", ret); return ret; } dpaa2_q->fqid = qid.fqid; @@ -785,8 +794,8 @@ dpaa2_dev_start(struct rte_eth_dev *dev) ret = dpni_set_errors_behavior(dpni, CMD_PRI_LOW, priv->token, &err_cfg); if (ret) { - PMD_INIT_LOG(ERR, "Error to dpni_set_errors_behavior:" - "code = %d\n", ret); + DPAA2_PMD_ERR("Error to dpni_set_errors_behavior: code = %d", + ret); return ret; } @@ -845,14 +854,14 @@ dpaa2_dev_stop(struct rte_eth_dev *dev) ret = dpni_disable(dpni, CMD_PRI_LOW, priv->token); if (ret) { - PMD_INIT_LOG(ERR, "Failure (ret %d) in disabling dpni %d dev\n", - ret, priv->hw_id); + DPAA2_PMD_ERR("Failure (ret %d) in disabling dpni %d dev", + ret, priv->hw_id); return; } /* clear the recorded link status */ memset(&link, 0, sizeof(link)); - dpaa2_dev_atomic_write_link_status(dev, &link); + rte_eth_linkstatus_set(dev, &link); } static void @@ -878,13 +887,12 @@ dpaa2_dev_close(struct rte_eth_dev *dev) /* Clean the device first */ ret = dpni_reset(dpni, CMD_PRI_LOW, priv->token); if (ret) { - PMD_INIT_LOG(ERR, "Failure cleaning dpni device with" - " error code %d\n", ret); + DPAA2_PMD_ERR("Failure cleaning dpni device: err=%d", ret); return; } memset(&link, 0, sizeof(link)); - dpaa2_dev_atomic_write_link_status(dev, &link); + rte_eth_linkstatus_set(dev, &link); } static void @@ -898,17 +906,17 @@ dpaa2_dev_promiscuous_enable( PMD_INIT_FUNC_TRACE(); if (dpni == NULL) { - RTE_LOG(ERR, PMD, "dpni is NULL\n"); + DPAA2_PMD_ERR("dpni is NULL"); return; } ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, true); if (ret < 0) - RTE_LOG(ERR, PMD, "Unable to enable U promisc mode %d\n", ret); + DPAA2_PMD_ERR("Unable to enable U promisc mode %d", ret); ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true); if (ret < 0) - RTE_LOG(ERR, PMD, "Unable to enable M promisc mode %d\n", ret); + DPAA2_PMD_ERR("Unable to enable M promisc mode %d", ret); } static void @@ -922,21 +930,20 @@ dpaa2_dev_promiscuous_disable( PMD_INIT_FUNC_TRACE(); if (dpni == NULL) { - RTE_LOG(ERR, PMD, "dpni is NULL\n"); + DPAA2_PMD_ERR("dpni is NULL"); return; } ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, false); if (ret < 0) - RTE_LOG(ERR, PMD, "Unable to disable U promisc mode %d\n", ret); + DPAA2_PMD_ERR("Unable to disable U promisc mode %d", ret); if (dev->data->all_multicast == 0) { ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, false); if (ret < 0) - RTE_LOG(ERR, PMD, - "Unable to disable M promisc mode %d\n", - ret); + DPAA2_PMD_ERR("Unable to disable M promisc mode %d", + ret); } } @@ 
-951,13 +958,13 @@ dpaa2_dev_allmulticast_enable( PMD_INIT_FUNC_TRACE(); if (dpni == NULL) { - RTE_LOG(ERR, PMD, "dpni is NULL\n"); + DPAA2_PMD_ERR("dpni is NULL"); return; } ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true); if (ret < 0) - RTE_LOG(ERR, PMD, "Unable to enable multicast mode %d\n", ret); + DPAA2_PMD_ERR("Unable to enable multicast mode %d", ret); } static void @@ -970,7 +977,7 @@ dpaa2_dev_allmulticast_disable(struct rte_eth_dev *dev) PMD_INIT_FUNC_TRACE(); if (dpni == NULL) { - RTE_LOG(ERR, PMD, "dpni is NULL\n"); + DPAA2_PMD_ERR("dpni is NULL"); return; } @@ -980,7 +987,7 @@ dpaa2_dev_allmulticast_disable(struct rte_eth_dev *dev) ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, false); if (ret < 0) - RTE_LOG(ERR, PMD, "Unable to disable multicast mode %d\n", ret); + DPAA2_PMD_ERR("Unable to disable multicast mode %d", ret); } static int @@ -995,7 +1002,7 @@ dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) PMD_INIT_FUNC_TRACE(); if (dpni == NULL) { - RTE_LOG(ERR, PMD, "dpni is NULL\n"); + DPAA2_PMD_ERR("dpni is NULL"); return -EINVAL; } @@ -1004,9 +1011,11 @@ dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) return -EINVAL; if (frame_size > ETHER_MAX_LEN) - dev->data->dev_conf.rxmode.jumbo_frame = 1; + dev->data->dev_conf.rxmode.offloads &= + DEV_RX_OFFLOAD_JUMBO_FRAME; else - dev->data->dev_conf.rxmode.jumbo_frame = 0; + dev->data->dev_conf.rxmode.offloads &= + ~DEV_RX_OFFLOAD_JUMBO_FRAME; dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size; @@ -1016,10 +1025,10 @@ dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, priv->token, frame_size); if (ret) { - PMD_DRV_LOG(ERR, "setting the max frame length failed"); + DPAA2_PMD_ERR("Setting the max frame length failed"); return -1; } - PMD_DRV_LOG(INFO, "MTU is configured %d for the device", mtu); + DPAA2_PMD_INFO("MTU configured for the device: %d", mtu); return 0; } @@ -1036,15 +1045,15 @@ dpaa2_dev_add_mac_addr(struct rte_eth_dev *dev, PMD_INIT_FUNC_TRACE(); if (dpni == NULL) { - RTE_LOG(ERR, PMD, "dpni is NULL\n"); + DPAA2_PMD_ERR("dpni is NULL"); return -1; } ret = dpni_add_mac_addr(dpni, CMD_PRI_LOW, priv->token, addr->addr_bytes); if (ret) - RTE_LOG(ERR, PMD, - "error: Adding the MAC ADDR failed: err = %d\n", ret); + DPAA2_PMD_ERR( + "error: Adding the MAC ADDR failed: err = %d", ret); return 0; } @@ -1063,18 +1072,18 @@ dpaa2_dev_remove_mac_addr(struct rte_eth_dev *dev, macaddr = &data->mac_addrs[index]; if (dpni == NULL) { - RTE_LOG(ERR, PMD, "dpni is NULL\n"); + DPAA2_PMD_ERR("dpni is NULL"); return; } ret = dpni_remove_mac_addr(dpni, CMD_PRI_LOW, priv->token, macaddr->addr_bytes); if (ret) - RTE_LOG(ERR, PMD, - "error: Removing the MAC ADDR failed: err = %d\n", ret); + DPAA2_PMD_ERR( + "error: Removing the MAC ADDR failed: err = %d", ret); } -static void +static int dpaa2_dev_set_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr) { @@ -1085,17 +1094,20 @@ dpaa2_dev_set_mac_addr(struct rte_eth_dev *dev, PMD_INIT_FUNC_TRACE(); if (dpni == NULL) { - RTE_LOG(ERR, PMD, "dpni is NULL\n"); - return; + DPAA2_PMD_ERR("dpni is NULL"); + return -EINVAL; } ret = dpni_set_primary_mac_addr(dpni, CMD_PRI_LOW, priv->token, addr->addr_bytes); if (ret) - RTE_LOG(ERR, PMD, - "error: Setting the MAC ADDR failed %d\n", ret); + DPAA2_PMD_ERR( + "error: Setting the MAC ADDR failed %d", ret); + + return ret; } + static int dpaa2_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) @@ -1111,12 +1123,12 @@ int 
dpaa2_dev_stats_get(struct rte_eth_dev *dev, PMD_INIT_FUNC_TRACE(); if (!dpni) { - RTE_LOG(ERR, PMD, "dpni is NULL\n"); + DPAA2_PMD_ERR("dpni is NULL"); return -EINVAL; } if (!stats) { - RTE_LOG(ERR, PMD, "stats is NULL\n"); + DPAA2_PMD_ERR("stats is NULL"); return -EINVAL; } @@ -1155,7 +1167,7 @@ int dpaa2_dev_stats_get(struct rte_eth_dev *dev, return 0; err: - RTE_LOG(ERR, PMD, "Operation not completed:Error Code = %d\n", retcode); + DPAA2_PMD_ERR("Operation not completed:Error Code = %d", retcode); return retcode; }; @@ -1169,12 +1181,12 @@ dpaa2_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, union dpni_statistics value[3] = {}; unsigned int i = 0, num = RTE_DIM(dpaa2_xstats_strings); - if (xstats == NULL) - return 0; - if (n < num) return num; + if (xstats == NULL) + return 0; + /* Get Counters from page_0*/ retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 0, 0, &value[0]); @@ -1200,17 +1212,20 @@ dpaa2_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, } return i; err: - RTE_LOG(ERR, PMD, "Error in obtaining extended stats (%d)\n", retcode); + DPAA2_PMD_ERR("Error in obtaining extended stats (%d)", retcode); return retcode; } static int dpaa2_xstats_get_names(__rte_unused struct rte_eth_dev *dev, struct rte_eth_xstat_name *xstats_names, - __rte_unused unsigned int limit) + unsigned int limit) { unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings); + if (limit < stat_cnt) + return stat_cnt; + if (xstats_names != NULL) for (i = 0; i < stat_cnt; i++) snprintf(xstats_names[i].name, @@ -1269,7 +1284,7 @@ dpaa2_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, for (i = 0; i < n; i++) { if (ids[i] >= stat_cnt) { - PMD_INIT_LOG(ERR, "id value isn't valid"); + DPAA2_PMD_ERR("xstats id value isn't valid"); return -1; } values[i] = values_copy[ids[i]]; @@ -1294,7 +1309,7 @@ dpaa2_xstats_get_names_by_id( for (i = 0; i < limit; i++) { if (ids[i] >= stat_cnt) { - PMD_INIT_LOG(ERR, "id value isn't valid"); + DPAA2_PMD_ERR("xstats id value isn't valid"); return -1; } strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name); @@ -1312,7 +1327,7 @@ dpaa2_dev_stats_reset(struct rte_eth_dev *dev) PMD_INIT_FUNC_TRACE(); if (dpni == NULL) { - RTE_LOG(ERR, PMD, "dpni is NULL\n"); + DPAA2_PMD_ERR("dpni is NULL"); return; } @@ -1323,7 +1338,7 @@ dpaa2_dev_stats_reset(struct rte_eth_dev *dev) return; error: - RTE_LOG(ERR, PMD, "Operation not completed:Error Code = %d\n", retcode); + DPAA2_PMD_ERR("Operation not completed:Error Code = %d", retcode); return; }; @@ -1335,24 +1350,17 @@ dpaa2_dev_link_update(struct rte_eth_dev *dev, int ret; struct dpaa2_dev_priv *priv = dev->data->dev_private; struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; - struct rte_eth_link link, old; + struct rte_eth_link link; struct dpni_link_state state = {0}; if (dpni == NULL) { - RTE_LOG(ERR, PMD, "dpni is NULL\n"); + DPAA2_PMD_ERR("dpni is NULL"); return 0; } - memset(&old, 0, sizeof(old)); - dpaa2_dev_atomic_read_link_status(dev, &old); ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state); if (ret < 0) { - RTE_LOG(ERR, PMD, "error: dpni_get_link_state %d\n", ret); - return -1; - } - - if ((old.link_status == state.up) && (old.link_speed == state.rate)) { - RTE_LOG(DEBUG, PMD, "No change in status\n"); + DPAA2_PMD_ERR("error: dpni_get_link_state %d", ret); return -1; } @@ -1365,13 +1373,14 @@ dpaa2_dev_link_update(struct rte_eth_dev *dev, else link.link_duplex = ETH_LINK_FULL_DUPLEX; - dpaa2_dev_atomic_write_link_status(dev, &link); - - if 
(link.link_status) - PMD_DRV_LOG(INFO, "Port %d Link is Up\n", dev->data->port_id); + ret = rte_eth_linkstatus_set(dev, &link); + if (ret == -1) + DPAA2_PMD_DEBUG("No change in status"); else - PMD_DRV_LOG(INFO, "Port %d Link is Down", dev->data->port_id); - return 0; + DPAA2_PMD_INFO("Port %d Link is %s\n", dev->data->port_id, + link.link_status ? "Up" : "Down"); + + return ret; } /** @@ -1391,7 +1400,7 @@ dpaa2_dev_set_link_up(struct rte_eth_dev *dev) dpni = (struct fsl_mc_io *)priv->hw; if (dpni == NULL) { - RTE_LOG(ERR, PMD, "DPNI is NULL\n"); + DPAA2_PMD_ERR("dpni is NULL"); return ret; } @@ -1399,7 +1408,7 @@ dpaa2_dev_set_link_up(struct rte_eth_dev *dev) ret = dpni_is_enabled(dpni, CMD_PRI_LOW, priv->token, &en); if (ret) { /* Unable to obtain dpni status; Not continuing */ - PMD_DRV_LOG(ERR, "Interface Link UP failed (%d)", ret); + DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret); return -EINVAL; } @@ -1407,13 +1416,13 @@ dpaa2_dev_set_link_up(struct rte_eth_dev *dev) if (!en) { ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token); if (ret) { - PMD_DRV_LOG(ERR, "Interface Link UP failed (%d)", ret); + DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret); return -EINVAL; } } ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state); if (ret < 0) { - RTE_LOG(ERR, PMD, "error: dpni_get_link_state %d\n", ret); + DPAA2_PMD_ERR("Unable to get link state (%d)", ret); return -1; } @@ -1422,10 +1431,9 @@ dpaa2_dev_set_link_up(struct rte_eth_dev *dev) dev->data->dev_link.link_status = state.up; if (state.up) - PMD_DRV_LOG(INFO, "Port %d Link is set as UP", - dev->data->port_id); + DPAA2_PMD_INFO("Port %d Link is Up", dev->data->port_id); else - PMD_DRV_LOG(INFO, "Port %d Link is DOWN", dev->data->port_id); + DPAA2_PMD_INFO("Port %d Link is Down", dev->data->port_id); return ret; } @@ -1448,7 +1456,7 @@ dpaa2_dev_set_link_down(struct rte_eth_dev *dev) dpni = (struct fsl_mc_io *)priv->hw; if (dpni == NULL) { - RTE_LOG(ERR, PMD, "Device has not yet been configured\n"); + DPAA2_PMD_ERR("Device has not yet been configured"); return ret; } @@ -1461,12 +1469,12 @@ dpaa2_dev_set_link_down(struct rte_eth_dev *dev) do { ret = dpni_disable(dpni, 0, priv->token); if (ret) { - PMD_DRV_LOG(ERR, "dpni disable failed (%d)", ret); + DPAA2_PMD_ERR("dpni disable failed (%d)", ret); return ret; } ret = dpni_is_enabled(dpni, 0, priv->token, &dpni_enabled); if (ret) { - PMD_DRV_LOG(ERR, "dpni_is_enabled failed (%d)", ret); + DPAA2_PMD_ERR("dpni enable check failed (%d)", ret); return ret; } if (dpni_enabled) @@ -1475,12 +1483,12 @@ dpaa2_dev_set_link_down(struct rte_eth_dev *dev) } while (dpni_enabled && --retries); if (!retries) { - PMD_DRV_LOG(WARNING, "Retry count exceeded disabling DPNI\n"); + DPAA2_PMD_WARN("Retry count exceeded disabling dpni"); /* todo- we may have to manually cleanup queues. 
*/ } else { - PMD_DRV_LOG(INFO, "Port %d Link DOWN successful", - dev->data->port_id); + DPAA2_PMD_INFO("Port %d Link DOWN successful", + dev->data->port_id); } dev->data->dev_link.link_status = 0; @@ -1502,13 +1510,13 @@ dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) dpni = (struct fsl_mc_io *)priv->hw; if (dpni == NULL || fc_conf == NULL) { - RTE_LOG(ERR, PMD, "device not configured\n"); + DPAA2_PMD_ERR("device not configured"); return ret; } ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state); if (ret) { - RTE_LOG(ERR, PMD, "error: dpni_get_link_state %d\n", ret); + DPAA2_PMD_ERR("error: dpni_get_link_state %d", ret); return ret; } @@ -1558,7 +1566,7 @@ dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) dpni = (struct fsl_mc_io *)priv->hw; if (dpni == NULL) { - RTE_LOG(ERR, PMD, "dpni is NULL\n"); + DPAA2_PMD_ERR("dpni is NULL"); return ret; } @@ -1568,7 +1576,7 @@ dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) */ ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state); if (ret) { - RTE_LOG(ERR, PMD, "Unable to get link state (err=%d)\n", ret); + DPAA2_PMD_ERR("Unable to get link state (err=%d)", ret); return -1; } @@ -1613,16 +1621,15 @@ dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE; break; default: - RTE_LOG(ERR, PMD, "Incorrect Flow control flag (%d)\n", - fc_conf->mode); + DPAA2_PMD_ERR("Incorrect Flow control flag (%d)", + fc_conf->mode); return -1; } ret = dpni_set_link_cfg(dpni, CMD_PRI_LOW, priv->token, &cfg); if (ret) - RTE_LOG(ERR, PMD, - "Unable to set Link configuration (err=%d)\n", - ret); + DPAA2_PMD_ERR("Unable to set Link configuration (err=%d)", + ret); /* Enable link */ dpaa2_dev_set_link_up(dev); @@ -1643,13 +1650,13 @@ dpaa2_dev_rss_hash_update(struct rte_eth_dev *dev, if (rss_conf->rss_hf) { ret = dpaa2_setup_flow_dist(dev, rss_conf->rss_hf); if (ret) { - PMD_INIT_LOG(ERR, "unable to set flow dist"); + DPAA2_PMD_ERR("Unable to set flow dist"); return ret; } } else { ret = dpaa2_remove_flow_dist(dev, 0); if (ret) { - PMD_INIT_LOG(ERR, "unable to remove flow dist"); + DPAA2_PMD_ERR("Unable to remove flow dist"); return ret; } } @@ -1702,12 +1709,12 @@ int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev, } options |= DPNI_QUEUE_OPT_USER_CTX; - cfg.user_context = (uint64_t)(dpaa2_ethq); + cfg.user_context = (size_t)(dpaa2_ethq); ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX, dpaa2_ethq->tc_index, flow_id, options, &cfg); if (ret) { - RTE_LOG(ERR, PMD, "Error in dpni_set_queue: ret: %d\n", ret); + DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret); return ret; } @@ -1734,7 +1741,7 @@ int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev, ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX, dpaa2_ethq->tc_index, flow_id, options, &cfg); if (ret) - RTE_LOG(ERR, PMD, "Error in dpni_set_queue: ret: %d\n", ret); + DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret); return ret; } @@ -1801,15 +1808,15 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev) dpni_dev = rte_malloc(NULL, sizeof(struct fsl_mc_io), 0); if (!dpni_dev) { - PMD_INIT_LOG(ERR, "malloc failed for dpni device\n"); + DPAA2_PMD_ERR("Memory allocation failed for dpni device"); return -1; } dpni_dev->regs = rte_mcp_ptr_list[0]; ret = dpni_open(dpni_dev, CMD_PRI_LOW, hw_id, &priv->token); if (ret) { - PMD_INIT_LOG(ERR, - "Failure in opening dpni@%d with err code %d\n", + 
DPAA2_PMD_ERR( + "Failure in opening dpni@%d with err code %d", hw_id, ret); rte_free(dpni_dev); return -1; @@ -1818,16 +1825,15 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev) /* Clean the device first */ ret = dpni_reset(dpni_dev, CMD_PRI_LOW, priv->token); if (ret) { - PMD_INIT_LOG(ERR, - "Failure cleaning dpni@%d with err code %d\n", - hw_id, ret); + DPAA2_PMD_ERR("Failure cleaning dpni@%d with err code %d", + hw_id, ret); goto init_err; } ret = dpni_get_attributes(dpni_dev, CMD_PRI_LOW, priv->token, &attr); if (ret) { - PMD_INIT_LOG(ERR, - "Failure in get dpni@%d attribute, err code %d\n", + DPAA2_PMD_ERR( + "Failure in get dpni@%d attribute, err code %d", hw_id, ret); goto init_err; } @@ -1843,8 +1849,9 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev) /* Using number of TX queues as number of TX TCs */ priv->nb_tx_queues = attr.num_tx_tcs; - PMD_DRV_LOG(DEBUG, "RX-TC= %d, nb_rx_queues= %d, nb_tx_queues=%d", - priv->num_rx_tc, priv->nb_rx_queues, priv->nb_tx_queues); + DPAA2_PMD_DEBUG("RX-TC= %d, nb_rx_queues= %d, nb_tx_queues=%d", + priv->num_rx_tc, priv->nb_rx_queues, + priv->nb_tx_queues); priv->hw = dpni_dev; priv->hw_id = hw_id; @@ -1856,7 +1863,7 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev) /* Allocate memory for hardware structure for queues */ ret = dpaa2_alloc_rx_tx_queues(eth_dev); if (ret) { - PMD_INIT_LOG(ERR, "dpaa2_alloc_rx_tx_queuesFailed\n"); + DPAA2_PMD_ERR("Queue allocation Failed"); goto init_err; } @@ -1864,9 +1871,9 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev) eth_dev->data->mac_addrs = rte_zmalloc("dpni", ETHER_ADDR_LEN * attr.mac_filter_entries, 0); if (eth_dev->data->mac_addrs == NULL) { - PMD_INIT_LOG(ERR, + DPAA2_PMD_ERR( "Failed to allocate %d bytes needed to store MAC addresses", - ETHER_ADDR_LEN * attr.mac_filter_entries); + ETHER_ADDR_LEN * attr.mac_filter_entries); ret = -ENOMEM; goto init_err; } @@ -1875,7 +1882,7 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev) priv->token, (uint8_t *)(eth_dev->data->mac_addrs[0].addr_bytes)); if (ret) { - PMD_INIT_LOG(ERR, "DPNI get mac address failed:Err Code = %d\n", + DPAA2_PMD_ERR("DPNI get mac address failed:Err Code = %d", ret); goto init_err; } @@ -1887,8 +1894,7 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev) ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX, &layout); if (ret) { - PMD_INIT_LOG(ERR, "Error (%d) in setting tx buffer layout", - ret); + DPAA2_PMD_ERR("Error (%d) in setting tx buffer layout", ret); goto init_err; } @@ -1899,7 +1905,7 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev) ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX_CONFIRM, &layout); if (ret) { - PMD_INIT_LOG(ERR, "Error (%d) in setting tx-conf buffer layout", + DPAA2_PMD_ERR("Error (%d) in setting tx-conf buffer layout", ret); goto init_err; } @@ -1908,7 +1914,6 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev) eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx; eth_dev->tx_pkt_burst = dpaa2_dev_tx; - rte_fslmc_vfio_dmamap(); RTE_LOG(INFO, PMD, "%s: netdev created\n", eth_dev->data->name); return 0; @@ -1931,7 +1936,7 @@ dpaa2_dev_uninit(struct rte_eth_dev *eth_dev) return 0; if (!dpni) { - PMD_INIT_LOG(WARNING, "Already closed or not started"); + DPAA2_PMD_WARN("Already closed or not started"); return -1; } @@ -1958,8 +1963,8 @@ dpaa2_dev_uninit(struct rte_eth_dev *eth_dev) /* Close the device at underlying layer*/ ret = dpni_close(dpni, CMD_PRI_LOW, priv->token); if (ret) { - PMD_INIT_LOG(ERR, - "Failure closing dpni device with err code %d\n", + DPAA2_PMD_ERR( + "Failure closing 
dpni device with err code %d", ret); } @@ -1971,7 +1976,7 @@ dpaa2_dev_uninit(struct rte_eth_dev *eth_dev) eth_dev->rx_pkt_burst = NULL; eth_dev->tx_pkt_burst = NULL; - RTE_LOG(INFO, PMD, "%s: netdev created\n", eth_dev->data->name); + DPAA2_PMD_INFO("%s: netdev deleted", eth_dev->data->name); return 0; } @@ -1991,8 +1996,8 @@ rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv, sizeof(struct dpaa2_dev_priv), RTE_CACHE_LINE_SIZE); if (eth_dev->data->dev_private == NULL) { - PMD_INIT_LOG(CRIT, "Cannot allocate memzone for" - " private port data\n"); + DPAA2_PMD_CRIT( + "Unable to allocate memory for private data"); rte_eth_dev_release_port(eth_dev); return -ENOMEM; } @@ -2013,8 +2018,10 @@ rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv, /* Invoke PMD device initialization function */ diag = dpaa2_dev_init(eth_dev); - if (diag == 0) + if (diag == 0) { + rte_eth_dev_probing_finish(eth_dev); return 0; + } if (rte_eal_process_type() == RTE_PROC_PRIMARY) rte_free(eth_dev->data->dev_private); @@ -2045,3 +2052,12 @@ static struct rte_dpaa2_driver rte_dpaa2_pmd = { }; RTE_PMD_REGISTER_DPAA2(net_dpaa2, rte_dpaa2_pmd); + +RTE_INIT(dpaa2_pmd_init_log); +static void +dpaa2_pmd_init_log(void) +{ + dpaa2_logtype_pmd = rte_log_register("pmd.net.dpaa2"); + if (dpaa2_logtype_pmd >= 0) + rte_log_set_level(dpaa2_logtype_pmd, RTE_LOG_NOTICE); +} diff --git a/drivers/net/dpaa2/dpaa2_ethdev.h b/drivers/net/dpaa2/dpaa2_ethdev.h index ba0856f3..bd69f523 100644 --- a/drivers/net/dpaa2/dpaa2_ethdev.h +++ b/drivers/net/dpaa2/dpaa2_ethdev.h @@ -50,6 +50,12 @@ /* Disable RX tail drop, default is enable */ #define DPAA2_RX_TAILDROP_OFF 0x04 +#define DPAA2_RSS_OFFLOAD_ALL ( \ + ETH_RSS_IP | \ + ETH_RSS_UDP | \ + ETH_RSS_TCP | \ + ETH_RSS_SCTP) + /* LX2 FRC Parsed values (Little Endian) */ #define DPAA2_PKT_TYPE_ETHER 0x0060 #define DPAA2_PKT_TYPE_IPV4 0x0000 diff --git a/drivers/net/dpaa2/dpaa2_pmd_logs.h b/drivers/net/dpaa2/dpaa2_pmd_logs.h new file mode 100644 index 00000000..98a48968 --- /dev/null +++ b/drivers/net/dpaa2/dpaa2_pmd_logs.h @@ -0,0 +1,41 @@ +/*- + * SPDX-License-Identifier: BSD-3-Clause + * Copyright 2017 NXP + */ + +#ifndef _DPAA2_PMD_LOGS_H_ +#define _DPAA2_PMD_LOGS_H_ + +extern int dpaa2_logtype_pmd; + +#define DPAA2_PMD_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, dpaa2_logtype_pmd, "dpaa2_net: " \ + fmt "\n", ##args) + +#define DPAA2_PMD_DEBUG(fmt, args...) \ + rte_log(RTE_LOG_DEBUG, dpaa2_logtype_pmd, "dpaa2_net: %s(): "\ + fmt "\n", __func__, ##args) + +#define PMD_INIT_FUNC_TRACE() DPAA2_PMD_LOG(DEBUG, " >>") + +#define DPAA2_PMD_CRIT(fmt, args...) \ + DPAA2_PMD_LOG(CRIT, fmt, ## args) +#define DPAA2_PMD_INFO(fmt, args...) \ + DPAA2_PMD_LOG(INFO, fmt, ## args) +#define DPAA2_PMD_ERR(fmt, args...) \ + DPAA2_PMD_LOG(ERR, fmt, ## args) +#define DPAA2_PMD_WARN(fmt, args...) \ + DPAA2_PMD_LOG(WARNING, fmt, ## args) + +/* DP Logs, toggled out at compile time if level lower than current level */ +#define DPAA2_PMD_DP_LOG(level, fmt, args...) \ + RTE_LOG_DP(level, PMD, fmt, ## args) + +#define DPAA2_PMD_DP_DEBUG(fmt, args...) \ + DPAA2_PMD_DP_LOG(DEBUG, fmt, ## args) +#define DPAA2_PMD_DP_INFO(fmt, args...) \ + DPAA2_PMD_DP_LOG(INFO, fmt, ## args) +#define DPAA2_PMD_DP_WARN(fmt, args...) 
\ + DPAA2_PMD_DP_LOG(WARNING, fmt, ## args) + +#endif /* _DPAA2_PMD_LOGS_H_ */ diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c index 183293c1..dac086d6 100644 --- a/drivers/net/dpaa2/dpaa2_rxtx.c +++ b/drivers/net/dpaa2/dpaa2_rxtx.c @@ -16,13 +16,12 @@ #include #include -#include #include #include #include #include -#include +#include "dpaa2_pmd_logs.h" #include "dpaa2_ethdev.h" #include "base/dpaa2_hw_dpni_annot.h" @@ -37,7 +36,7 @@ static inline void __attribute__((hot)) dpaa2_dev_rx_parse_frc(struct rte_mbuf *m, uint16_t frc) { - PMD_RX_LOG(DEBUG, "frc = 0x%x ", frc); + DPAA2_PMD_DP_DEBUG("frc = 0x%x\t", frc); m->packet_type = RTE_PTYPE_UNKNOWN; switch (frc) { @@ -104,13 +103,12 @@ dpaa2_dev_rx_parse_frc(struct rte_mbuf *m, uint16_t frc) } static inline uint32_t __attribute__((hot)) -dpaa2_dev_rx_parse_slow(uint64_t hw_annot_addr) +dpaa2_dev_rx_parse_slow(struct dpaa2_annot_hdr *annotation) { uint32_t pkt_type = RTE_PTYPE_UNKNOWN; - struct dpaa2_annot_hdr *annotation = - (struct dpaa2_annot_hdr *)hw_annot_addr; - PMD_RX_LOG(DEBUG, "annotation = 0x%lx ", annotation->word4); + DPAA2_PMD_DP_DEBUG("(slow parse) Annotation = 0x%" PRIx64 "\t", + annotation->word4); if (BIT_ISSET_AT_POS(annotation->word3, L2_ARP_PRESENT)) { pkt_type = RTE_PTYPE_L2_ETHER_ARP; goto parse_done; @@ -167,12 +165,13 @@ parse_done: } static inline uint32_t __attribute__((hot)) -dpaa2_dev_rx_parse(struct rte_mbuf *mbuf, uint64_t hw_annot_addr) +dpaa2_dev_rx_parse(struct rte_mbuf *mbuf, void *hw_annot_addr) { struct dpaa2_annot_hdr *annotation = (struct dpaa2_annot_hdr *)hw_annot_addr; - PMD_RX_LOG(DEBUG, "annotation = 0x%lx ", annotation->word4); + DPAA2_PMD_DP_DEBUG("(fast parse) Annotation = 0x%" PRIx64 "\t", + annotation->word4); /* Check offloads first */ if (BIT_ISSET_AT_POS(annotation->word3, @@ -203,29 +202,27 @@ dpaa2_dev_rx_parse(struct rte_mbuf *mbuf, uint64_t hw_annot_addr) return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP; default: - PMD_RX_LOG(DEBUG, "Slow parse the parsing results\n"); break; } - return dpaa2_dev_rx_parse_slow(hw_annot_addr); + return dpaa2_dev_rx_parse_slow(annotation); } static inline struct rte_mbuf *__attribute__((hot)) eth_sg_fd_to_mbuf(const struct qbman_fd *fd) { struct qbman_sge *sgt, *sge; - dma_addr_t sg_addr; + size_t sg_addr, fd_addr; int i = 0; - uint64_t fd_addr; struct rte_mbuf *first_seg, *next_seg, *cur_seg, *temp; - fd_addr = (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)); + fd_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)); /* Get Scatter gather table address */ sgt = (struct qbman_sge *)(fd_addr + DPAA2_GET_FD_OFFSET(fd)); sge = &sgt[i++]; - sg_addr = (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FLE_ADDR(sge)); + sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FLE_ADDR(sge)); /* First Scatter gather entry */ first_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr, @@ -243,14 +240,14 @@ eth_sg_fd_to_mbuf(const struct qbman_fd *fd) DPAA2_GET_FD_FRC_PARSE_SUM(fd)); else first_seg->packet_type = dpaa2_dev_rx_parse(first_seg, - (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) - + DPAA2_FD_PTA_SIZE); + (void *)((size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) + + DPAA2_FD_PTA_SIZE)); rte_mbuf_refcnt_set(first_seg, 1); cur_seg = first_seg; while (!DPAA2_SG_IS_FINAL(sge)) { sge = &sgt[i++]; - sg_addr = (uint64_t)DPAA2_IOVA_TO_VADDR( + sg_addr = (size_t)DPAA2_IOVA_TO_VADDR( DPAA2_GET_FLE_ADDR(sge)); next_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr, rte_dpaa2_bpid_info[DPAA2_GET_FLE_BPID(sge)].meta_data_size); @@ -299,11 +296,11 
@@ eth_fd_to_mbuf(const struct qbman_fd *fd) dpaa2_dev_rx_parse_frc(mbuf, DPAA2_GET_FD_FRC_PARSE_SUM(fd)); else mbuf->packet_type = dpaa2_dev_rx_parse(mbuf, - (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) - + DPAA2_FD_PTA_SIZE); + (void *)((size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) + + DPAA2_FD_PTA_SIZE)); - PMD_RX_LOG(DEBUG, "to mbuf - mbuf =%p, mbuf->buf_addr =%p, off = %d," - "fd_off=%d fd =%lx, meta = %d bpid =%d, len=%d\n", + DPAA2_PMD_DP_DEBUG("to mbuf - mbuf =%p, mbuf->buf_addr =%p, off = %d," + "fd_off=%d fd =%" PRIx64 ", meta = %d bpid =%d, len=%d\n", mbuf, mbuf->buf_addr, mbuf->data_off, DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd), rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size, @@ -320,15 +317,9 @@ eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf, struct qbman_sge *sgt, *sge = NULL; int i; - if (unlikely(mbuf->ol_flags & PKT_TX_VLAN_PKT)) { - int ret = rte_vlan_insert(&mbuf); - if (ret) - return ret; - } - temp = rte_pktmbuf_alloc(mbuf->pool); if (temp == NULL) { - PMD_TX_LOG(ERR, "No memory to allocate S/G table"); + DPAA2_PMD_DP_DEBUG("No memory to allocate S/G table\n"); return -ENOMEM; } @@ -340,7 +331,7 @@ eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf, DPAA2_FD_SET_FORMAT(fd, qbman_fd_sg); /*Set Scatter gather table and Scatter gather entries*/ sgt = (struct qbman_sge *)( - (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) + (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) + DPAA2_GET_FD_OFFSET(fd)); for (i = 0; i < mbuf->nb_segs; i++) { @@ -392,17 +383,10 @@ static void __attribute__ ((noinline)) __attribute__((hot)) eth_mbuf_to_fd(struct rte_mbuf *mbuf, struct qbman_fd *fd, uint16_t bpid) { - if (unlikely(mbuf->ol_flags & PKT_TX_VLAN_PKT)) { - if (rte_vlan_insert(&mbuf)) { - rte_pktmbuf_free(mbuf); - return; - } - } - DPAA2_MBUF_TO_CONTIG_FD(mbuf, fd, bpid); - PMD_TX_LOG(DEBUG, "mbuf =%p, mbuf->buf_addr =%p, off = %d," - "fd_off=%d fd =%lx, meta = %d bpid =%d, len=%d\n", + DPAA2_PMD_DP_DEBUG("mbuf =%p, mbuf->buf_addr =%p, off = %d," + "fd_off=%d fd =%" PRIx64 ", meta = %d bpid =%d, len=%d\n", mbuf, mbuf->buf_addr, mbuf->data_off, DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd), rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size, @@ -431,15 +415,9 @@ eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf, struct rte_mbuf *m; void *mb = NULL; - if (unlikely(mbuf->ol_flags & PKT_TX_VLAN_PKT)) { - int ret = rte_vlan_insert(&mbuf); - if (ret) - return ret; - } - if (rte_dpaa2_mbuf_alloc_bulk( rte_dpaa2_bpid_info[bpid].bp_list->mp, &mb, 1)) { - PMD_TX_LOG(WARNING, "Unable to allocated DPAA2 buffer"); + DPAA2_PMD_DP_DEBUG("Unable to allocated DPAA2 buffer\n"); return -1; } m = (struct rte_mbuf *)mb; @@ -455,17 +433,18 @@ eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf, DPAA2_MBUF_TO_CONTIG_FD(m, fd, bpid); - PMD_TX_LOG(DEBUG, " mbuf %p BMAN buf addr %p", - (void *)mbuf, mbuf->buf_addr); - - PMD_TX_LOG(DEBUG, " fdaddr =%lx bpid =%d meta =%d off =%d, len =%d", - DPAA2_GET_FD_ADDR(fd), + DPAA2_PMD_DP_DEBUG( + "mbuf: %p, BMAN buf addr: %p, fdaddr: %" PRIx64 ", bpid: %d," + " meta: %d, off: %d, len: %d\n", + (void *)mbuf, + mbuf->buf_addr, + DPAA2_GET_FD_ADDR(fd), DPAA2_GET_FD_BPID(fd), rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size, DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_LEN(fd)); - return 0; +return 0; } uint16_t @@ -483,14 +462,15 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) struct queue_storage_info_t *q_storage = dpaa2_q->q_storage; struct rte_eth_dev *dev = dpaa2_q->dev; - if (unlikely(!DPAA2_PER_LCORE_DPIO)) { - ret = 
dpaa2_affine_qbman_swp(); + if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) { + ret = dpaa2_affine_qbman_ethrx_swp(); if (ret) { - RTE_LOG(ERR, PMD, "Failure in affining portal\n"); + DPAA2_PMD_ERR("Failure in affining portal"); return 0; } } - swp = DPAA2_PER_LCORE_PORTAL; + swp = DPAA2_PER_LCORE_ETHRX_PORTAL; + if (unlikely(!q_storage->active_dqs)) { q_storage->toggle = 0; dq_storage = q_storage->dq_storage[q_storage->toggle]; @@ -501,30 +481,32 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) q_storage->last_num_pkts); qbman_pull_desc_set_fq(&pulldesc, fqid); qbman_pull_desc_set_storage(&pulldesc, dq_storage, - (dma_addr_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1); - if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) { + (uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1); + if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) { while (!qbman_check_command_complete( - get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index))) + get_swp_active_dqs( + DPAA2_PER_LCORE_ETHRX_DPIO->index))) ; - clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index); + clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index); } while (1) { if (qbman_swp_pull(swp, &pulldesc)) { - PMD_RX_LOG(WARNING, "VDQ command is not issued." - "QBMAN is busy\n"); + DPAA2_PMD_DP_DEBUG("VDQ command is not issued." + " QBMAN is busy (1)\n"); /* Portal was busy, try again */ continue; } break; } q_storage->active_dqs = dq_storage; - q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index; - set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage); + q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index; + set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, + dq_storage); } dq_storage = q_storage->active_dqs; - rte_prefetch0((void *)((uint64_t)(dq_storage))); - rte_prefetch0((void *)((uint64_t)(dq_storage + 1))); + rte_prefetch0((void *)(size_t)(dq_storage)); + rte_prefetch0((void *)(size_t)(dq_storage + 1)); /* Prepare next pull descriptor. This will give space for the * prefething done on DQRR entries @@ -535,7 +517,7 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) qbman_pull_desc_set_numframes(&pulldesc, DPAA2_DQRR_RING_SIZE); qbman_pull_desc_set_fq(&pulldesc, fqid); qbman_pull_desc_set_storage(&pulldesc, dq_storage1, - (dma_addr_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1); + (uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1); /* Check if the previous issued command is completed. 
* Also seems like the SWP is shared between the Ethernet Driver @@ -554,7 +536,7 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) */ while (!qbman_check_new_result(dq_storage)) ; - rte_prefetch0((void *)((uint64_t)(dq_storage + 2))); + rte_prefetch0((void *)((size_t)(dq_storage + 2))); /* Check whether Last Pull command is Expired and * setting Condition for Loop termination */ @@ -569,7 +551,7 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) next_fd = qbman_result_DQ_fd(dq_storage + 1); /* Prefetch Annotation address for the parse results */ - rte_prefetch0((void *)(DPAA2_GET_FD_ADDR(next_fd) + rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(next_fd) + DPAA2_FD_PTA_SIZE + 16)); if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg)) @@ -578,31 +560,31 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) bufs[num_rx] = eth_fd_to_mbuf(fd); bufs[num_rx]->port = dev->data->port_id; - if (dev->data->dev_conf.rxmode.hw_vlan_strip) + if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP) rte_vlan_strip(bufs[num_rx]); dq_storage++; num_rx++; } while (pending); - if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) { + if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) { while (!qbman_check_command_complete( - get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index))) + get_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index))) ; - clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index); + clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index); } /* issue a volatile dequeue command for next pull */ while (1) { if (qbman_swp_pull(swp, &pulldesc)) { - PMD_RX_LOG(WARNING, "VDQ command is not issued." - "QBMAN is busy\n"); + DPAA2_PMD_DP_DEBUG("VDQ command is not issued." 
+ "QBMAN is busy (2)\n"); continue; } break; } q_storage->active_dqs = dq_storage1; - q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index; - set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage1); + q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index; + set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, dq_storage1); dpaa2_q->rx_pkts += num_rx; @@ -616,7 +598,7 @@ dpaa2_dev_process_parallel_event(struct qbman_swp *swp, struct dpaa2_queue *rxq, struct rte_event *ev) { - rte_prefetch0((void *)(DPAA2_GET_FD_ADDR(fd) + + rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) + DPAA2_FD_PTA_SIZE + 16)); ev->flow_id = rxq->ev.flow_id; @@ -641,7 +623,7 @@ dpaa2_dev_process_atomic_event(struct qbman_swp *swp __attribute__((unused)), { uint8_t dqrr_index; - rte_prefetch0((void *)(DPAA2_GET_FD_ADDR(fd) + + rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) + DPAA2_FD_PTA_SIZE + 16)); ev->flow_id = rxq->ev.flow_id; @@ -686,13 +668,13 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) if (unlikely(!DPAA2_PER_LCORE_DPIO)) { ret = dpaa2_affine_qbman_swp(); if (ret) { - RTE_LOG(ERR, PMD, "Failure in affining portal\n"); + DPAA2_PMD_ERR("Failure in affining portal"); return 0; } } swp = DPAA2_PER_LCORE_PORTAL; - PMD_TX_LOG(DEBUG, "===> dev =%p, fqid =%d", dev, dpaa2_q->fqid); + DPAA2_PMD_DP_DEBUG("===> dev =%p, fqid =%d\n", dev, dpaa2_q->fqid); /*Prepare enqueue descriptor*/ qbman_eq_desc_clear(&eqdesc); @@ -726,7 +708,7 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) fd_arr[loop].simple.frc = 0; DPAA2_RESET_FD_CTRL((&fd_arr[loop])); - DPAA2_SET_FD_FLC((&fd_arr[loop]), NULL); + DPAA2_SET_FD_FLC((&fd_arr[loop]), (size_t)NULL); if (likely(RTE_MBUF_DIRECT(*bufs))) { mp = (*bufs)->pool; /* Check the basic scenario and set @@ -736,8 +718,10 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) priv->bp_list->dpaa2_ops_index && (*bufs)->nb_segs == 1 && rte_mbuf_refcnt_read((*bufs)) == 1)) { - if (unlikely((*bufs)->ol_flags - & PKT_TX_VLAN_PKT)) { + if (unlikely(((*bufs)->ol_flags + & PKT_TX_VLAN_PKT) || + (dev->data->dev_conf.txmode.offloads + & DEV_TX_OFFLOAD_VLAN_INSERT))) { ret = rte_vlan_insert(bufs); if (ret) goto send_n_return; @@ -753,19 +737,26 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) } /* Not a hw_pkt pool allocated frame */ if (unlikely(!mp || !priv->bp_list)) { - PMD_TX_LOG(ERR, "err: no bpool attached"); + DPAA2_PMD_ERR("Err: No buffer pool attached"); goto send_n_return; } + if (unlikely(((*bufs)->ol_flags & PKT_TX_VLAN_PKT) || + (dev->data->dev_conf.txmode.offloads + & DEV_TX_OFFLOAD_VLAN_INSERT))) { + int ret = rte_vlan_insert(bufs); + if (ret) + goto send_n_return; + } if (mp->ops_index != priv->bp_list->dpaa2_ops_index) { - PMD_TX_LOG(ERR, "non hw offload bufffer "); + DPAA2_PMD_WARN("Non DPAA2 buffer pool"); /* alloc should be from the default buffer pool * attached to this interface */ bpid = priv->bp_list->buf_pool.bpid; if (unlikely((*bufs)->nb_segs > 1)) { - PMD_TX_LOG(ERR, "S/G support not added" + DPAA2_PMD_ERR("S/G support not added" " for non hw offload buffer"); goto send_n_return; } diff --git a/drivers/net/dpaa2/meson.build b/drivers/net/dpaa2/meson.build new file mode 100644 index 00000000..213f0d72 --- /dev/null +++ b/drivers/net/dpaa2/meson.build @@ -0,0 +1,18 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright 2018 NXP + +if host_machine.system() != 'linux' + build = false +endif + +deps += ['mempool_dpaa2'] +sources = files('base/dpaa2_hw_dpni.c', + 
'dpaa2_ethdev.c', + 'dpaa2_rxtx.c', + 'mc/dpkg.c', + 'mc/dpni.c') + +includes += include_directories('base', 'mc') + +# depends on fslmc bus which uses experimental API +allow_experimental_apis = true diff --git a/drivers/net/e1000/Makefile b/drivers/net/e1000/Makefile index ba81a1f4..9c87e883 100644 --- a/drivers/net/e1000/Makefile +++ b/drivers/net/e1000/Makefile @@ -22,7 +22,8 @@ ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y) # # CFLAGS for icc # -CFLAGS_BASE_DRIVER = -wd177 -wd181 -wd188 -wd869 -wd2259 +CFLAGS_BASE_DRIVER = -diag-disable 177 -diag-disable 181 +CFLAGS_BASE_DRIVER += -diag-disable 869 -diag-disable 2259 else # # CFLAGS for gcc/clang @@ -61,6 +62,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_82575.c SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_i210.c SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_api.c SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_ich8lan.c +SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_logs.c SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_mac.c SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_manage.c SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_mbx.c diff --git a/drivers/net/e1000/base/e1000_82575.c b/drivers/net/e1000/base/e1000_82575.c index 15c7dd84..da1a9a70 100644 --- a/drivers/net/e1000/base/e1000_82575.c +++ b/drivers/net/e1000/base/e1000_82575.c @@ -312,6 +312,9 @@ STATIC s32 e1000_init_phy_params_82575(struct e1000_hw *hw) phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82580; phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88; break; + case BCM54616_E_PHY_ID: + phy->type = e1000_phy_none; + break; default: ret_val = -E1000_ERR_PHY; goto out; @@ -1607,6 +1610,8 @@ STATIC s32 e1000_setup_copper_link_82575(struct e1000_hw *hw) case e1000_phy_82580: ret_val = e1000_copper_link_setup_82577(hw); break; + case e1000_phy_none: + break; default: ret_val = -E1000_ERR_PHY; break; diff --git a/drivers/net/e1000/base/e1000_defines.h b/drivers/net/e1000/base/e1000_defines.h index dbc2bbbe..e2101c17 100644 --- a/drivers/net/e1000/base/e1000_defines.h +++ b/drivers/net/e1000/base/e1000_defines.h @@ -1274,6 +1274,7 @@ POSSIBILITY OF SUCH DAMAGE. #define I350_I_PHY_ID 0x015403B0 #define I210_I_PHY_ID 0x01410C00 #define IGP04E1000_E_PHY_ID 0x02A80391 +#define BCM54616_E_PHY_ID 0x03625D10 #define M88_VENDOR 0x0141 /* M88E1000 Specific Registers */ diff --git a/drivers/net/e1000/base/e1000_phy.h b/drivers/net/e1000/base/e1000_phy.h index 3e45a9ef..2cd0e14b 100644 --- a/drivers/net/e1000/base/e1000_phy.h +++ b/drivers/net/e1000/base/e1000_phy.h @@ -330,4 +330,12 @@ struct sfp_e1000_flags { #define E1000_SFF_VENDOR_OUI_AVAGO 0x00176A00 #define E1000_SFF_VENDOR_OUI_INTEL 0x001B2100 +/* EEPROM byte offsets */ +#define IGB_SFF_8472_SWAP 0x5C +#define IGB_SFF_8472_COMP 0x5E + +/* Bitmasks */ +#define IGB_SFF_ADDRESSING_MODE 0x4 +#define IGB_SFF_8472_UNSUP 0x00 + #endif diff --git a/drivers/net/e1000/e1000_ethdev.h b/drivers/net/e1000/e1000_ethdev.h index 23b089c8..902001f3 100644 --- a/drivers/net/e1000/e1000_ethdev.h +++ b/drivers/net/e1000/e1000_ethdev.h @@ -4,6 +4,10 @@ #ifndef _E1000_ETHDEV_H_ #define _E1000_ETHDEV_H_ + +#include + +#include #include #include @@ -27,6 +31,7 @@ #define E1000_CTRL_EXT_EXTEND_VLAN (1<<26) /* EXTENDED VLAN */ #define IGB_VFTA_SIZE 128 +#define IGB_HKEY_MAX_INDEX 10 #define IGB_MAX_RX_QUEUE_NUM 8 #define IGB_MAX_RX_QUEUE_NUM_82576 16 @@ -229,8 +234,8 @@ struct igb_ethertype_filter { }; struct igb_rte_flow_rss_conf { - struct rte_eth_rss_conf rss_conf; /**< RSS parameters. */ - uint16_t num; /**< Number of entries in queue[]. 
*/ + struct rte_flow_action_rss conf; /**< RSS parameters. */ + uint8_t key[IGB_HKEY_MAX_INDEX * sizeof(uint32_t)]; /* Hash key. */ uint16_t queue[IGB_MAX_RX_QUEUE_NUM]; /**< Queues indices to use. */ }; @@ -357,6 +362,9 @@ void eth_igb_rx_queue_release(void *rxq); void igb_dev_clear_queues(struct rte_eth_dev *dev); void igb_dev_free_queues(struct rte_eth_dev *dev); +uint64_t igb_get_rx_port_offloads_capa(struct rte_eth_dev *dev); +uint64_t igb_get_rx_queue_offloads_capa(struct rte_eth_dev *dev); + int eth_igb_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id, uint16_t nb_rx_desc, unsigned int socket_id, const struct rte_eth_rxconf *rx_conf, @@ -370,6 +378,9 @@ int eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset); int eth_igb_rx_descriptor_status(void *rx_queue, uint16_t offset); int eth_igb_tx_descriptor_status(void *tx_queue, uint16_t offset); +uint64_t igb_get_tx_port_offloads_capa(struct rte_eth_dev *dev); +uint64_t igb_get_tx_queue_offloads_capa(struct rte_eth_dev *dev); + int eth_igb_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id, uint16_t nb_tx_desc, unsigned int socket_id, const struct rte_eth_txconf *tx_conf); @@ -417,6 +428,8 @@ void igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, void igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, struct rte_eth_txq_info *qinfo); +uint32_t em_get_max_pktlen(struct rte_eth_dev *dev); + /* * RX/TX EM function prototypes */ @@ -426,6 +439,9 @@ void eth_em_rx_queue_release(void *rxq); void em_dev_clear_queues(struct rte_eth_dev *dev); void em_dev_free_queues(struct rte_eth_dev *dev); +uint64_t em_get_rx_port_offloads_capa(struct rte_eth_dev *dev); +uint64_t em_get_rx_queue_offloads_capa(struct rte_eth_dev *dev); + int eth_em_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id, uint16_t nb_rx_desc, unsigned int socket_id, const struct rte_eth_rxconf *rx_conf, @@ -439,6 +455,9 @@ int eth_em_rx_descriptor_done(void *rx_queue, uint16_t offset); int eth_em_rx_descriptor_status(void *rx_queue, uint16_t offset); int eth_em_tx_descriptor_status(void *tx_queue, uint16_t offset); +uint64_t em_get_tx_port_offloads_capa(struct rte_eth_dev *dev); +uint64_t em_get_tx_queue_offloads_capa(struct rte_eth_dev *dev); + int eth_em_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id, uint16_t nb_tx_desc, unsigned int socket_id, const struct rte_eth_txconf *tx_conf); @@ -487,6 +506,10 @@ int eth_igb_syn_filter_set(struct rte_eth_dev *dev, int eth_igb_add_del_flex_filter(struct rte_eth_dev *dev, struct rte_eth_flex_filter *filter, bool add); +int igb_rss_conf_init(struct igb_rte_flow_rss_conf *out, + const struct rte_flow_action_rss *in); +int igb_action_rss_same(const struct rte_flow_action_rss *comp, + const struct rte_flow_action_rss *with); int igb_config_rss_filter(struct rte_eth_dev *dev, struct igb_rte_flow_rss_conf *conf, bool add); diff --git a/drivers/net/e1000/e1000_logs.c b/drivers/net/e1000/e1000_logs.c new file mode 100644 index 00000000..22173939 --- /dev/null +++ b/drivers/net/e1000/e1000_logs.c @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Intel Corporation + */ + +#include "e1000_logs.h" + +/* declared as extern in e1000_logs.h */ +int e1000_logtype_init; +int e1000_logtype_driver; + +/* avoids double registering of logs if EM and IGB drivers are in use */ +static int e1000_log_initialized; + +void +e1000_igb_init_log(void) +{ + if (!e1000_log_initialized) { + e1000_logtype_init = rte_log_register("pmd.net.e1000.init"); + if 
(e1000_logtype_init >= 0) + rte_log_set_level(e1000_logtype_init, RTE_LOG_NOTICE); + e1000_logtype_driver = rte_log_register("pmd.net.e1000.driver"); + if (e1000_logtype_driver >= 0) + rte_log_set_level(e1000_logtype_driver, RTE_LOG_NOTICE); + e1000_log_initialized = 1; + } +} diff --git a/drivers/net/e1000/e1000_logs.h b/drivers/net/e1000/e1000_logs.h index 50348e9e..69d3d311 100644 --- a/drivers/net/e1000/e1000_logs.h +++ b/drivers/net/e1000/e1000_logs.h @@ -5,6 +5,8 @@ #ifndef _E1000_LOGS_H_ #define _E1000_LOGS_H_ +#include + extern int e1000_logtype_init; #define PMD_INIT_LOG(level, fmt, args...) \ rte_log(RTE_LOG_ ## level, e1000_logtype_init, \ @@ -41,4 +43,8 @@ extern int e1000_logtype_driver; #define PMD_DRV_LOG(level, fmt, args...) \ PMD_DRV_LOG_RAW(level, fmt "\n", ## args) + +/* log init function shared by e1000 and igb drivers */ +void e1000_igb_init_log(void); + #endif /* _E1000_LOGS_H_ */ diff --git a/drivers/net/e1000/em_ethdev.c b/drivers/net/e1000/em_ethdev.c index 242375ff..7039dc10 100644 --- a/drivers/net/e1000/em_ethdev.c +++ b/drivers/net/e1000/em_ethdev.c @@ -11,7 +11,6 @@ #include #include #include -#include #include #include #include @@ -20,7 +19,6 @@ #include #include #include -#include #include #include @@ -94,6 +92,8 @@ static int em_get_rx_buffer_size(struct e1000_hw *hw); static int eth_em_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr, uint32_t index, uint32_t pool); static void eth_em_rar_clear(struct rte_eth_dev *dev, uint32_t index); +static int eth_em_default_mac_addr_set(struct rte_eth_dev *dev, + struct ether_addr *addr); static int eth_em_set_mc_addr_list(struct rte_eth_dev *dev, struct ether_addr *mc_addr_set, @@ -105,9 +105,6 @@ static int eth_em_set_mc_addr_list(struct rte_eth_dev *dev, static enum e1000_fc_mode em_fc_setting = e1000_fc_full; -int e1000_logtype_init; -int e1000_logtype_driver; - /* * The set of PCI devices this driver supports */ @@ -190,6 +187,7 @@ static const struct eth_dev_ops eth_em_ops = { .dev_led_off = eth_em_led_off, .flow_ctrl_get = eth_em_flow_ctrl_get, .flow_ctrl_set = eth_em_flow_ctrl_set, + .mac_addr_set = eth_em_default_mac_addr_set, .mac_addr_add = eth_em_rar_set, .mac_addr_remove = eth_em_rar_clear, .set_mc_addr_list = eth_em_set_mc_addr_list, @@ -197,57 +195,6 @@ static const struct eth_dev_ops eth_em_ops = { .txq_info_get = em_txq_info_get, }; -/** - * Atomically reads the link status information from global - * structure rte_eth_dev. - * - * @param dev - * - Pointer to the structure rte_eth_dev to read from. - * - Pointer to the buffer to be saved with the link status. - * - * @return - * - On success, zero. - * - On failure, negative value. - */ -static inline int -rte_em_dev_atomic_read_link_status(struct rte_eth_dev *dev, - struct rte_eth_link *link) -{ - struct rte_eth_link *dst = link; - struct rte_eth_link *src = &(dev->data->dev_link); - - if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, - *(uint64_t *)src) == 0) - return -1; - - return 0; -} - -/** - * Atomically writes the link status information into global - * structure rte_eth_dev. - * - * @param dev - * - Pointer to the structure rte_eth_dev to read from. - * - Pointer to the buffer to be saved with the link status. - * - * @return - * - On success, zero. - * - On failure, negative value. 
- */ -static inline int -rte_em_dev_atomic_write_link_status(struct rte_eth_dev *dev, - struct rte_eth_link *link) -{ - struct rte_eth_link *dst = &(dev->data->dev_link); - struct rte_eth_link *src = link; - - if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, - *(uint64_t *)src) == 0) - return -1; - - return 0; -} /** * eth_em_dev_is_ich8 - Check for ICH8 device @@ -506,6 +453,7 @@ eth_em_configure(struct rte_eth_dev *dev) PMD_INIT_FUNC_TRACE(); intr->flags |= E1000_FLAG_NEED_LINK_UPDATE; + PMD_INIT_FUNC_TRACE(); return 0; @@ -802,7 +750,7 @@ eth_em_stop(struct rte_eth_dev *dev) /* clear the recorded link status */ memset(&link, 0, sizeof(link)); - rte_em_dev_atomic_write_link_status(dev, &link); + rte_eth_linkstatus_set(dev, &link); if (!rte_intr_allow_others(intr_handle)) /* resume to the default handler */ @@ -1069,9 +1017,11 @@ eth_em_rx_queue_intr_disable(struct rte_eth_dev *dev, __rte_unused uint16_t queu return 0; } -static uint32_t -em_get_max_pktlen(const struct e1000_hw *hw) +uint32_t +em_get_max_pktlen(struct rte_eth_dev *dev) { + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + switch (hw->mac.type) { case e1000_82571: case e1000_82572: @@ -1100,20 +1050,9 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) { struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); - dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev); dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */ - dev_info->max_rx_pktlen = em_get_max_pktlen(hw); + dev_info->max_rx_pktlen = em_get_max_pktlen(dev); dev_info->max_mac_addrs = hw->mac.rar_entry_count; - dev_info->rx_offload_capa = - DEV_RX_OFFLOAD_VLAN_STRIP | - DEV_RX_OFFLOAD_IPV4_CKSUM | - DEV_RX_OFFLOAD_UDP_CKSUM | - DEV_RX_OFFLOAD_TCP_CKSUM; - dev_info->tx_offload_capa = - DEV_TX_OFFLOAD_VLAN_INSERT | - DEV_TX_OFFLOAD_IPV4_CKSUM | - DEV_TX_OFFLOAD_UDP_CKSUM | - DEV_TX_OFFLOAD_TCP_CKSUM; /* * Starting with 631xESB hw supports 2 TX/RX queues per port. 
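The em_ethdev.c hunks around this point drop the driver-local cmpset-based link-status helpers in favor of the rte_eth_linkstatus_set()/rte_eth_linkstatus_get() pair that 18.05 adds to the ethdev layer (the same swap the dpaa2 hunks made earlier). A minimal sketch of the set-side contract, assuming the rte_atomic64_exchange() helper from rte_atomic.h; this illustrates the pattern and is not the ethdev source:

#include <rte_atomic.h>
#include <rte_ethdev_driver.h>

/*
 * Sketch: publish the new link word with one atomic 64-bit swap and
 * report whether the status changed: 0 means changed, -1 means no
 * change, matching the "return 0 means link status changed, -1 means
 * not changed" comment kept above eth_em_link_update() below.
 */
static inline int
linkstatus_set_sketch(struct rte_eth_dev *dev,
		      const struct rte_eth_link *new_link)
{
	volatile uint64_t *dev_link =
		(volatile uint64_t *)&dev->data->dev_link;
	union {
		uint64_t val64;
		struct rte_eth_link link;
	} orig, next;

	RTE_BUILD_BUG_ON(sizeof(*new_link) != sizeof(uint64_t));

	next.link = *new_link;
	orig.val64 = rte_atomic64_exchange(dev_link, next.val64);

	return (orig.link.link_status == new_link->link_status) ? -1 : 0;
}

Because the update is a single 64-bit exchange, a concurrent reader never observes a half-written speed/duplex/status combination, which is what the removed per-driver rte_atomic64_cmpset() helpers were emulating.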
@@ -1135,6 +1074,13 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->max_rx_queues = 1; dev_info->max_tx_queues = 1; + dev_info->rx_queue_offload_capa = em_get_rx_queue_offloads_capa(dev); + dev_info->rx_offload_capa = em_get_rx_port_offloads_capa(dev) | + dev_info->rx_queue_offload_capa; + dev_info->tx_queue_offload_capa = em_get_tx_queue_offloads_capa(dev); + dev_info->tx_offload_capa = em_get_tx_port_offloads_capa(dev) | + dev_info->tx_queue_offload_capa; + dev_info->rx_desc_lim = (struct rte_eth_desc_lim) { .nb_max = E1000_MAX_RING_DESC, .nb_min = E1000_MIN_RING_DESC, @@ -1152,6 +1098,12 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M | ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G; + + /* Preferred queue parameters */ + dev_info->default_rxportconf.nb_queues = 1; + dev_info->default_txportconf.nb_queues = 1; + dev_info->default_txportconf.ring_size = 256; + dev_info->default_rxportconf.ring_size = 256; } /* return 0 means link status changed, -1 means not changed */ @@ -1160,7 +1112,7 @@ eth_em_link_update(struct rte_eth_dev *dev, int wait_to_complete) { struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct rte_eth_link link, old; + struct rte_eth_link link; int link_check, count; link_check = 0; @@ -1195,8 +1147,6 @@ eth_em_link_update(struct rte_eth_dev *dev, int wait_to_complete) rte_delay_ms(EM_LINK_UPDATE_CHECK_INTERVAL); } memset(&link, 0, sizeof(link)); - rte_em_dev_atomic_read_link_status(dev, &link); - old = link; /* Now we check if a transition has happened */ if (link_check && (link.link_status == ETH_LINK_DOWN)) { @@ -1210,19 +1160,13 @@ eth_em_link_update(struct rte_eth_dev *dev, int wait_to_complete) link.link_autoneg = !(dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED); } else if (!link_check && (link.link_status == ETH_LINK_UP)) { - link.link_speed = 0; + link.link_speed = ETH_SPEED_NUM_NONE; link.link_duplex = ETH_LINK_HALF_DUPLEX; link.link_status = ETH_LINK_DOWN; link.link_autoneg = ETH_LINK_FIXED; } - rte_em_dev_atomic_write_link_status(dev, &link); - /* not changed */ - if (old.link_status == link.link_status) - return -1; - - /* changed */ - return 0; + return rte_eth_linkstatus_set(dev, &link); } /* @@ -1460,15 +1404,18 @@ em_vlan_hw_strip_enable(struct rte_eth_dev *dev) static int eth_em_vlan_offload_set(struct rte_eth_dev *dev, int mask) { + struct rte_eth_rxmode *rxmode; + + rxmode = &dev->data->dev_conf.rxmode; if(mask & ETH_VLAN_STRIP_MASK){ - if (dev->data->dev_conf.rxmode.hw_vlan_strip) + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) em_vlan_hw_strip_enable(dev); else em_vlan_hw_strip_disable(dev); } if(mask & ETH_VLAN_FILTER_MASK){ - if (dev->data->dev_conf.rxmode.hw_vlan_filter) + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) em_vlan_hw_filter_enable(dev); else em_vlan_hw_filter_disable(dev); @@ -1631,8 +1578,8 @@ eth_em_interrupt_action(struct rte_eth_dev *dev, if (ret < 0) return 0; - memset(&link, 0, sizeof(link)); - rte_em_dev_atomic_read_link_status(dev, &link); + rte_eth_linkstatus_get(dev, &link); + if (link.link_status) { PMD_INIT_LOG(INFO, " Port %d: Link Up - speed %u Mbps - %s", dev->data->port_id, link.link_speed, @@ -1809,6 +1756,15 @@ eth_em_rar_clear(struct rte_eth_dev *dev, uint32_t index) e1000_rar_set(hw, addr, index); } +static int +eth_em_default_mac_addr_set(struct rte_eth_dev *dev, + struct ether_addr *addr) +{ + eth_em_rar_clear(dev, 
0); + + return eth_em_rar_set(dev, (void *)addr, 0, 0); +} + static int eth_em_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) { @@ -1835,10 +1791,12 @@ eth_em_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) /* switch to jumbo mode if needed */ if (frame_size > ETHER_MAX_LEN) { - dev->data->dev_conf.rxmode.jumbo_frame = 1; + dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; rctl |= E1000_RCTL_LPE; } else { - dev->data->dev_conf.rxmode.jumbo_frame = 0; + dev->data->dev_conf.rxmode.offloads &= + ~DEV_RX_OFFLOAD_JUMBO_FRAME; rctl &= ~E1000_RCTL_LPE; } E1000_WRITE_REG(hw, E1000_RCTL, rctl); @@ -1864,14 +1822,10 @@ RTE_PMD_REGISTER_PCI(net_e1000_em, rte_em_pmd); RTE_PMD_REGISTER_PCI_TABLE(net_e1000_em, pci_id_em_map); RTE_PMD_REGISTER_KMOD_DEP(net_e1000_em, "* igb_uio | uio_pci_generic | vfio-pci"); -RTE_INIT(e1000_init_log); +/* see e1000_logs.c */ +RTE_INIT(igb_init_log); static void -e1000_init_log(void) +igb_init_log(void) { - e1000_logtype_init = rte_log_register("pmd.net.e1000.init"); - if (e1000_logtype_init >= 0) - rte_log_set_level(e1000_logtype_init, RTE_LOG_NOTICE); - e1000_logtype_driver = rte_log_register("pmd.net.e1000.driver"); - if (e1000_logtype_driver >= 0) - rte_log_set_level(e1000_logtype_driver, RTE_LOG_NOTICE); + e1000_igb_init_log(); } diff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c index 02fae100..a6b3e92a 100644 --- a/drivers/net/e1000/em_rxtx.c +++ b/drivers/net/e1000/em_rxtx.c @@ -85,6 +85,7 @@ struct em_rx_queue { struct em_rx_entry *sw_ring; /**< address of RX software ring. */ struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */ struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */ + uint64_t offloads; /**< Offloads of DEV_RX_OFFLOAD_* */ uint16_t nb_rx_desc; /**< number of RX descriptors. */ uint16_t rx_tail; /**< current value of RDT register. */ uint16_t nb_rx_hold; /**< number of held free RX desc. */ @@ -163,6 +164,7 @@ struct em_tx_queue { uint8_t wthresh; /**< Write-back threshold register. */ struct em_ctx_info ctx_cache; /**< Hardware context history.*/ + uint64_t offloads; /**< offloads of DEV_TX_OFFLOAD_* */ }; #if 1 @@ -1151,6 +1153,36 @@ em_reset_tx_queue(struct em_tx_queue *txq) memset((void*)&txq->ctx_cache, 0, sizeof (txq->ctx_cache)); } +uint64_t +em_get_tx_port_offloads_capa(struct rte_eth_dev *dev) +{ + uint64_t tx_offload_capa; + + RTE_SET_USED(dev); + tx_offload_capa = + DEV_TX_OFFLOAD_VLAN_INSERT | + DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM; + + return tx_offload_capa; +} + +uint64_t +em_get_tx_queue_offloads_capa(struct rte_eth_dev *dev) +{ + uint64_t tx_queue_offload_capa; + + /* + * As only one Tx queue can be used, let per queue offloading + * capability be same to per port queue offloading capability + * for better convenience. + */ + tx_queue_offload_capa = em_get_tx_port_offloads_capa(dev); + + return tx_queue_offload_capa; +} + int eth_em_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, @@ -1163,9 +1195,12 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev, struct e1000_hw *hw; uint32_t tsize; uint16_t tx_rs_thresh, tx_free_thresh; + uint64_t offloads; hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads; + /* * Validate number of transmit descriptors. 
* It must not exceed hardware maximum, and must be multiple @@ -1269,6 +1304,7 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev, em_reset_tx_queue(txq); dev->data->tx_queues[queue_idx] = txq; + txq->offloads = offloads; return 0; } @@ -1313,6 +1349,43 @@ em_reset_rx_queue(struct em_rx_queue *rxq) rxq->pkt_last_seg = NULL; } +uint64_t +em_get_rx_port_offloads_capa(struct rte_eth_dev *dev) +{ + uint64_t rx_offload_capa; + uint32_t max_rx_pktlen; + + max_rx_pktlen = em_get_max_pktlen(dev); + + rx_offload_capa = + DEV_RX_OFFLOAD_VLAN_STRIP | + DEV_RX_OFFLOAD_VLAN_FILTER | + DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM | + DEV_RX_OFFLOAD_CRC_STRIP | + DEV_RX_OFFLOAD_SCATTER; + if (max_rx_pktlen > ETHER_MAX_LEN) + rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME; + + return rx_offload_capa; +} + +uint64_t +em_get_rx_queue_offloads_capa(struct rte_eth_dev *dev) +{ + uint64_t rx_queue_offload_capa; + + /* + * As only one Rx queue can be used, let per queue offloading + * capability be same to per port queue offloading capability + * for better convenience. + */ + rx_queue_offload_capa = em_get_rx_port_offloads_capa(dev); + + return rx_queue_offload_capa; +} + int eth_em_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, @@ -1325,9 +1398,12 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev, struct em_rx_queue *rxq; struct e1000_hw *hw; uint32_t rsize; + uint64_t offloads; hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads; + /* * Validate number of receive descriptors. * It must not exceed hardware maximum, and must be multiple @@ -1382,8 +1458,8 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev, rxq->rx_free_thresh = rx_conf->rx_free_thresh; rxq->queue_id = queue_idx; rxq->port_id = dev->data->port_id; - rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? - 0 : ETHER_CRC_LEN); + rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_CRC_STRIP) ? 0 : ETHER_CRC_LEN); rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(queue_idx)); rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(queue_idx)); @@ -1395,6 +1471,7 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev, dev->data->rx_queues[queue_idx] = rxq; em_reset_rx_queue(rxq); + rxq->offloads = offloads; return 0; } @@ -1646,6 +1723,7 @@ eth_em_rx_init(struct rte_eth_dev *dev) { struct e1000_hw *hw; struct em_rx_queue *rxq; + struct rte_eth_rxmode *rxmode; uint32_t rctl; uint32_t rfctl; uint32_t rxcsum; @@ -1654,6 +1732,7 @@ eth_em_rx_init(struct rte_eth_dev *dev) int ret; hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + rxmode = &dev->data->dev_conf.rxmode; /* * Make sure receives are disabled while setting @@ -1714,8 +1793,8 @@ eth_em_rx_init(struct rte_eth_dev *dev) * call to configure */ rxq->crc_len = - (uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ? - 0 : ETHER_CRC_LEN); + (uint8_t)(dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_CRC_STRIP ? 0 : ETHER_CRC_LEN); bus_addr = rxq->rx_ring_phys_addr; E1000_WRITE_REG(hw, E1000_RDLEN(i), @@ -1745,7 +1824,7 @@ eth_em_rx_init(struct rte_eth_dev *dev) * to avoid splitting packets that don't fit into * one buffer. 
*/ - if (dev->data->dev_conf.rxmode.jumbo_frame || + if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME || rctl_bsize < ETHER_MAX_LEN) { if (!dev->data->scattered_rx) PMD_INIT_LOG(DEBUG, "forcing scatter mode"); @@ -1755,7 +1834,7 @@ eth_em_rx_init(struct rte_eth_dev *dev) } } - if (dev->data->dev_conf.rxmode.enable_scatter) { + if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) { if (!dev->data->scattered_rx) PMD_INIT_LOG(DEBUG, "forcing scatter mode"); dev->rx_pkt_burst = eth_em_recv_scattered_pkts; @@ -1768,7 +1847,7 @@ eth_em_rx_init(struct rte_eth_dev *dev) */ rxcsum = E1000_READ_REG(hw, E1000_RXCSUM); - if (dev->data->dev_conf.rxmode.hw_ip_checksum) + if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM) rxcsum |= E1000_RXCSUM_IPOFL; else rxcsum &= ~E1000_RXCSUM_IPOFL; @@ -1780,21 +1859,21 @@ eth_em_rx_init(struct rte_eth_dev *dev) if ((hw->mac.type == e1000_ich9lan || hw->mac.type == e1000_pch2lan || hw->mac.type == e1000_ich10lan) && - dev->data->dev_conf.rxmode.jumbo_frame == 1) { + rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) { u32 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(0)); E1000_WRITE_REG(hw, E1000_RXDCTL(0), rxdctl | 3); E1000_WRITE_REG(hw, E1000_ERT, 0x100 | (1 << 13)); } if (hw->mac.type == e1000_pch2lan) { - if (dev->data->dev_conf.rxmode.jumbo_frame == 1) + if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) e1000_lv_jumbo_workaround_ich8lan(hw, TRUE); else e1000_lv_jumbo_workaround_ich8lan(hw, FALSE); } /* Setup the Receive Control Register. */ - if (dev->data->dev_conf.rxmode.hw_strip_crc) + if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP) rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */ else rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */ @@ -1814,7 +1893,7 @@ eth_em_rx_init(struct rte_eth_dev *dev) /* * Configure support of jumbo frames, if any. */ - if (dev->data->dev_conf.rxmode.jumbo_frame == 1) + if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) rctl |= E1000_RCTL_LPE; else rctl &= ~E1000_RCTL_LPE; @@ -1894,6 +1973,7 @@ em_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, qinfo->scattered_rx = dev->data->scattered_rx; qinfo->nb_desc = rxq->nb_rx_desc; qinfo->conf.rx_free_thresh = rxq->rx_free_thresh; + qinfo->conf.offloads = rxq->offloads; } void @@ -1911,4 +1991,5 @@ em_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, qinfo->conf.tx_thresh.wthresh = txq->wthresh; qinfo->conf.tx_free_thresh = txq->tx_free_thresh; qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh; + qinfo->conf.offloads = txq->offloads; } diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c index 3c5138de..edc7be31 100644 --- a/drivers/net/e1000/igb_ethdev.c +++ b/drivers/net/e1000/igb_ethdev.c @@ -20,7 +20,6 @@ #include #include #include -#include #include #include @@ -42,8 +41,6 @@ #define IGB_DEFAULT_TX_HTHRESH 1 #define IGB_DEFAULT_TX_WTHRESH ((hw->mac.type == e1000_82576) ? 
1 : 16) -#define IGB_HKEY_MAX_INDEX 10 - /* Bit shift and mask */ #define IGB_4_BIT_WIDTH (CHAR_BIT / 2) #define IGB_4_BIT_MASK RTE_LEN2MASK(IGB_4_BIT_WIDTH, uint8_t) @@ -146,7 +143,7 @@ static int eth_igb_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr, uint32_t index, uint32_t pool); static void eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index); -static void eth_igb_default_mac_addr_set(struct rte_eth_dev *dev, +static int eth_igb_default_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *addr); static void igbvf_intr_disable(struct e1000_hw *hw); @@ -171,7 +168,7 @@ static int igbvf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on); static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on); static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on); -static void igbvf_default_mac_addr_set(struct rte_eth_dev *dev, +static int igbvf_default_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *addr); static int igbvf_get_reg_length(struct rte_eth_dev *dev); static int igbvf_get_regs(struct rte_eth_dev *dev, @@ -224,6 +221,10 @@ static int eth_igb_get_eeprom(struct rte_eth_dev *dev, struct rte_dev_eeprom_info *eeprom); static int eth_igb_set_eeprom(struct rte_eth_dev *dev, struct rte_dev_eeprom_info *eeprom); +static int eth_igb_get_module_info(struct rte_eth_dev *dev, + struct rte_eth_dev_module_info *modinfo); +static int eth_igb_get_module_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *info); static int eth_igb_set_mc_addr_list(struct rte_eth_dev *dev, struct ether_addr *mc_addr_set, uint32_t nb_mc_addr); @@ -403,6 +404,8 @@ static const struct eth_dev_ops eth_igb_ops = { .get_eeprom_length = eth_igb_get_eeprom_length, .get_eeprom = eth_igb_get_eeprom, .set_eeprom = eth_igb_set_eeprom, + .get_module_info = eth_igb_get_module_info, + .get_module_eeprom = eth_igb_get_module_eeprom, .timesync_adjust_time = igb_timesync_adjust_time, .timesync_read_time = igb_timesync_read_time, .timesync_write_time = igb_timesync_write_time, @@ -522,57 +525,6 @@ static const struct rte_igb_xstats_name_off rte_igbvf_stats_strings[] = { #define IGBVF_NB_XSTATS (sizeof(rte_igbvf_stats_strings) / \ sizeof(rte_igbvf_stats_strings[0])) -/** - * Atomically reads the link status information from global - * structure rte_eth_dev. - * - * @param dev - * - Pointer to the structure rte_eth_dev to read from. - * - Pointer to the buffer to be saved with the link status. - * - * @return - * - On success, zero. - * - On failure, negative value. - */ -static inline int -rte_igb_dev_atomic_read_link_status(struct rte_eth_dev *dev, - struct rte_eth_link *link) -{ - struct rte_eth_link *dst = link; - struct rte_eth_link *src = &(dev->data->dev_link); - - if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, - *(uint64_t *)src) == 0) - return -1; - - return 0; -} - -/** - * Atomically writes the link status information into global - * structure rte_eth_dev. - * - * @param dev - * - Pointer to the structure rte_eth_dev to read from. - * - Pointer to the buffer to be saved with the link status. - * - * @return - * - On success, zero. - * - On failure, negative value. 
- */ -static inline int -rte_igb_dev_atomic_write_link_status(struct rte_eth_dev *dev, - struct rte_eth_link *link) -{ - struct rte_eth_link *dst = &(dev->data->dev_link); - struct rte_eth_link *src = link; - - if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, - *(uint64_t *)src) == 0) - return -1; - - return 0; -} static inline void igb_intr_enable(struct rte_eth_dev *dev) @@ -1559,7 +1511,7 @@ eth_igb_stop(struct rte_eth_dev *dev) /* clear the recorded link status */ memset(&link, 0, sizeof(link)); - rte_igb_dev_atomic_write_link_status(dev, &link); + rte_eth_linkstatus_set(dev, &link); if (!rte_intr_allow_others(intr_handle)) /* resume to the default handler */ @@ -1635,7 +1587,7 @@ eth_igb_close(struct rte_eth_dev *dev) } memset(&link, 0, sizeof(link)); - rte_igb_dev_atomic_write_link_status(dev, &link); + rte_eth_linkstatus_set(dev, &link); } static int @@ -2196,22 +2148,15 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) { struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); - dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev); dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */ dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. */ dev_info->max_mac_addrs = hw->mac.rar_entry_count; - dev_info->rx_offload_capa = - DEV_RX_OFFLOAD_VLAN_STRIP | - DEV_RX_OFFLOAD_IPV4_CKSUM | - DEV_RX_OFFLOAD_UDP_CKSUM | - DEV_RX_OFFLOAD_TCP_CKSUM; - dev_info->tx_offload_capa = - DEV_TX_OFFLOAD_VLAN_INSERT | - DEV_TX_OFFLOAD_IPV4_CKSUM | - DEV_TX_OFFLOAD_UDP_CKSUM | - DEV_TX_OFFLOAD_TCP_CKSUM | - DEV_TX_OFFLOAD_SCTP_CKSUM | - DEV_TX_OFFLOAD_TCP_TSO; + dev_info->rx_queue_offload_capa = igb_get_rx_queue_offloads_capa(dev); + dev_info->rx_offload_capa = igb_get_rx_port_offloads_capa(dev) | + dev_info->rx_queue_offload_capa; + dev_info->tx_queue_offload_capa = igb_get_tx_queue_offloads_capa(dev); + dev_info->tx_offload_capa = igb_get_tx_port_offloads_capa(dev) | + dev_info->tx_queue_offload_capa; switch (hw->mac.type) { case e1000_82575: @@ -2274,6 +2219,7 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) }, .rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH, .rx_drop_en = 0, + .offloads = 0, }; dev_info->default_txconf = (struct rte_eth_txconf) { @@ -2282,7 +2228,7 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) .hthresh = IGB_DEFAULT_TX_HTHRESH, .wthresh = IGB_DEFAULT_TX_WTHRESH, }, - .txq_flags = 0, + .offloads = 0, }; dev_info->rx_desc_lim = rx_desc_lim; @@ -2325,14 +2271,9 @@ eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) { struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); - dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev); dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */ dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. 
*/ dev_info->max_mac_addrs = hw->mac.rar_entry_count; - dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP | - DEV_RX_OFFLOAD_IPV4_CKSUM | - DEV_RX_OFFLOAD_UDP_CKSUM | - DEV_RX_OFFLOAD_TCP_CKSUM; dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT | DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM | @@ -2353,6 +2294,13 @@ eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) break; } + dev_info->rx_queue_offload_capa = igb_get_rx_queue_offloads_capa(dev); + dev_info->rx_offload_capa = igb_get_rx_port_offloads_capa(dev) | + dev_info->rx_queue_offload_capa; + dev_info->tx_queue_offload_capa = igb_get_tx_queue_offloads_capa(dev); + dev_info->tx_offload_capa = igb_get_tx_port_offloads_capa(dev) | + dev_info->tx_queue_offload_capa; + dev_info->default_rxconf = (struct rte_eth_rxconf) { .rx_thresh = { .pthresh = IGB_DEFAULT_RX_PTHRESH, @@ -2361,6 +2309,7 @@ eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) }, .rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH, .rx_drop_en = 0, + .offloads = 0, }; dev_info->default_txconf = (struct rte_eth_txconf) { @@ -2369,7 +2318,7 @@ eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) .hthresh = IGB_DEFAULT_TX_HTHRESH, .wthresh = IGB_DEFAULT_TX_WTHRESH, }, - .txq_flags = 0, + .offloads = 0, }; dev_info->rx_desc_lim = rx_desc_lim; @@ -2382,7 +2331,7 @@ eth_igb_link_update(struct rte_eth_dev *dev, int wait_to_complete) { struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct rte_eth_link link, old; + struct rte_eth_link link; int link_check, count; link_check = 0; @@ -2423,8 +2372,6 @@ eth_igb_link_update(struct rte_eth_dev *dev, int wait_to_complete) rte_delay_ms(IGB_LINK_UPDATE_CHECK_INTERVAL); } memset(&link, 0, sizeof(link)); - rte_igb_dev_atomic_read_link_status(dev, &link); - old = link; /* Now we check if a transition has happened */ if (link_check) { @@ -2443,14 +2390,8 @@ eth_igb_link_update(struct rte_eth_dev *dev, int wait_to_complete) link.link_status = ETH_LINK_DOWN; link.link_autoneg = ETH_LINK_FIXED; } - rte_igb_dev_atomic_write_link_status(dev, &link); - /* not changed */ - if (old.link_status == link.link_status) - return -1; - - /* changed */ - return 0; + return rte_eth_linkstatus_set(dev, &link); } /* @@ -2704,7 +2645,7 @@ igb_vlan_hw_extend_disable(struct rte_eth_dev *dev) E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg); /* Update maximum packet length */ - if (dev->data->dev_conf.rxmode.jumbo_frame == 1) + if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) E1000_WRITE_REG(hw, E1000_RLPML, dev->data->dev_conf.rxmode.max_rx_pkt_len + VLAN_TAG_SIZE); @@ -2723,7 +2664,7 @@ igb_vlan_hw_extend_enable(struct rte_eth_dev *dev) E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg); /* Update maximum packet length */ - if (dev->data->dev_conf.rxmode.jumbo_frame == 1) + if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) E1000_WRITE_REG(hw, E1000_RLPML, dev->data->dev_conf.rxmode.max_rx_pkt_len + 2 * VLAN_TAG_SIZE); @@ -2732,22 +2673,25 @@ igb_vlan_hw_extend_enable(struct rte_eth_dev *dev) static int eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask) { + struct rte_eth_rxmode *rxmode; + + rxmode = &dev->data->dev_conf.rxmode; if(mask & ETH_VLAN_STRIP_MASK){ - if (dev->data->dev_conf.rxmode.hw_vlan_strip) + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) igb_vlan_hw_strip_enable(dev); else igb_vlan_hw_strip_disable(dev); } if(mask & ETH_VLAN_FILTER_MASK){ - if (dev->data->dev_conf.rxmode.hw_vlan_filter) + if 
(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) igb_vlan_hw_filter_enable(dev); else igb_vlan_hw_filter_disable(dev); } if(mask & ETH_VLAN_EXTEND_MASK){ - if (dev->data->dev_conf.rxmode.hw_vlan_extend) + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) igb_vlan_hw_extend_enable(dev); else igb_vlan_hw_extend_disable(dev); @@ -2887,8 +2831,7 @@ eth_igb_interrupt_action(struct rte_eth_dev *dev, if (ret < 0) return 0; - memset(&link, 0, sizeof(link)); - rte_igb_dev_atomic_read_link_status(dev, &link); + rte_eth_linkstatus_get(dev, &link); if (link.link_status) { PMD_INIT_LOG(INFO, " Port %d: Link Up - speed %u Mbps - %s", @@ -3146,13 +3089,14 @@ eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index) e1000_rar_set(hw, addr, index); } -static void +static int eth_igb_default_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *addr) { eth_igb_rar_clear(dev, 0); - eth_igb_rar_set(dev, (void *)addr, 0, 0); + + return 0; } /* * Virtual Function operations @@ -3250,14 +3194,14 @@ igbvf_dev_configure(struct rte_eth_dev *dev) * Keep the persistent behavior the same as Host PF */ #ifndef RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC - if (!conf->rxmode.hw_strip_crc) { + if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP)) { PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip"); - conf->rxmode.hw_strip_crc = 1; + conf->rxmode.offloads |= DEV_RX_OFFLOAD_CRC_STRIP; } #else - if (conf->rxmode.hw_strip_crc) { + if (conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP) { PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip"); - conf->rxmode.hw_strip_crc = 0; + conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_CRC_STRIP; } #endif @@ -3504,7 +3448,7 @@ igbvf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) return 0; } -static void +static int igbvf_default_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *addr) { struct e1000_hw *hw = @@ -3512,6 +3456,7 @@ igbvf_default_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *addr) /* index is not used by rar_set() */ hw->mac.ops.rar_set(hw, (void *)addr, 0); + return 0; } @@ -4499,10 +4444,12 @@ eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) /* switch to jumbo mode if needed */ if (frame_size > ETHER_MAX_LEN) { - dev->data->dev_conf.rxmode.jumbo_frame = 1; + dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; rctl |= E1000_RCTL_LPE; } else { - dev->data->dev_conf.rxmode.jumbo_frame = 0; + dev->data->dev_conf.rxmode.offloads &= + ~DEV_RX_OFFLOAD_JUMBO_FRAME; rctl &= ~E1000_RCTL_LPE; } E1000_WRITE_REG(hw, E1000_RCTL, rctl); @@ -5383,6 +5330,86 @@ eth_igb_set_eeprom(struct rte_eth_dev *dev, return nvm->ops.write(hw, first, length, data); } +static int +eth_igb_get_module_info(struct rte_eth_dev *dev, + struct rte_eth_dev_module_info *modinfo) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + uint32_t status = 0; + uint16_t sff8472_rev, addr_mode; + bool page_swap = false; + + if (hw->phy.media_type == e1000_media_type_copper || + hw->phy.media_type == e1000_media_type_unknown) + return -EOPNOTSUPP; + + /* Check whether we support SFF-8472 or not */ + status = e1000_read_phy_reg_i2c(hw, IGB_SFF_8472_COMP, &sff8472_rev); + if (status) + return -EIO; + + /* addressing mode is not supported */ + status = e1000_read_phy_reg_i2c(hw, IGB_SFF_8472_SWAP, &addr_mode); + if (status) + return -EIO; + + /* addressing mode is not supported */ + if ((addr_mode & 0xFF) & IGB_SFF_ADDRESSING_MODE) { + PMD_DRV_LOG(ERR, + "Address change required to access page 0xA2, " + "but not supported. 
Please report the module " + "type to the driver maintainers.\n"); + page_swap = true; + } + + if ((sff8472_rev & 0xFF) == IGB_SFF_8472_UNSUP || page_swap) { + /* We have an SFP, but it does not support SFF-8472 */ + modinfo->type = RTE_ETH_MODULE_SFF_8079; + modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN; + } else { + /* We have an SFP which supports a revision of SFF-8472 */ + modinfo->type = RTE_ETH_MODULE_SFF_8472; + modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN; + } + + return 0; +} + +static int +eth_igb_get_module_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *info) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + uint32_t status = 0; + uint16_t dataword[RTE_ETH_MODULE_SFF_8472_LEN / 2 + 1]; + u16 first_word, last_word; + int i = 0; + + if (info->length == 0) + return -EINVAL; + + first_word = info->offset >> 1; + last_word = (info->offset + info->length - 1) >> 1; + + /* Read EEPROM block, SFF-8079/SFF-8472, word at a time */ + for (i = 0; i < last_word - first_word + 1; i++) { + status = e1000_read_phy_reg_i2c(hw, (first_word + i) * 2, + &dataword[i]); + if (status) { + /* Error occurred while reading module */ + return -EIO; + } + + dataword[i] = rte_be_to_cpu_16(dataword[i]); + } + + memcpy(info->data, (u8 *)dataword + (info->offset & 1), info->length); + + return 0; +} + static int eth_igb_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) { @@ -5631,7 +5658,7 @@ igb_rss_filter_restore(struct rte_eth_dev *dev) struct e1000_filter_info *filter_info = E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); - if (filter_info->rss_info.num) + if (filter_info->rss_info.conf.queue_num) igb_config_rss_filter(dev, &filter_info->rss_info, TRUE); } @@ -5654,3 +5681,11 @@ RTE_PMD_REGISTER_KMOD_DEP(net_e1000_igb, "* igb_uio | uio_pci_generic | vfio-pci RTE_PMD_REGISTER_PCI(net_e1000_igb_vf, rte_igbvf_pmd); RTE_PMD_REGISTER_PCI_TABLE(net_e1000_igb_vf, pci_id_igbvf_map); RTE_PMD_REGISTER_KMOD_DEP(net_e1000_igb_vf, "* igb_uio | vfio-pci"); + +/* see e1000_logs.c */ +RTE_INIT(e1000_init_log); +static void +e1000_init_log(void) +{ + e1000_igb_init_log(); +} diff --git a/drivers/net/e1000/igb_flow.c b/drivers/net/e1000/igb_flow.c index a1427596..07385291 100644 --- a/drivers/net/e1000/igb_flow.c +++ b/drivers/net/e1000/igb_flow.c @@ -175,7 +175,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, return -rte_errno; } - ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask; + ipv4_mask = item->mask; /** * Only support src & dst addresses, protocol, * others should be masked. 
@@ -198,7 +198,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, filter->src_ip_mask = ipv4_mask->hdr.src_addr; filter->proto_mask = ipv4_mask->hdr.next_proto_id; - ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec; + ipv4_spec = item->spec; filter->dst_ip = ipv4_spec->hdr.dst_addr; filter->src_ip = ipv4_spec->hdr.src_addr; filter->proto = ipv4_spec->hdr.next_proto_id; @@ -228,7 +228,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, /* get the TCP/UDP/SCTP info */ if (item->type == RTE_FLOW_ITEM_TYPE_TCP) { if (item->spec && item->mask) { - tcp_mask = (const struct rte_flow_item_tcp *)item->mask; + tcp_mask = item->mask; /** * Only support src & dst ports, tcp flags, @@ -263,14 +263,14 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, return -rte_errno; } - tcp_spec = (const struct rte_flow_item_tcp *)item->spec; + tcp_spec = item->spec; filter->dst_port = tcp_spec->hdr.dst_port; filter->src_port = tcp_spec->hdr.src_port; filter->tcp_flags = tcp_spec->hdr.tcp_flags; } } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) { if (item->spec && item->mask) { - udp_mask = (const struct rte_flow_item_udp *)item->mask; + udp_mask = item->mask; /** * Only support src & dst ports, @@ -289,14 +289,13 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, filter->dst_port_mask = udp_mask->hdr.dst_port; filter->src_port_mask = udp_mask->hdr.src_port; - udp_spec = (const struct rte_flow_item_udp *)item->spec; + udp_spec = item->spec; filter->dst_port = udp_spec->hdr.dst_port; filter->src_port = udp_spec->hdr.src_port; } } else { if (item->spec && item->mask) { - sctp_mask = (const struct rte_flow_item_sctp *) - item->mask; + sctp_mask = item->mask; /** * Only support src & dst ports, @@ -380,6 +379,15 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, return -rte_errno; } + /* not supported */ + if (attr->transfer) { + memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + attr, "No support for transfer."); + return -rte_errno; + } + if (attr->priority > 0xFFFF) { memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); rte_flow_error_set(error, EINVAL, @@ -533,8 +541,8 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr, return -rte_errno; } - eth_spec = (const struct rte_flow_item_eth *)item->spec; - eth_mask = (const struct rte_flow_item_eth *)item->mask; + eth_spec = item->spec; + eth_mask = item->mask; /* Mask bits of source MAC address must be full of 0. 
* Mask bits of destination MAC address must be full @@ -624,6 +632,14 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr, return -rte_errno; } + /* Not supported */ + if (attr->transfer) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + attr, "No support for transfer."); + return -rte_errno; + } + /* Not supported */ if (attr->priority) { rte_flow_error_set(error, EINVAL, @@ -848,8 +864,8 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr, return -rte_errno; } - tcp_spec = (const struct rte_flow_item_tcp *)item->spec; - tcp_mask = (const struct rte_flow_item_tcp *)item->mask; + tcp_spec = item->spec; + tcp_mask = item->mask; if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) || tcp_mask->hdr.src_port || tcp_mask->hdr.dst_port || @@ -924,6 +940,15 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr, return -rte_errno; } + /* not supported */ + if (attr->transfer) { + memset(filter, 0, sizeof(struct rte_eth_syn_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + attr, "No support for transfer."); + return -rte_errno; + } + /* Support 2 priorities, the lowest or highest. */ if (!attr->priority) { filter->hig_pri = 0; @@ -1065,8 +1090,8 @@ item_loop: return -rte_errno; } - raw_spec = (const struct rte_flow_item_raw *)item->spec; - raw_mask = (const struct rte_flow_item_raw *)item->mask; + raw_spec = item->spec; + raw_mask = item->mask; if (!raw_mask->length || !raw_mask->relative) { @@ -1212,6 +1237,15 @@ item_loop: return -rte_errno; } + /* not supported */ + if (attr->transfer) { + memset(filter, 0, sizeof(struct rte_eth_flex_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + attr, "No support for transfer."); + return -rte_errno; + } + if (attr->priority > 0xFFFF) { memset(filter, 0, sizeof(struct rte_eth_flex_filter)); rte_flow_error_set(error, EINVAL, @@ -1293,7 +1327,7 @@ igb_parse_rss_filter(struct rte_eth_dev *dev, rss = (const struct rte_flow_action_rss *)act->conf; - if (!rss || !rss->num) { + if (!rss || !rss->queue_num) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act, @@ -1301,7 +1335,7 @@ igb_parse_rss_filter(struct rte_eth_dev *dev, return -rte_errno; } - for (n = 0; n < rss->num; n++) { + for (n = 0; n < rss->queue_num; n++) { if (rss->queue[n] >= dev->data->nb_rx_queues) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, @@ -1311,14 +1345,26 @@ igb_parse_rss_filter(struct rte_eth_dev *dev, } } - if (rss->rss_conf) - rss_conf->rss_conf = *rss->rss_conf; - else - rss_conf->rss_conf.rss_hf = IGB_RSS_OFFLOAD_ALL; - - for (n = 0; n < rss->num; ++n) - rss_conf->queue[n] = rss->queue[n]; - rss_conf->num = rss->num; + if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act, + "non-default RSS hash functions are not supported"); + if (rss->level) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act, + "a nonzero RSS encapsulation level is not supported"); + if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key)) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act, + "RSS hash key must be exactly 40 bytes"); + if (rss->queue_num > RTE_DIM(rss_conf->queue)) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act, + "too many queues for RSS context"); + if (igb_rss_conf_init(rss_conf, rss)) + return rte_flow_error_set + (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act, + "RSS context 
initialization failure"); /* check if the next not void item is END */ index++; @@ -1350,6 +1396,15 @@ igb_parse_rss_filter(struct rte_eth_dev *dev, return -rte_errno; } + /* not supported */ + if (attr->transfer) { + memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + attr, "No support for transfer."); + return -rte_errno; + } + if (attr->priority > 0xFFFF) { memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf)); rte_flow_error_set(error, EINVAL, @@ -1519,9 +1574,8 @@ igb_flow_create(struct rte_eth_dev *dev, PMD_DRV_LOG(ERR, "failed to allocate memory"); goto out; } - rte_memcpy(&rss_filter_ptr->filter_info, - &rss_conf, - sizeof(struct igb_rte_flow_rss_conf)); + igb_rss_conf_init(&rss_filter_ptr->filter_info, + &rss_conf.conf); TAILQ_INSERT_TAIL(&igb_filter_rss_list, rss_filter_ptr, entries); flow->rule = rss_filter_ptr; @@ -1758,7 +1812,7 @@ igb_clear_rss_filter(struct rte_eth_dev *dev) struct e1000_filter_info *filter = E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); - if (filter->rss_info.num) + if (filter->rss_info.conf.queue_num) igb_config_rss_filter(dev, &filter->rss_info, FALSE); } diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c index 2f371672..5f729f27 100644 --- a/drivers/net/e1000/igb_rxtx.c +++ b/drivers/net/e1000/igb_rxtx.c @@ -107,6 +107,7 @@ struct igb_rx_queue { uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */ uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */ uint32_t flags; /**< RX flags. */ + uint64_t offloads; /**< offloads of DEV_RX_OFFLOAD_* */ }; /** @@ -180,6 +181,7 @@ struct igb_tx_queue { /**< Start context position for transmit queue. */ struct igb_advctx_info ctx_cache[IGB_CTX_NUM]; /**< Hardware context history.*/ + uint64_t offloads; /**< offloads of DEV_TX_OFFLOAD_* */ }; #if 1 @@ -1447,6 +1449,33 @@ igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev) igb_reset_tx_queue_stat(txq); } +uint64_t +igb_get_tx_port_offloads_capa(struct rte_eth_dev *dev) +{ + uint64_t rx_offload_capa; + + RTE_SET_USED(dev); + rx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT | + DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM | + DEV_TX_OFFLOAD_SCTP_CKSUM | + DEV_TX_OFFLOAD_TCP_TSO | + DEV_TX_OFFLOAD_MULTI_SEGS; + + return rx_offload_capa; +} + +uint64_t +igb_get_tx_queue_offloads_capa(struct rte_eth_dev *dev) +{ + uint64_t rx_queue_offload_capa; + + rx_queue_offload_capa = igb_get_tx_port_offloads_capa(dev); + + return rx_queue_offload_capa; +} + int eth_igb_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, @@ -1458,6 +1487,9 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev, struct igb_tx_queue *txq; struct e1000_hw *hw; uint32_t size; + uint64_t offloads; + + offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads; hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -1542,6 +1574,7 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev, dev->tx_pkt_burst = eth_igb_xmit_pkts; dev->tx_pkt_prepare = ð_igb_prep_pkts; dev->data->tx_queues[queue_idx] = txq; + txq->offloads = offloads; return 0; } @@ -1593,6 +1626,45 @@ igb_reset_rx_queue(struct igb_rx_queue *rxq) rxq->pkt_last_seg = NULL; } +uint64_t +igb_get_rx_port_offloads_capa(struct rte_eth_dev *dev) +{ + uint64_t rx_offload_capa; + + RTE_SET_USED(dev); + rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP | + DEV_RX_OFFLOAD_VLAN_FILTER | + DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM | + 
DEV_RX_OFFLOAD_JUMBO_FRAME | + DEV_RX_OFFLOAD_CRC_STRIP | + DEV_RX_OFFLOAD_SCATTER; + + return rx_offload_capa; +} + +uint64_t +igb_get_rx_queue_offloads_capa(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint64_t rx_queue_offload_capa; + + switch (hw->mac.type) { + case e1000_vfadapt_i350: + /* + * As only one Rx queue can be used, let per queue offloading + * capability be same to per port queue offloading capability + * for better convenience. + */ + rx_queue_offload_capa = igb_get_rx_port_offloads_capa(dev); + break; + default: + rx_queue_offload_capa = 0; + } + return rx_queue_offload_capa; +} + int eth_igb_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, @@ -1605,6 +1677,9 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev, struct igb_rx_queue *rxq; struct e1000_hw *hw; unsigned int size; + uint64_t offloads; + + offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads; hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -1630,6 +1705,7 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev, RTE_CACHE_LINE_SIZE); if (rxq == NULL) return -ENOMEM; + rxq->offloads = offloads; rxq->mb_pool = mp; rxq->nb_rx_desc = nb_desc; rxq->pthresh = rx_conf->rx_thresh.pthresh; @@ -1644,8 +1720,8 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev, rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ? queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx); rxq->port_id = dev->data->port_id; - rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 : - ETHER_CRC_LEN); + rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_CRC_STRIP) ? 0 : ETHER_CRC_LEN); /* * Allocate RX ring hardware descriptors. A memzone large enough to @@ -2227,6 +2303,7 @@ igb_dev_mq_rx_configure(struct rte_eth_dev *dev) int eth_igb_rx_init(struct rte_eth_dev *dev) { + struct rte_eth_rxmode *rxmode; struct e1000_hw *hw; struct igb_rx_queue *rxq; uint32_t rctl; @@ -2247,10 +2324,12 @@ eth_igb_rx_init(struct rte_eth_dev *dev) rctl = E1000_READ_REG(hw, E1000_RCTL); E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN); + rxmode = &dev->data->dev_conf.rxmode; + /* * Configure support of jumbo frames, if any. */ - if (dev->data->dev_conf.rxmode.jumbo_frame == 1) { + if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) { rctl |= E1000_RCTL_LPE; /* @@ -2292,9 +2371,8 @@ eth_igb_rx_init(struct rte_eth_dev *dev) * Reset crc_len in case it was changed after queue setup by a * call to configure */ - rxq->crc_len = - (uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ? - 0 : ETHER_CRC_LEN); + rxq->crc_len = (uint8_t)(dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_CRC_STRIP ? 
0 : ETHER_CRC_LEN); bus_addr = rxq->rx_ring_phys_addr; E1000_WRITE_REG(hw, E1000_RDLEN(rxq->reg_idx), @@ -2362,7 +2440,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev) E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl); } - if (dev->data->dev_conf.rxmode.enable_scatter) { + if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) { if (!dev->data->scattered_rx) PMD_INIT_LOG(DEBUG, "forcing scatter mode"); dev->rx_pkt_burst = eth_igb_recv_scattered_pkts; @@ -2406,16 +2484,24 @@ eth_igb_rx_init(struct rte_eth_dev *dev) rxcsum |= E1000_RXCSUM_PCSD; /* Enable both L3/L4 rx checksum offload */ - if (dev->data->dev_conf.rxmode.hw_ip_checksum) - rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL | - E1000_RXCSUM_CRCOFL); + if (rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) + rxcsum |= E1000_RXCSUM_IPOFL; + else + rxcsum &= ~E1000_RXCSUM_IPOFL; + if (rxmode->offloads & + (DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM)) + rxcsum |= E1000_RXCSUM_TUOFL; else - rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL | - E1000_RXCSUM_CRCOFL); + rxcsum &= ~E1000_RXCSUM_TUOFL; + if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM) + rxcsum |= E1000_RXCSUM_CRCOFL; + else + rxcsum &= ~E1000_RXCSUM_CRCOFL; + E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum); /* Setup the Receive Control Register. */ - if (dev->data->dev_conf.rxmode.hw_strip_crc) { + if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP) { rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */ /* set STRCRC bit in all queues */ @@ -2654,7 +2740,7 @@ eth_igbvf_rx_init(struct rte_eth_dev *dev) E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl); } - if (dev->data->dev_conf.rxmode.enable_scatter) { + if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) { if (!dev->data->scattered_rx) PMD_INIT_LOG(DEBUG, "forcing scatter mode"); dev->rx_pkt_burst = eth_igb_recv_scattered_pkts; @@ -2741,6 +2827,7 @@ igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, qinfo->conf.rx_free_thresh = rxq->rx_free_thresh; qinfo->conf.rx_drop_en = rxq->drop_en; + qinfo->conf.offloads = rxq->offloads; } void @@ -2756,6 +2843,41 @@ igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, qinfo->conf.tx_thresh.pthresh = txq->pthresh; qinfo->conf.tx_thresh.hthresh = txq->hthresh; qinfo->conf.tx_thresh.wthresh = txq->wthresh; + qinfo->conf.offloads = txq->offloads; +} + +int +igb_rss_conf_init(struct igb_rte_flow_rss_conf *out, + const struct rte_flow_action_rss *in) +{ + if (in->key_len > RTE_DIM(out->key) || + in->queue_num > RTE_DIM(out->queue)) + return -EINVAL; + out->conf = (struct rte_flow_action_rss){ + .func = in->func, + .level = in->level, + .types = in->types, + .key_len = in->key_len, + .queue_num = in->queue_num, + .key = memcpy(out->key, in->key, in->key_len), + .queue = memcpy(out->queue, in->queue, + sizeof(*in->queue) * in->queue_num), + }; + return 0; +} + +int +igb_action_rss_same(const struct rte_flow_action_rss *comp, + const struct rte_flow_action_rss *with) +{ + return (comp->func == with->func && + comp->level == with->level && + comp->types == with->types && + comp->key_len == with->key_len && + comp->queue_num == with->queue_num && + !memcmp(comp->key, with->key, with->key_len) && + !memcmp(comp->queue, with->queue, + sizeof(*with->queue) * with->queue_num)); } int @@ -2764,7 +2886,12 @@ igb_config_rss_filter(struct rte_eth_dev *dev, { uint32_t shift; uint16_t i, j; - struct rte_eth_rss_conf rss_conf = conf->rss_conf; + struct rte_eth_rss_conf rss_conf = { + .rss_key = conf->conf.key_len ? 
+ (void *)(uintptr_t)conf->conf.key : NULL, + .rss_key_len = conf->conf.key_len, + .rss_hf = conf->conf.types, + }; struct e1000_filter_info *filter_info = E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -2772,8 +2899,8 @@ igb_config_rss_filter(struct rte_eth_dev *dev, hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); if (!add) { - if (memcmp(conf, &filter_info->rss_info, - sizeof(struct igb_rte_flow_rss_conf)) == 0) { + if (igb_action_rss_same(&filter_info->rss_info.conf, + &conf->conf)) { igb_rss_disable(dev); memset(&filter_info->rss_info, 0, sizeof(struct igb_rte_flow_rss_conf)); @@ -2782,7 +2909,7 @@ igb_config_rss_filter(struct rte_eth_dev *dev, return -EINVAL; } - if (filter_info->rss_info.num) + if (filter_info->rss_info.conf.queue_num) return -EINVAL; /* Fill in redirection table. */ @@ -2794,9 +2921,9 @@ igb_config_rss_filter(struct rte_eth_dev *dev, } reta; uint8_t q_idx; - q_idx = conf->queue[j]; - if (j == conf->num) + if (j == conf->conf.queue_num) j = 0; + q_idx = conf->conf.queue[j]; reta.bytes[i & 3] = (uint8_t)(q_idx << shift); if ((i & 3) == 3) E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword); @@ -2813,8 +2940,8 @@ igb_config_rss_filter(struct rte_eth_dev *dev, rss_conf.rss_key = rss_intel_key; /* Default hash key */ igb_hw_rss_hash_set(hw, &rss_conf); - rte_memcpy(&filter_info->rss_info, - conf, sizeof(struct igb_rte_flow_rss_conf)); + if (igb_rss_conf_init(&filter_info->rss_info, &conf->conf)) + return -EINVAL; return 0; } diff --git a/drivers/net/e1000/meson.build b/drivers/net/e1000/meson.build index 3a1bf5af..cf456995 100644 --- a/drivers/net/e1000/meson.build +++ b/drivers/net/e1000/meson.build @@ -5,6 +5,7 @@ subdir('base') objs = [base_objs] sources = files( + 'e1000_logs.c', 'em_ethdev.c', 'em_rxtx.c', 'igb_ethdev.c', diff --git a/drivers/net/ena/Makefile b/drivers/net/ena/Makefile index f9bfe053..43339f3b 100644 --- a/drivers/net/ena/Makefile +++ b/drivers/net/ena/Makefile @@ -43,6 +43,9 @@ INCLUDES :=-I$(SRCDIR) -I$(SRCDIR)/base/ena_defs -I$(SRCDIR)/base EXPORT_MAP := rte_pmd_ena_version.map LIBABIVER := 1 +# rte_fbarray is not yet part of stable API +CFLAGS += -DALLOW_EXPERIMENTAL_API + VPATH += $(SRCDIR)/base # # all source are stored in SRCS-y diff --git a/drivers/net/ena/base/ena_plat_dpdk.h b/drivers/net/ena/base/ena_plat_dpdk.h index 8cba319e..93345199 100644 --- a/drivers/net/ena/base/ena_plat_dpdk.h +++ b/drivers/net/ena/base/ena_plat_dpdk.h @@ -188,7 +188,8 @@ typedef uint64_t dma_addr_t; ENA_TOUCH(dmadev); ENA_TOUCH(handle); \ snprintf(z_name, sizeof(z_name), \ "ena_alloc_%d", ena_alloc_cnt++); \ - mz = rte_memzone_reserve(z_name, size, SOCKET_ID_ANY, 0); \ + mz = rte_memzone_reserve(z_name, size, SOCKET_ID_ANY, \ + RTE_MEMZONE_IOVA_CONTIG); \ memset(mz->addr, 0, size); \ virt = mz->addr; \ phys = mz->iova; \ @@ -206,7 +207,8 @@ typedef uint64_t dma_addr_t; ENA_TOUCH(dmadev); ENA_TOUCH(dev_node); \ snprintf(z_name, sizeof(z_name), \ "ena_alloc_%d", ena_alloc_cnt++); \ - mz = rte_memzone_reserve(z_name, size, node, 0); \ + mz = rte_memzone_reserve(z_name, size, node, \ + RTE_MEMZONE_IOVA_CONTIG); \ memset(mz->addr, 0, size); \ virt = mz->addr; \ phys = mz->iova; \ @@ -219,7 +221,8 @@ typedef uint64_t dma_addr_t; ENA_TOUCH(dmadev); ENA_TOUCH(dev_node); \ snprintf(z_name, sizeof(z_name), \ "ena_alloc_%d", ena_alloc_cnt++); \ - mz = rte_memzone_reserve(z_name, size, node, 0); \ + mz = rte_memzone_reserve(z_name, size, node, \ + RTE_MEMZONE_IOVA_CONTIG); \ 
memset(mz->addr, 0, size); \ virt = mz->addr; \ } while (0) diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c index 34b2a8d7..c595cc7e 100644 --- a/drivers/net/ena/ena_ethdev.c +++ b/drivers/net/ena/ena_ethdev.c @@ -238,10 +238,6 @@ static int ena_rss_reta_query(struct rte_eth_dev *dev, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size); static int ena_get_sset_count(struct rte_eth_dev *dev, int sset); -static bool ena_are_tx_queue_offloads_allowed(struct ena_adapter *adapter, - uint64_t offloads); -static bool ena_are_rx_queue_offloads_allowed(struct ena_adapter *adapter, - uint64_t offloads); static const struct eth_dev_ops ena_dev_ops = { .dev_configure = ena_dev_configure, @@ -264,11 +260,15 @@ static const struct eth_dev_ops ena_dev_ops = { static inline int ena_cpu_to_node(int cpu) { struct rte_config *config = rte_eal_get_configuration(); + struct rte_fbarray *arr = &config->mem_config->memzones; + const struct rte_memzone *mz; - if (likely(cpu < RTE_MAX_MEMZONE)) - return config->mem_config->memzone[cpu].socket_id; + if (unlikely(cpu >= RTE_MAX_MEMZONE)) + return NUMA_NO_NODE; - return NUMA_NO_NODE; + mz = rte_fbarray_get(arr, cpu); + + return mz->socket_id; } static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf, @@ -724,7 +724,7 @@ static int ena_link_update(struct rte_eth_dev *dev, { struct rte_eth_link *link = &dev->data->dev_link; - link->link_status = 1; + link->link_status = ETH_LINK_UP; link->link_speed = ETH_SPEED_NUM_10G; link->link_duplex = ETH_LINK_FULL_DUPLEX; @@ -1001,12 +1001,6 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev, return -EINVAL; } - if (tx_conf->txq_flags == ETH_TXQ_FLAGS_IGNORE && - !ena_are_tx_queue_offloads_allowed(adapter, tx_conf->offloads)) { - RTE_LOG(ERR, PMD, "Unsupported queue offloads\n"); - return -EINVAL; - } - ena_qid = ENA_IO_TXQ_IDX(queue_idx); ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX; @@ -1061,7 +1055,7 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev, for (i = 0; i < txq->ring_size; i++) txq->empty_tx_reqs[i] = i; - txq->offloads = tx_conf->offloads; + txq->offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads; /* Store pointer to this queue in upper layer */ txq->configured = 1; @@ -1074,7 +1068,7 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, uint16_t nb_desc, __rte_unused unsigned int socket_id, - const struct rte_eth_rxconf *rx_conf, + __rte_unused const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp) { struct ena_com_create_io_ctx ctx = @@ -1110,11 +1104,6 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev, return -EINVAL; } - if (!ena_are_rx_queue_offloads_allowed(adapter, rx_conf->offloads)) { - RTE_LOG(ERR, PMD, "Unsupported queue offloads\n"); - return -EINVAL; - } - ena_qid = ENA_IO_RXQ_IDX(queue_idx); ctx.qid = ena_qid; @@ -1418,22 +1407,6 @@ static int ena_dev_configure(struct rte_eth_dev *dev) { struct ena_adapter *adapter = (struct ena_adapter *)(dev->data->dev_private); - uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads; - uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads; - - if ((tx_offloads & adapter->tx_supported_offloads) != tx_offloads) { - RTE_LOG(ERR, PMD, "Some Tx offloads are not supported " - "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n", - tx_offloads, adapter->tx_supported_offloads); - return -ENOTSUP; - } - - if ((rx_offloads & adapter->rx_supported_offloads) != rx_offloads) { - RTE_LOG(ERR, PMD, "Some Rx offloads are not supported " - "requested 0x%" PRIx64 " 
supported 0x%" PRIx64 "\n", - rx_offloads, adapter->rx_supported_offloads); - return -ENOTSUP; - } if (!(adapter->state == ENA_ADAPTER_STATE_INIT || adapter->state == ENA_ADAPTER_STATE_STOPPED)) { @@ -1455,8 +1428,8 @@ static int ena_dev_configure(struct rte_eth_dev *dev) break; } - adapter->tx_selected_offloads = tx_offloads; - adapter->rx_selected_offloads = rx_offloads; + adapter->tx_selected_offloads = dev->data->dev_conf.txmode.offloads; + adapter->rx_selected_offloads = dev->data->dev_conf.rxmode.offloads; return 0; } @@ -1485,32 +1458,6 @@ static void ena_init_rings(struct ena_adapter *adapter) } } -static bool ena_are_tx_queue_offloads_allowed(struct ena_adapter *adapter, - uint64_t offloads) -{ - uint64_t port_offloads = adapter->tx_selected_offloads; - - /* Check if port supports all requested offloads. - * True if all offloads selected for queue are set for port. - */ - if ((offloads & port_offloads) != offloads) - return false; - return true; -} - -static bool ena_are_rx_queue_offloads_allowed(struct ena_adapter *adapter, - uint64_t offloads) -{ - uint64_t port_offloads = adapter->rx_selected_offloads; - - /* Check if port supports all requested offloads. - * True if all offloads selected for queue are set for port. - */ - if ((offloads & port_offloads) != offloads) - return false; - return true; -} - static void ena_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) { @@ -1527,8 +1474,6 @@ static void ena_infos_get(struct rte_eth_dev *dev, ena_dev = &adapter->ena_dev; ena_assert_msg(ena_dev != NULL, "Uninitialized device"); - dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev); - dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | diff --git a/drivers/net/enic/base/vnic_dev.c b/drivers/net/enic/base/vnic_dev.c index 05b595eb..8483f76f 100644 --- a/drivers/net/enic/base/vnic_dev.c +++ b/drivers/net/enic/base/vnic_dev.c @@ -10,6 +10,7 @@ #include "vnic_dev.h" #include "vnic_resource.h" #include "vnic_devcmd.h" +#include "vnic_nic.h" #include "vnic_stats.h" @@ -484,7 +485,7 @@ int vnic_dev_capable_adv_filters(struct vnic_dev *vdev) * Retrun true in filter_tags if supported */ int vnic_dev_capable_filter_mode(struct vnic_dev *vdev, u32 *mode, - u8 *filter_tags) + u8 *filter_actions) { u64 args[4]; int err; @@ -492,14 +493,10 @@ int vnic_dev_capable_filter_mode(struct vnic_dev *vdev, u32 *mode, err = vnic_dev_advanced_filters_cap(vdev, args, 4); - /* determine if filter tags are available */ - if (err) - *filter_tags = 0; - if ((args[2] == FILTER_CAP_MODE_V1) && - (args[3] & FILTER_ACTION_FILTER_ID_FLAG)) - *filter_tags = 1; - else - *filter_tags = 0; + /* determine supported filter actions */ + *filter_actions = FILTER_ACTION_RQ_STEERING_FLAG; /* always available */ + if (args[2] == FILTER_CAP_MODE_V1) + *filter_actions = args[3]; if (err || ((args[0] == 1) && (args[1] == 0))) { /* Adv filter Command not supported or adv filters available but @@ -587,17 +584,9 @@ int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats) { u64 a0, a1; int wait = 1000; - static u32 instance; - char name[NAME_MAX]; - if (!vdev->stats) { - snprintf((char *)name, sizeof(name), - "vnic_stats-%u", instance++); - vdev->stats = vdev->alloc_consistent(vdev->priv, - sizeof(struct vnic_stats), &vdev->stats_pa, (u8 *)name); - if (!vdev->stats) - return -ENOMEM; - } + if (!vdev->stats) + return -ENOMEM; *stats = vdev->stats; a0 = vdev->stats_pa; @@ -922,6 +911,18 @@ u32 vnic_dev_get_intr_coal_timer_max(struct vnic_dev *vdev) return vdev->intr_coal_timer_info.max_usec; } 
+int vnic_dev_alloc_stats_mem(struct vnic_dev *vdev) +{ + char name[NAME_MAX]; + static u32 instance; + + snprintf((char *)name, sizeof(name), "vnic_stats-%u", instance++); + vdev->stats = vdev->alloc_consistent(vdev->priv, + sizeof(struct vnic_stats), + &vdev->stats_pa, (u8 *)name); + return vdev->stats == NULL ? -ENOMEM : 0; +} + void vnic_dev_unregister(struct vnic_dev *vdev) { if (vdev) { @@ -1044,3 +1045,36 @@ int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry, return ret; } + +int vnic_dev_overlay_offload_ctrl(struct vnic_dev *vdev, u8 overlay, u8 config) +{ + u64 a0 = overlay; + u64 a1 = config; + int wait = 1000; + + return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CTRL, &a0, &a1, wait); +} + +int vnic_dev_overlay_offload_cfg(struct vnic_dev *vdev, u8 overlay, + u16 vxlan_udp_port_number) +{ + u64 a1 = vxlan_udp_port_number; + u64 a0 = overlay; + int wait = 1000; + + return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CFG, &a0, &a1, wait); +} + +int vnic_dev_capable_vxlan(struct vnic_dev *vdev) +{ + u64 a0 = VIC_FEATURE_VXLAN; + u64 a1 = 0; + int wait = 1000; + int ret; + + ret = vnic_dev_cmd(vdev, CMD_GET_SUPP_FEATURE_VER, &a0, &a1, wait); + /* 1 if the NIC can do VXLAN for both IPv4 and IPv6 with multiple WQs */ + return ret == 0 && + (a1 & (FEATURE_VXLAN_IPV6 | FEATURE_VXLAN_MULTI_WQ)) == + (FEATURE_VXLAN_IPV6 | FEATURE_VXLAN_MULTI_WQ); +} diff --git a/drivers/net/enic/base/vnic_dev.h b/drivers/net/enic/base/vnic_dev.h index 8c099206..3c908430 100644 --- a/drivers/net/enic/base/vnic_dev.h +++ b/drivers/net/enic/base/vnic_dev.h @@ -108,7 +108,7 @@ int vnic_dev_fw_info(struct vnic_dev *vdev, int vnic_dev_capable_adv_filters(struct vnic_dev *vdev); int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd); int vnic_dev_capable_filter_mode(struct vnic_dev *vdev, u32 *mode, - u8 *filter_tags); + u8 *filter_actions); int vnic_dev_asic_info(struct vnic_dev *vdev, u16 *asic_type, u16 *asic_rev); int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, size_t size, void *value); @@ -165,6 +165,7 @@ struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev, void *priv, struct rte_pci_device *pdev, struct vnic_dev_bar *bar, unsigned int num_bars); struct rte_pci_device *vnic_dev_get_pdev(struct vnic_dev *vdev); +int vnic_dev_alloc_stats_mem(struct vnic_dev *vdev); int vnic_dev_cmd_init(struct vnic_dev *vdev, int fallback); int vnic_dev_get_size(void); int vnic_dev_int13(struct vnic_dev *vdev, u64 arg, u32 op); @@ -177,10 +178,9 @@ int vnic_dev_deinit_done(struct vnic_dev *vdev, int *status); int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr); int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry, struct filter_v2 *data, struct filter_action_v2 *action_v2); -#ifdef ENIC_VXLAN -int vnic_dev_overlay_offload_enable_disable(struct vnic_dev *vdev, +int vnic_dev_overlay_offload_ctrl(struct vnic_dev *vdev, u8 overlay, u8 config); int vnic_dev_overlay_offload_cfg(struct vnic_dev *vdev, u8 overlay, u16 vxlan_udp_port_number); -#endif +int vnic_dev_capable_vxlan(struct vnic_dev *vdev); #endif /* _VNIC_DEV_H_ */ diff --git a/drivers/net/enic/base/vnic_devcmd.h b/drivers/net/enic/base/vnic_devcmd.h index 6b95bc48..2865eb4d 100644 --- a/drivers/net/enic/base/vnic_devcmd.h +++ b/drivers/net/enic/base/vnic_devcmd.h @@ -600,6 +600,7 @@ enum filter_cap_mode { /* flags for CMD_OPEN */ #define CMD_OPENF_OPROM 0x1 /* open coming from option rom */ +#define CMD_OPENF_IG_DESCCACHE 0x2 /* Do not flush IG DESC cache */ /* flags for CMD_INIT */ #define 
CMD_INITF_DEFAULT_MAC 0x1 /* init with default mac addr */ @@ -840,7 +841,9 @@ struct filter_action { #define FILTER_ACTION_RQ_STEERING_FLAG (1 << 0) #define FILTER_ACTION_FILTER_ID_FLAG (1 << 1) +#define FILTER_ACTION_DROP_FLAG (1 << 2) #define FILTER_ACTION_V2_ALL (FILTER_ACTION_RQ_STEERING_FLAG \ + | FILTER_ACTION_DROP_FLAG \ | FILTER_ACTION_FILTER_ID_FLAG) /* Version 2 of filter action must be a strict extension of struct filter_action @@ -1077,6 +1080,18 @@ typedef enum { VIC_FEATURE_MAX, } vic_feature_t; +/* + * These flags are used in args[1] of devcmd CMD_GET_SUPP_FEATURE_VER + * to indicate the host driver about the VxLAN and Multi WQ features + * supported + */ +#define FEATURE_VXLAN_IPV6_INNER (1 << 0) +#define FEATURE_VXLAN_IPV6_OUTER (1 << 1) +#define FEATURE_VXLAN_MULTI_WQ (1 << 2) + +#define FEATURE_VXLAN_IPV6 (FEATURE_VXLAN_IPV6_INNER | \ + FEATURE_VXLAN_IPV6_OUTER) + /* * CMD_CONFIG_GRPINTR subcommands */ diff --git a/drivers/net/enic/base/vnic_enet.h b/drivers/net/enic/base/vnic_enet.h index 26918335..49504a7d 100644 --- a/drivers/net/enic/base/vnic_enet.h +++ b/drivers/net/enic/base/vnic_enet.h @@ -52,6 +52,10 @@ struct vnic_enet_config { #define VENETF_VXLAN 0x10000 /* VxLAN offload */ #define VENETF_NVGRE 0x20000 /* NVGRE offload */ #define VENETF_GRPINTR 0x40000 /* group interrupt */ +#define VENETF_NICSWITCH 0x80000 /* NICSWITCH enabled */ +#define VENETF_RSSHASH_UDP_WEAK 0x100000 /* VIC has Bodega-style UDP RSS */ +#define VENETF_RSSHASH_UDPIPV4 0x200000 /* Hash on UDP + IPv4 fields */ +#define VENETF_RSSHASH_UDPIPV6 0x400000 /* Hash on UDP + IPv6 fields */ #define VENET_INTR_TYPE_MIN 0 /* Timer specs min interrupt spacing */ #define VENET_INTR_TYPE_IDLE 1 /* Timer specs idle time before irq */ diff --git a/drivers/net/enic/base/vnic_nic.h b/drivers/net/enic/base/vnic_nic.h index a753b3a5..e318d0cb 100644 --- a/drivers/net/enic/base/vnic_nic.h +++ b/drivers/net/enic/base/vnic_nic.h @@ -27,12 +27,14 @@ #define NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD 1UL #define NIC_CFG_IG_VLAN_STRIP_EN_SHIFT 24 +#define NIC_CFG_RSS_HASH_TYPE_UDP_IPV4 (1 << 0) #define NIC_CFG_RSS_HASH_TYPE_IPV4 (1 << 1) #define NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 (1 << 2) #define NIC_CFG_RSS_HASH_TYPE_IPV6 (1 << 3) #define NIC_CFG_RSS_HASH_TYPE_TCP_IPV6 (1 << 4) #define NIC_CFG_RSS_HASH_TYPE_IPV6_EX (1 << 5) #define NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX (1 << 6) +#define NIC_CFG_RSS_HASH_TYPE_UDP_IPV6 (1 << 7) static inline void vnic_set_nic_cfg(u32 *nic_cfg, u8 rss_default_cpu, u8 rss_hash_type, diff --git a/drivers/net/enic/base/vnic_rq.h b/drivers/net/enic/base/vnic_rq.h index d774bb0d..9619290d 100644 --- a/drivers/net/enic/base/vnic_rq.h +++ b/drivers/net/enic/base/vnic_rq.h @@ -6,6 +6,7 @@ #ifndef _VNIC_RQ_H_ #define _VNIC_RQ_H_ +#include #include "vnic_dev.h" #include "vnic_cq.h" @@ -69,6 +70,7 @@ struct vnic_rq { struct rte_mbuf *pkt_last_seg; unsigned int max_mbufs_per_pkt; uint16_t tot_nb_desc; + bool need_initial_post; }; static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq) diff --git a/drivers/net/enic/base/vnic_wq.h b/drivers/net/enic/base/vnic_wq.h index 7c069c06..0135bffc 100644 --- a/drivers/net/enic/base/vnic_wq.h +++ b/drivers/net/enic/base/vnic_wq.h @@ -44,6 +44,7 @@ struct vnic_wq_buf { struct vnic_wq { unsigned int index; + uint64_t tx_offload_notsup_mask; struct vnic_dev *vdev; struct vnic_wq_ctrl __iomem *ctrl; /* memory-mapped */ struct vnic_dev_ring ring; diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h index c083985e..ee83fe57 100644 --- 
a/drivers/net/enic/enic.h +++ b/drivers/net/enic/enic.h @@ -17,6 +17,7 @@ #include "vnic_rss.h" #include "enic_res.h" #include "cq_enet_desc.h" +#include #include #include @@ -49,6 +50,15 @@ #define ENICPMD_FDIR_MAX 64 +/* + * Interrupt 0: LSC and errors + * Interrupt 1: rx queue 0 + * Interrupt 2: rx queue 1 + * ... + */ +#define ENICPMD_LSC_INTR_OFFSET 0 +#define ENICPMD_RXQ_INTR_OFFSET 1 + struct enic_fdir_node { struct rte_eth_fdir_filter filter; u16 fltr_id; @@ -92,6 +102,7 @@ struct enic { struct vnic_dev *vdev; unsigned int port_id; + bool overlay_offload; struct rte_eth_dev *rte_dev; struct enic_fdir fdir; char bdf_name[ENICPMD_BDF_LENGTH]; @@ -109,7 +120,9 @@ struct enic { u16 max_mtu; u8 adv_filters; u32 flow_filter_mode; - u8 filter_tags; + u8 filter_actions; /* HW supported actions */ + bool vxlan; + bool disable_overlay; /* devargs disable_overlay=1 */ unsigned int flags; unsigned int priv_flags; @@ -126,9 +139,9 @@ struct enic { struct vnic_cq *cq; unsigned int cq_count; /* equals rq_count + wq_count */ - /* interrupt resource */ - struct vnic_intr intr; - unsigned int intr_count; + /* interrupt vectors (len = conf_intr_count) */ + struct vnic_intr *intr; + unsigned int intr_count; /* equals enabled interrupts (lsc + rxqs) */ /* software counters */ struct enic_soft_stats soft_stats; @@ -146,8 +159,33 @@ struct enic { LIST_HEAD(enic_flows, rte_flow) flows; rte_spinlock_t flows_lock; + + /* RSS */ + uint16_t reta_size; + uint8_t hash_key_size; + uint64_t flow_type_rss_offloads; /* 0 indicates RSS not supported */ + /* + * Keep a copy of current RSS config for queries, as we cannot retrieve + * it from the NIC. + */ + uint8_t rss_hash_type; /* NIC_CFG_RSS_HASH_TYPE flags */ + uint8_t rss_enable; + uint64_t rss_hf; /* ETH_RSS flags */ + union vnic_rss_key rss_key; + union vnic_rss_cpu rss_cpu; + + uint64_t rx_offload_capa; /* DEV_RX_OFFLOAD flags */ + uint64_t tx_offload_capa; /* DEV_TX_OFFLOAD flags */ + uint64_t tx_offload_mask; /* PKT_TX flags accepted */ }; +/* Compute ethdev's max packet size from MTU */ +static inline uint32_t enic_mtu_to_max_rx_pktlen(uint32_t mtu) +{ + /* ethdev max size includes eth and crc whereas NIC MTU does not */ + return mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; +} + /* Get the CQ index from a Start of Packet(SOP) RQ index */ static inline unsigned int enic_sop_rq_idx_to_cq_idx(unsigned int sop_idx) { @@ -220,54 +258,58 @@ enic_ring_incr(uint32_t n_descriptors, uint32_t idx) return idx; } -extern void enic_fdir_stats_get(struct enic *enic, - struct rte_eth_fdir_stats *stats); -extern int enic_fdir_add_fltr(struct enic *enic, - struct rte_eth_fdir_filter *params); -extern int enic_fdir_del_fltr(struct enic *enic, - struct rte_eth_fdir_filter *params); -extern void enic_free_wq(void *txq); -extern int enic_alloc_intr_resources(struct enic *enic); -extern int enic_setup_finish(struct enic *enic); -extern int enic_alloc_wq(struct enic *enic, uint16_t queue_idx, - unsigned int socket_id, uint16_t nb_desc); -extern void enic_start_wq(struct enic *enic, uint16_t queue_idx); -extern int enic_stop_wq(struct enic *enic, uint16_t queue_idx); -extern void enic_start_rq(struct enic *enic, uint16_t queue_idx); -extern int enic_stop_rq(struct enic *enic, uint16_t queue_idx); -extern void enic_free_rq(void *rxq); -extern int enic_alloc_rq(struct enic *enic, uint16_t queue_idx, - unsigned int socket_id, struct rte_mempool *mp, - uint16_t nb_desc, uint16_t free_thresh); -extern int enic_set_rss_nic_cfg(struct enic *enic); -extern int enic_set_vnic_res(struct enic 
*enic); -extern int enic_enable(struct enic *enic); -extern int enic_disable(struct enic *enic); -extern void enic_remove(struct enic *enic); -extern int enic_get_link_status(struct enic *enic); -extern int enic_dev_stats_get(struct enic *enic, - struct rte_eth_stats *r_stats); -extern void enic_dev_stats_clear(struct enic *enic); -extern void enic_add_packet_filter(struct enic *enic); +void enic_fdir_stats_get(struct enic *enic, + struct rte_eth_fdir_stats *stats); +int enic_fdir_add_fltr(struct enic *enic, + struct rte_eth_fdir_filter *params); +int enic_fdir_del_fltr(struct enic *enic, + struct rte_eth_fdir_filter *params); +void enic_free_wq(void *txq); +int enic_alloc_intr_resources(struct enic *enic); +int enic_setup_finish(struct enic *enic); +int enic_alloc_wq(struct enic *enic, uint16_t queue_idx, + unsigned int socket_id, uint16_t nb_desc); +void enic_start_wq(struct enic *enic, uint16_t queue_idx); +int enic_stop_wq(struct enic *enic, uint16_t queue_idx); +void enic_start_rq(struct enic *enic, uint16_t queue_idx); +int enic_stop_rq(struct enic *enic, uint16_t queue_idx); +void enic_free_rq(void *rxq); +int enic_alloc_rq(struct enic *enic, uint16_t queue_idx, + unsigned int socket_id, struct rte_mempool *mp, + uint16_t nb_desc, uint16_t free_thresh); +int enic_set_vnic_res(struct enic *enic); +int enic_init_rss_nic_cfg(struct enic *enic); +int enic_set_rss_conf(struct enic *enic, + struct rte_eth_rss_conf *rss_conf); +int enic_set_rss_reta(struct enic *enic, union vnic_rss_cpu *rss_cpu); +int enic_set_vlan_strip(struct enic *enic); +int enic_enable(struct enic *enic); +int enic_disable(struct enic *enic); +void enic_remove(struct enic *enic); +int enic_get_link_status(struct enic *enic); +int enic_dev_stats_get(struct enic *enic, + struct rte_eth_stats *r_stats); +void enic_dev_stats_clear(struct enic *enic); +void enic_add_packet_filter(struct enic *enic); int enic_set_mac_address(struct enic *enic, uint8_t *mac_addr); -void enic_del_mac_address(struct enic *enic, int mac_index); -extern unsigned int enic_cleanup_wq(struct enic *enic, struct vnic_wq *wq); -extern void enic_send_pkt(struct enic *enic, struct vnic_wq *wq, - struct rte_mbuf *tx_pkt, unsigned short len, - uint8_t sop, uint8_t eop, uint8_t cq_entry, - uint16_t ol_flags, uint16_t vlan_tag); - -extern void enic_post_wq_index(struct vnic_wq *wq); -extern int enic_probe(struct enic *enic); -extern int enic_clsf_init(struct enic *enic); -extern void enic_clsf_destroy(struct enic *enic); +int enic_del_mac_address(struct enic *enic, int mac_index); +unsigned int enic_cleanup_wq(struct enic *enic, struct vnic_wq *wq); +void enic_send_pkt(struct enic *enic, struct vnic_wq *wq, + struct rte_mbuf *tx_pkt, unsigned short len, + uint8_t sop, uint8_t eop, uint8_t cq_entry, + uint16_t ol_flags, uint16_t vlan_tag); + +void enic_post_wq_index(struct vnic_wq *wq); +int enic_probe(struct enic *enic); +int enic_clsf_init(struct enic *enic); +void enic_clsf_destroy(struct enic *enic); uint16_t enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts); uint16_t enic_dummy_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts); uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, - uint16_t nb_pkts); + uint16_t nb_pkts); uint16_t enic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts); int enic_set_mtu(struct enic *enic, uint16_t new_mtu); diff --git a/drivers/net/enic/enic_clsf.c b/drivers/net/enic/enic_clsf.c index 3ef1d083..9d95201e 100644 --- 
a/drivers/net/enic/enic_clsf.c +++ b/drivers/net/enic/enic_clsf.c @@ -111,7 +111,6 @@ copy_fltr_v2(struct filter_v2 *fltr, struct rte_eth_fdir_input *input, struct rte_eth_fdir_masks *masks) { struct filter_generic_1 *gp = &fltr->u.generic_1; - int i; fltr->type = FILTER_DPDK_1; memset(gp, 0, sizeof(*gp)); @@ -273,18 +272,14 @@ copy_fltr_v2(struct filter_v2 *fltr, struct rte_eth_fdir_input *input, ipv6_mask.proto = masks->ipv6_mask.proto; ipv6_val.proto = input->flow.ipv6_flow.proto; } - for (i = 0; i < 4; i++) { - *(uint32_t *)&ipv6_mask.src_addr[i * 4] = - masks->ipv6_mask.src_ip[i]; - *(uint32_t *)&ipv6_val.src_addr[i * 4] = - input->flow.ipv6_flow.src_ip[i]; - } - for (i = 0; i < 4; i++) { - *(uint32_t *)&ipv6_mask.dst_addr[i * 4] = - masks->ipv6_mask.src_ip[i]; - *(uint32_t *)&ipv6_val.dst_addr[i * 4] = - input->flow.ipv6_flow.dst_ip[i]; - } + memcpy(ipv6_mask.src_addr, masks->ipv6_mask.src_ip, + sizeof(ipv6_mask.src_addr)); + memcpy(ipv6_val.src_addr, input->flow.ipv6_flow.src_ip, + sizeof(ipv6_val.src_addr)); + memcpy(ipv6_mask.dst_addr, masks->ipv6_mask.dst_ip, + sizeof(ipv6_mask.dst_addr)); + memcpy(ipv6_val.dst_addr, input->flow.ipv6_flow.dst_ip, + sizeof(ipv6_val.dst_addr)); if (input->flow.ipv6_flow.tc) { ipv6_mask.vtc_flow = masks->ipv6_mask.tc << 12; ipv6_val.vtc_flow = input->flow.ipv6_flow.tc << 12; diff --git a/drivers/net/enic/enic_ethdev.c b/drivers/net/enic/enic_ethdev.c index d84714ef..28630892 100644 --- a/drivers/net/enic/enic_ethdev.c +++ b/drivers/net/enic/enic_ethdev.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include "vnic_intr.h" @@ -39,6 +40,8 @@ static const struct rte_pci_id pci_id_enic_map[] = { {.vendor_id = 0, /* sentinel */}, }; +#define ENIC_DEVARG_DISABLE_OVERLAY "disable-overlay" + RTE_INIT(enicpmd_init_log); static void enicpmd_init_log(void) @@ -318,52 +321,40 @@ static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, return enicpmd_dev_setup_intr(enic); } -static int enicpmd_vlan_filter_set(struct rte_eth_dev *eth_dev, - uint16_t vlan_id, int on) -{ - struct enic *enic = pmd_priv(eth_dev); - int err; - - ENICPMD_FUNC_TRACE(); - if (on) - err = enic_add_vlan(enic, vlan_id); - else - err = enic_del_vlan(enic, vlan_id); - return err; -} - static int enicpmd_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask) { struct enic *enic = pmd_priv(eth_dev); + uint64_t offloads; ENICPMD_FUNC_TRACE(); + offloads = eth_dev->data->dev_conf.rxmode.offloads; if (mask & ETH_VLAN_STRIP_MASK) { - if (eth_dev->data->dev_conf.rxmode.offloads & - DEV_RX_OFFLOAD_VLAN_STRIP) + if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP) enic->ig_vlan_strip_en = 1; else enic->ig_vlan_strip_en = 0; } - enic_set_rss_nic_cfg(enic); - - if (mask & ETH_VLAN_FILTER_MASK) { + if ((mask & ETH_VLAN_FILTER_MASK) && + (offloads & DEV_RX_OFFLOAD_VLAN_FILTER)) { dev_warning(enic, "Configuration of VLAN filter is not supported\n"); } - if (mask & ETH_VLAN_EXTEND_MASK) { + if ((mask & ETH_VLAN_EXTEND_MASK) && + (offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)) { dev_warning(enic, "Configuration of extended VLAN is not supported\n"); } - return 0; + return enic_set_vlan_strip(enic); } static int enicpmd_dev_configure(struct rte_eth_dev *eth_dev) { int ret; + int mask; struct enic *enic = pmd_priv(eth_dev); if (rte_eal_process_type() != RTE_PROC_PRIMARY) @@ -378,9 +369,21 @@ static int enicpmd_dev_configure(struct rte_eth_dev *eth_dev) enic->hw_ip_checksum = !!(eth_dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_CHECKSUM); - ret = enicpmd_vlan_offload_set(eth_dev, 
ETH_VLAN_STRIP_MASK); - - return ret; + /* All vlan offload masks to apply the current settings */ + mask = ETH_VLAN_STRIP_MASK | + ETH_VLAN_FILTER_MASK | + ETH_VLAN_EXTEND_MASK; + ret = enicpmd_vlan_offload_set(eth_dev, mask); + if (ret) { + dev_err(enic, "Failed to configure VLAN offloads\n"); + return ret; + } + /* + * Initialize RSS with the default reta and key. If the user key is + * given (rx_adv_conf.rss_conf.rss_key), will use that instead of the + * default key. + */ + return enic_init_rss_nic_cfg(enic); } /* Start the device. @@ -410,10 +413,9 @@ static void enicpmd_dev_stop(struct rte_eth_dev *eth_dev) ENICPMD_FUNC_TRACE(); enic_disable(enic); + memset(&link, 0, sizeof(link)); - rte_atomic64_cmpset((uint64_t *)ð_dev->data->dev_link, - *(uint64_t *)ð_dev->data->dev_link, - *(uint64_t *)&link); + rte_eth_linkstatus_set(eth_dev, &link); } /* @@ -459,27 +461,27 @@ static void enicpmd_dev_info_get(struct rte_eth_dev *eth_dev, struct enic *enic = pmd_priv(eth_dev); ENICPMD_FUNC_TRACE(); - device_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); /* Scattered Rx uses two receive queues per rx queue exposed to dpdk */ device_info->max_rx_queues = enic->conf_rq_count / 2; device_info->max_tx_queues = enic->conf_wq_count; device_info->min_rx_bufsize = ENIC_MIN_MTU; - device_info->max_rx_pktlen = enic->max_mtu + ETHER_HDR_LEN + 4; + /* "Max" mtu is not a typo. HW receives packet sizes up to the + * max mtu regardless of the current mtu (vNIC's mtu). vNIC mtu is + * a hint to the driver to size receive buffers accordingly so that + * larger-than-vnic-mtu packets get truncated.. For DPDK, we let + * the user decide the buffer size via rxmode.max_rx_pkt_len, basically + * ignoring vNIC mtu. + */ + device_info->max_rx_pktlen = enic_mtu_to_max_rx_pktlen(enic->max_mtu); device_info->max_mac_addrs = ENIC_MAX_MAC_ADDR; - device_info->rx_offload_capa = - DEV_RX_OFFLOAD_VLAN_STRIP | - DEV_RX_OFFLOAD_IPV4_CKSUM | - DEV_RX_OFFLOAD_UDP_CKSUM | - DEV_RX_OFFLOAD_TCP_CKSUM; - device_info->tx_offload_capa = - DEV_TX_OFFLOAD_VLAN_INSERT | - DEV_TX_OFFLOAD_IPV4_CKSUM | - DEV_TX_OFFLOAD_UDP_CKSUM | - DEV_TX_OFFLOAD_TCP_CKSUM | - DEV_TX_OFFLOAD_TCP_TSO; + device_info->rx_offload_capa = enic->rx_offload_capa; + device_info->tx_offload_capa = enic->tx_offload_capa; device_info->default_rxconf = (struct rte_eth_rxconf) { .rx_free_thresh = ENIC_DEFAULT_RX_FREE_THRESH }; + device_info->reta_size = enic->reta_size; + device_info->hash_key_size = enic->hash_key_size; + device_info->flow_type_rss_offloads = enic->flow_type_rss_offloads; } static const uint32_t *enicpmd_dev_supported_ptypes_get(struct rte_eth_dev *dev) @@ -571,7 +573,24 @@ static void enicpmd_remove_mac_addr(struct rte_eth_dev *eth_dev, uint32_t index) return; ENICPMD_FUNC_TRACE(); - enic_del_mac_address(enic, index); + if (enic_del_mac_address(enic, index)) + dev_err(enic, "del mac addr failed\n"); +} + +static int enicpmd_set_mac_addr(struct rte_eth_dev *eth_dev, + struct ether_addr *addr) +{ + struct enic *enic = pmd_priv(eth_dev); + int ret; + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return -E_RTE_SECONDARY; + + ENICPMD_FUNC_TRACE(); + ret = enic_del_mac_address(enic, 0); + if (ret) + return ret; + return enic_set_mac_address(enic, addr->addr_bytes); } static int enicpmd_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu) @@ -582,6 +601,168 @@ static int enicpmd_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu) return enic_set_mtu(enic, mtu); } +static int enicpmd_dev_rss_reta_query(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 + 
*reta_conf, + uint16_t reta_size) +{ + struct enic *enic = pmd_priv(dev); + uint16_t i, idx, shift; + + ENICPMD_FUNC_TRACE(); + if (reta_size != ENIC_RSS_RETA_SIZE) { + dev_err(enic, "reta_query: wrong reta_size. given=%u expected=%u\n", + reta_size, ENIC_RSS_RETA_SIZE); + return -EINVAL; + } + + for (i = 0; i < reta_size; i++) { + idx = i / RTE_RETA_GROUP_SIZE; + shift = i % RTE_RETA_GROUP_SIZE; + if (reta_conf[idx].mask & (1ULL << shift)) + reta_conf[idx].reta[shift] = enic_sop_rq_idx_to_rte_idx( + enic->rss_cpu.cpu[i / 4].b[i % 4]); + } + + return 0; +} + +static int enicpmd_dev_rss_reta_update(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 + *reta_conf, + uint16_t reta_size) +{ + struct enic *enic = pmd_priv(dev); + union vnic_rss_cpu rss_cpu; + uint16_t i, idx, shift; + + ENICPMD_FUNC_TRACE(); + if (reta_size != ENIC_RSS_RETA_SIZE) { + dev_err(enic, "reta_update: wrong reta_size. given=%u" + " expected=%u\n", + reta_size, ENIC_RSS_RETA_SIZE); + return -EINVAL; + } + /* + * Start with the current reta and modify it per reta_conf, as we + * need to push the entire reta even if we only modify one entry. + */ + rss_cpu = enic->rss_cpu; + for (i = 0; i < reta_size; i++) { + idx = i / RTE_RETA_GROUP_SIZE; + shift = i % RTE_RETA_GROUP_SIZE; + if (reta_conf[idx].mask & (1ULL << shift)) + rss_cpu.cpu[i / 4].b[i % 4] = + enic_rte_rq_idx_to_sop_idx( + reta_conf[idx].reta[shift]); + } + return enic_set_rss_reta(enic, &rss_cpu); +} + +static int enicpmd_dev_rss_hash_update(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct enic *enic = pmd_priv(dev); + + ENICPMD_FUNC_TRACE(); + return enic_set_rss_conf(enic, rss_conf); +} + +static int enicpmd_dev_rss_hash_conf_get(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct enic *enic = pmd_priv(dev); + + ENICPMD_FUNC_TRACE(); + if (rss_conf == NULL) + return -EINVAL; + if (rss_conf->rss_key != NULL && + rss_conf->rss_key_len < ENIC_RSS_HASH_KEY_SIZE) { + dev_err(enic, "rss_hash_conf_get: wrong rss_key_len. given=%u" + " expected=%u+\n", + rss_conf->rss_key_len, ENIC_RSS_HASH_KEY_SIZE); + return -EINVAL; + } + rss_conf->rss_hf = enic->rss_hf; + if (rss_conf->rss_key != NULL) { + int i; + for (i = 0; i < ENIC_RSS_HASH_KEY_SIZE; i++) { + rss_conf->rss_key[i] = + enic->rss_key.key[i / 10].b[i % 10]; + } + rss_conf->rss_key_len = ENIC_RSS_HASH_KEY_SIZE; + } + return 0; +} + +static void enicpmd_dev_rxq_info_get(struct rte_eth_dev *dev, + uint16_t rx_queue_id, + struct rte_eth_rxq_info *qinfo) +{ + struct enic *enic = pmd_priv(dev); + struct vnic_rq *rq_sop; + struct vnic_rq *rq_data; + struct rte_eth_rxconf *conf; + uint16_t sop_queue_idx; + uint16_t data_queue_idx; + + ENICPMD_FUNC_TRACE(); + sop_queue_idx = enic_rte_rq_idx_to_sop_idx(rx_queue_id); + data_queue_idx = enic_rte_rq_idx_to_data_idx(rx_queue_id); + rq_sop = &enic->rq[sop_queue_idx]; + rq_data = &enic->rq[data_queue_idx]; /* valid if data_queue_enable */ + qinfo->mp = rq_sop->mp; + qinfo->scattered_rx = rq_sop->data_queue_enable; + qinfo->nb_desc = rq_sop->ring.desc_count; + if (qinfo->scattered_rx) + qinfo->nb_desc += rq_data->ring.desc_count; + conf = &qinfo->conf; + memset(conf, 0, sizeof(*conf)); + conf->rx_free_thresh = rq_sop->rx_free_thresh; + conf->rx_drop_en = 1; + /* + * Except VLAN stripping (port setting), all the checksum offloads + * are always enabled. 
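The reta_query/reta_update handlers above juggle two layouts of the same 128-entry indirection table: the ethdev API splits it into rte_eth_rss_reta_entry64 groups, where entry i lives in group i / RTE_RETA_GROUP_SIZE at slot i % RTE_RETA_GROUP_SIZE and is valid only when its mask bit is set, while the vNIC copy packs four one-byte entries per word as cpu[i / 4].b[i % 4]. A minimal sketch of the group addressing, written as a hypothetical standalone helper rather than code from this patch:

#include <stdint.h>
#include <rte_ethdev.h>

/* Illustrative only: fetch entry i from the ethdev RETA representation.
 * RTE_RETA_GROUP_SIZE is 64, so the 128-entry enic table spans two groups.
 */
static inline uint16_t
reta_entry_get(const struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t i)
{
	uint16_t idx = i / RTE_RETA_GROUP_SIZE;   /* which 64-entry group */
	uint16_t shift = i % RTE_RETA_GROUP_SIZE; /* slot within that group */

	/* Only entries whose mask bit is set were supplied by the caller */
	if (reta_conf[idx].mask & (1ULL << shift))
		return reta_conf[idx].reta[shift];
	return UINT16_MAX; /* sentinel: keep the existing entry */
}

reta_update starts from the driver's saved rss_cpu copy because, as the comment in the hunk notes, the hardware table can only be pushed whole, never patched one entry at a time.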
+ */ + conf->offloads = enic->rx_offload_capa; + if (!enic->ig_vlan_strip_en) + conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; + /* rx_thresh and other fields are not applicable for enic */ +} + +static void enicpmd_dev_txq_info_get(struct rte_eth_dev *dev, + __rte_unused uint16_t tx_queue_id, + struct rte_eth_txq_info *qinfo) +{ + struct enic *enic = pmd_priv(dev); + + ENICPMD_FUNC_TRACE(); + qinfo->nb_desc = enic->config.wq_desc_count; + memset(&qinfo->conf, 0, sizeof(qinfo->conf)); + qinfo->conf.offloads = enic->tx_offload_capa; + /* tx_thresh, and all the other fields are not applicable for enic */ +} + +static int enicpmd_dev_rx_queue_intr_enable(struct rte_eth_dev *eth_dev, + uint16_t rx_queue_id) +{ + struct enic *enic = pmd_priv(eth_dev); + + ENICPMD_FUNC_TRACE(); + vnic_intr_unmask(&enic->intr[rx_queue_id + ENICPMD_RXQ_INTR_OFFSET]); + return 0; +} + +static int enicpmd_dev_rx_queue_intr_disable(struct rte_eth_dev *eth_dev, + uint16_t rx_queue_id) +{ + struct enic *enic = pmd_priv(eth_dev); + + ENICPMD_FUNC_TRACE(); + vnic_intr_mask(&enic->intr[rx_queue_id + ENICPMD_RXQ_INTR_OFFSET]); + return 0; +} + static const struct eth_dev_ops enicpmd_eth_dev_ops = { .dev_configure = enicpmd_dev_configure, .dev_start = enicpmd_dev_start, @@ -600,7 +781,7 @@ static const struct eth_dev_ops enicpmd_eth_dev_ops = { .dev_infos_get = enicpmd_dev_info_get, .dev_supported_ptypes_get = enicpmd_dev_supported_ptypes_get, .mtu_set = enicpmd_mtu_set, - .vlan_filter_set = enicpmd_vlan_filter_set, + .vlan_filter_set = NULL, .vlan_tpid_set = NULL, .vlan_offload_set = enicpmd_vlan_offload_set, .vlan_strip_queue_set = NULL, @@ -614,6 +795,10 @@ static const struct eth_dev_ops enicpmd_eth_dev_ops = { .rx_descriptor_done = NULL, .tx_queue_setup = enicpmd_dev_tx_queue_setup, .tx_queue_release = enicpmd_dev_tx_queue_release, + .rx_queue_intr_enable = enicpmd_dev_rx_queue_intr_enable, + .rx_queue_intr_disable = enicpmd_dev_rx_queue_intr_disable, + .rxq_info_get = enicpmd_dev_rxq_info_get, + .txq_info_get = enicpmd_dev_txq_info_get, .dev_led_on = NULL, .dev_led_off = NULL, .flow_ctrl_get = NULL, @@ -621,9 +806,57 @@ static const struct eth_dev_ops enicpmd_eth_dev_ops = { .priority_flow_ctrl_set = NULL, .mac_addr_add = enicpmd_add_mac_addr, .mac_addr_remove = enicpmd_remove_mac_addr, + .mac_addr_set = enicpmd_set_mac_addr, .filter_ctrl = enicpmd_dev_filter_ctrl, + .reta_query = enicpmd_dev_rss_reta_query, + .reta_update = enicpmd_dev_rss_reta_update, + .rss_hash_conf_get = enicpmd_dev_rss_hash_conf_get, + .rss_hash_update = enicpmd_dev_rss_hash_update, }; +static int enic_parse_disable_overlay(__rte_unused const char *key, + const char *value, + void *opaque) +{ + struct enic *enic; + + enic = (struct enic *)opaque; + if (strcmp(value, "0") == 0) { + enic->disable_overlay = false; + } else if (strcmp(value, "1") == 0) { + enic->disable_overlay = true; + } else { + dev_err(enic, "Invalid value for " ENIC_DEVARG_DISABLE_OVERLAY + ": expected=0|1 given=%s\n", value); + return -EINVAL; + } + return 0; +} + +static int enic_check_devargs(struct rte_eth_dev *dev) +{ + static const char *const valid_keys[] = { + ENIC_DEVARG_DISABLE_OVERLAY, NULL}; + struct enic *enic = pmd_priv(dev); + struct rte_kvargs *kvlist; + + ENICPMD_FUNC_TRACE(); + + enic->disable_overlay = false; + if (!dev->device->devargs) + return 0; + kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys); + if (!kvlist) + return -EINVAL; + if (rte_kvargs_process(kvlist, ENIC_DEVARG_DISABLE_OVERLAY, + enic_parse_disable_overlay, enic) < 0) { + 
rte_kvargs_free(kvlist); + return -EINVAL; + } + rte_kvargs_free(kvlist); + return 0; +} + struct enic *enicpmd_list_head = NULL; /* Initialize the driver * It returns 0 on success. @@ -633,6 +866,7 @@ static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev) struct rte_pci_device *pdev; struct rte_pci_addr *addr; struct enic *enic = pmd_priv(eth_dev); + int err; ENICPMD_FUNC_TRACE(); @@ -651,6 +885,9 @@ static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev) snprintf(enic->bdf_name, ENICPMD_BDF_LENGTH, "%04x:%02x:%02x.%x", addr->domain, addr->bus, addr->devid, addr->function); + err = enic_check_devargs(eth_dev); + if (err) + return err; return enic_probe(enic); } @@ -676,3 +913,5 @@ static struct rte_pci_driver rte_enic_pmd = { RTE_PMD_REGISTER_PCI(net_enic, rte_enic_pmd); RTE_PMD_REGISTER_PCI_TABLE(net_enic, pci_id_enic_map); RTE_PMD_REGISTER_KMOD_DEP(net_enic, "* igb_uio | uio_pci_generic | vfio-pci"); +RTE_PMD_REGISTER_PARAM_STRING(net_enic, + ENIC_DEVARG_DISABLE_OVERLAY "=<0|1> "); diff --git a/drivers/net/enic/enic_flow.c b/drivers/net/enic/enic_flow.c index 28923b0e..0cf04aef 100644 --- a/drivers/net/enic/enic_flow.c +++ b/drivers/net/enic/enic_flow.c @@ -3,6 +3,7 @@ */ #include +#include #include #include #include @@ -273,21 +274,33 @@ static const enum rte_flow_action_type enic_supported_actions_v1[] = { }; /** Supported actions for newer NICs */ -static const enum rte_flow_action_type enic_supported_actions_v2[] = { +static const enum rte_flow_action_type enic_supported_actions_v2_id[] = { RTE_FLOW_ACTION_TYPE_QUEUE, RTE_FLOW_ACTION_TYPE_MARK, RTE_FLOW_ACTION_TYPE_FLAG, RTE_FLOW_ACTION_TYPE_END, }; +static const enum rte_flow_action_type enic_supported_actions_v2_drop[] = { + RTE_FLOW_ACTION_TYPE_QUEUE, + RTE_FLOW_ACTION_TYPE_MARK, + RTE_FLOW_ACTION_TYPE_FLAG, + RTE_FLOW_ACTION_TYPE_DROP, + RTE_FLOW_ACTION_TYPE_END, +}; + /** Action capabilities indexed by NIC version information */ static const struct enic_action_cap enic_action_cap[] = { [FILTER_ACTION_RQ_STEERING_FLAG] = { .actions = enic_supported_actions_v1, .copy_fn = enic_copy_action_v1, }, - [FILTER_ACTION_V2_ALL] = { - .actions = enic_supported_actions_v2, + [FILTER_ACTION_FILTER_ID_FLAG] = { + .actions = enic_supported_actions_v2_id, + .copy_fn = enic_copy_action_v2, + }, + [FILTER_ACTION_DROP_FLAG] = { + .actions = enic_supported_actions_v2_drop, .copy_fn = enic_copy_action_v2, }, }; @@ -544,16 +557,21 @@ enic_copy_item_vlan_v2(const struct rte_flow_item *item, if (!spec) return 0; - /* Don't support filtering in tpid */ - if (mask) { - if (mask->tpid != 0) - return ENOTSUP; - } else { + if (!mask) mask = &rte_flow_item_vlan_mask; - RTE_ASSERT(mask->tpid == 0); - } if (*inner_ofst == 0) { + struct ether_hdr *eth_mask = + (void *)gp->layer[FILTER_GENERIC_1_L2].mask; + struct ether_hdr *eth_val = + (void *)gp->layer[FILTER_GENERIC_1_L2].val; + + /* Outer TPID cannot be matched */ + if (eth_mask->ether_type) + return ENOTSUP; + eth_mask->ether_type = mask->inner_type; + eth_val->ether_type = spec->inner_type; + /* Outer header. 
Use the vlan mask/val fields */ gp->mask_vlan = mask->tci; gp->val_vlan = spec->tci; @@ -952,6 +970,9 @@ static int enic_copy_action_v1(const struct rte_flow_action actions[], struct filter_action_v2 *enic_action) { + enum { FATE = 1, }; + uint32_t overlap = 0; + FLOW_TRACE(); for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { @@ -963,6 +984,10 @@ enic_copy_action_v1(const struct rte_flow_action actions[], const struct rte_flow_action_queue *queue = (const struct rte_flow_action_queue *) actions->conf; + + if (overlap & FATE) + return ENOTSUP; + overlap |= FATE; enic_action->rq_idx = enic_rte_rq_idx_to_sop_idx(queue->index); break; @@ -972,6 +997,8 @@ enic_copy_action_v1(const struct rte_flow_action actions[], break; } } + if (!(overlap & FATE)) + return ENOTSUP; enic_action->type = FILTER_ACTION_RQ_STEERING; return 0; } @@ -989,6 +1016,9 @@ static int enic_copy_action_v2(const struct rte_flow_action actions[], struct filter_action_v2 *enic_action) { + enum { FATE = 1, MARK = 2, }; + uint32_t overlap = 0; + FLOW_TRACE(); for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { @@ -997,6 +1027,10 @@ enic_copy_action_v2(const struct rte_flow_action actions[], const struct rte_flow_action_queue *queue = (const struct rte_flow_action_queue *) actions->conf; + + if (overlap & FATE) + return ENOTSUP; + overlap |= FATE; enic_action->rq_idx = enic_rte_rq_idx_to_sop_idx(queue->index); enic_action->flags |= FILTER_ACTION_RQ_STEERING_FLAG; @@ -1007,6 +1041,9 @@ enic_copy_action_v2(const struct rte_flow_action actions[], (const struct rte_flow_action_mark *) actions->conf; + if (overlap & MARK) + return ENOTSUP; + overlap |= MARK; /* ENIC_MAGIC_FILTER_ID is reserved and is the highest * in the range of allows mark ids. */ @@ -1017,10 +1054,20 @@ enic_copy_action_v2(const struct rte_flow_action actions[], break; } case RTE_FLOW_ACTION_TYPE_FLAG: { + if (overlap & MARK) + return ENOTSUP; + overlap |= MARK; enic_action->filter_id = ENIC_MAGIC_FILTER_ID; enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG; break; } + case RTE_FLOW_ACTION_TYPE_DROP: { + if (overlap & FATE) + return ENOTSUP; + overlap |= FATE; + enic_action->flags |= FILTER_ACTION_DROP_FLAG; + break; + } case RTE_FLOW_ACTION_TYPE_VOID: continue; default: @@ -1028,6 +1075,8 @@ enic_copy_action_v2(const struct rte_flow_action actions[], break; } } + if (!(overlap & FATE)) + return ENOTSUP; enic_action->type = FILTER_ACTION_V2; return 0; } @@ -1059,10 +1108,14 @@ enic_get_filter_cap(struct enic *enic) static const struct enic_action_cap * enic_get_action_cap(struct enic *enic) { - static const struct enic_action_cap *ea; - - if (enic->filter_tags) - ea = &enic_action_cap[FILTER_ACTION_V2_ALL]; + const struct enic_action_cap *ea; + uint8_t actions; + + actions = enic->filter_actions; + if (actions & FILTER_ACTION_DROP_FLAG) + ea = &enic_action_cap[FILTER_ACTION_DROP_FLAG]; + else if (actions & FILTER_ACTION_FILTER_ID_FLAG) + ea = &enic_action_cap[FILTER_ACTION_FILTER_ID_FLAG]; else ea = &enic_action_cap[FILTER_ACTION_RQ_STEERING_FLAG]; return ea; @@ -1268,6 +1321,12 @@ enic_flow_parse(struct rte_eth_dev *dev, NULL, "egress is not supported"); return -rte_errno; + } else if (attrs->transfer) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + NULL, + "transfer is not supported"); + return -rte_errno; } else if (!attrs->ingress) { rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c index ec9d343f..a25d303d 100644 
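The overlap bookkeeping added to enic_copy_action_v1()/enic_copy_action_v2() above enforces what the hardware tables cannot express on their own: at most one fate action (QUEUE or DROP), at most one mark action (MARK or FLAG), and at least one fate action per flow. A minimal sketch of the pattern as a self-contained toy, not the driver code itself:

#include <errno.h>
#include <stdint.h>

/* Illustrative only: one bit per action class. Seeing a class twice, or
 * finishing without a fate action, is rejected with ENOTSUP, mirroring
 * enic_copy_action_v2(). */
enum { FATE = 1, MARK = 2 };

static int
check_action_classes(const uint32_t *classes, int n)
{
	uint32_t overlap = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (overlap & classes[i])
			return ENOTSUP; /* e.g. QUEUE followed by DROP */
		overlap |= classes[i];
	}
	if (!(overlap & FATE))
		return ENOTSUP; /* every flow must steer or drop */
	return 0;
}

enic_get_action_cap() then selects the richest action table the adapter reports, preferring DROP-capable firmware over tag-only firmware before falling back to plain RQ steering.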
--- a/drivers/net/enic/enic_main.c +++ b/drivers/net/enic/enic_main.c @@ -162,13 +162,12 @@ int enic_dev_stats_get(struct enic *enic, struct rte_eth_stats *r_stats) return 0; } -void enic_del_mac_address(struct enic *enic, int mac_index) +int enic_del_mac_address(struct enic *enic, int mac_index) { struct rte_eth_dev *eth_dev = enic->rte_dev; uint8_t *mac_addr = eth_dev->data->mac_addrs[mac_index].addr_bytes; - if (vnic_dev_del_addr(enic->vdev, mac_addr)) - dev_err(enic, "del mac addr failed\n"); + return vnic_dev_del_addr(enic->vdev, mac_addr); } int enic_set_mac_address(struct enic *enic, uint8_t *mac_addr) @@ -200,10 +199,15 @@ void enic_init_vnic_resources(struct enic *enic) { unsigned int error_interrupt_enable = 1; unsigned int error_interrupt_offset = 0; + unsigned int rxq_interrupt_enable = 0; + unsigned int rxq_interrupt_offset = ENICPMD_RXQ_INTR_OFFSET; unsigned int index = 0; unsigned int cq_idx; struct vnic_rq *data_rq; + if (enic->rte_dev->data->dev_conf.intr_conf.rxq) + rxq_interrupt_enable = 1; + for (index = 0; index < enic->rq_count; index++) { cq_idx = enic_cq_rq(enic, enic_rte_rq_idx_to_sop_idx(index)); @@ -225,11 +229,13 @@ void enic_init_vnic_resources(struct enic *enic) 0 /* cq_head */, 0 /* cq_tail */, 1 /* cq_tail_color */, - 0 /* interrupt_enable */, + rxq_interrupt_enable, 1 /* cq_entry_enable */, 0 /* cq_message_enable */, - 0 /* interrupt offset */, + rxq_interrupt_offset, 0 /* cq_message_addr */); + if (rxq_interrupt_enable) + rxq_interrupt_offset++; } for (index = 0; index < enic->wq_count; index++) { @@ -237,6 +243,9 @@ void enic_init_vnic_resources(struct enic *enic) enic_cq_wq(enic, index), error_interrupt_enable, error_interrupt_offset); + /* Compute unsupported ol flags for enic_prep_pkts() */ + enic->wq[index].tx_offload_notsup_mask = + PKT_TX_OFFLOAD_MASK ^ enic->tx_offload_mask; cq_idx = enic_cq_wq(enic, index); vnic_cq_init(&enic->cq[cq_idx], @@ -252,10 +261,12 @@ void enic_init_vnic_resources(struct enic *enic) (u64)enic->wq[index].cqmsg_rz->iova); } - vnic_intr_init(&enic->intr, - enic->config.intr_timer_usec, - enic->config.intr_timer_type, - /*mask_on_assertion*/1); + for (index = 0; index < enic->intr_count; index++) { + vnic_intr_init(&enic->intr[index], + enic->config.intr_timer_usec, + enic->config.intr_timer_type, + /*mask_on_assertion*/1); + } } @@ -266,6 +277,8 @@ enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq) struct rq_enet_desc *rqd = rq->ring.descs; unsigned i; dma_addr_t dma_addr; + uint32_t max_rx_pkt_len; + uint16_t rq_buf_len; if (!rq->in_use) return 0; @@ -273,6 +286,18 @@ enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq) dev_debug(enic, "queue %u, allocating %u rx queue mbufs\n", rq->index, rq->ring.desc_count); + /* + * If *not* using scatter and the mbuf size is greater than the + * requested max packet size (max_rx_pkt_len), then reduce the + * posted buffer size to max_rx_pkt_len. HW still receives packets + * larger than max_rx_pkt_len, but they will be truncated, which we + * drop in the rx handler. Not ideal, but better than returning + * large packets when the user is not expecting them. 
+ */ + max_rx_pkt_len = enic->rte_dev->data->dev_conf.rxmode.max_rx_pkt_len; + rq_buf_len = rte_pktmbuf_data_room_size(rq->mp) - RTE_PKTMBUF_HEADROOM; + if (max_rx_pkt_len < rq_buf_len && !rq->data_queue_enable) + rq_buf_len = max_rx_pkt_len; for (i = 0; i < rq->ring.desc_count; i++, rqd++) { mb = rte_mbuf_raw_alloc(rq->mp); if (mb == NULL) { @@ -287,9 +312,27 @@ enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq) rq_enet_desc_enc(rqd, dma_addr, (rq->is_sop ? RQ_ENET_TYPE_ONLY_SOP : RQ_ENET_TYPE_NOT_SOP), - mb->buf_len - RTE_PKTMBUF_HEADROOM); + rq_buf_len); rq->mbuf_ring[i] = mb; } + /* + * Do not post the buffers to the NIC until we enable the RQ via + * enic_start_rq(). + */ + rq->need_initial_post = true; + return 0; +} + +/* + * Post the Rx buffers for the first time. enic_alloc_rx_queue_mbufs() has + * allocated the buffers and filled the RQ descriptor ring. Just need to push + * the post index to the NIC. + */ +static void +enic_initial_post_rx(struct enic *enic, struct vnic_rq *rq) +{ + if (!rq->in_use || !rq->need_initial_post) + return; /* make sure all prior writes are complete before doing the PIO write */ rte_rmb(); @@ -304,9 +347,7 @@ enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq) iowrite32(rq->posted_index, &rq->ctrl->posted_index); iowrite32(0, &rq->ctrl->fetch_index); rte_rmb(); - - return 0; - + rq->need_initial_post = false; } static void * @@ -319,8 +360,8 @@ enic_alloc_consistent(void *priv, size_t size, struct enic *enic = (struct enic *)priv; struct enic_memzone_entry *mze; - rz = rte_memzone_reserve_aligned((const char *)name, - size, SOCKET_ID_ANY, 0, ENIC_ALIGN); + rz = rte_memzone_reserve_aligned((const char *)name, size, + SOCKET_ID_ANY, RTE_MEMZONE_IOVA_CONTIG, ENIC_ALIGN); if (!rz) { pr_err("%s : Failed to allocate memory requested for %s\n", __func__, name); @@ -379,16 +420,14 @@ enic_free_consistent(void *priv, int enic_link_update(struct enic *enic) { struct rte_eth_dev *eth_dev = enic->rte_dev; - int ret; - int link_status = 0; + struct rte_eth_link link; - link_status = enic_get_link_status(enic); - ret = (link_status == enic->link_status); - enic->link_status = link_status; - eth_dev->data->dev_link.link_status = link_status; - eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX; - eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev); - return ret; + memset(&link, 0, sizeof(link)); + link.link_status = enic_get_link_status(enic); + link.link_duplex = ETH_LINK_FULL_DUPLEX; + link.link_speed = vnic_dev_port_speed(enic->vdev); + + return rte_eth_linkstatus_set(eth_dev, &link); } static void @@ -397,13 +436,62 @@ enic_intr_handler(void *arg) struct rte_eth_dev *dev = (struct rte_eth_dev *)arg; struct enic *enic = pmd_priv(dev); - vnic_intr_return_all_credits(&enic->intr); + vnic_intr_return_all_credits(&enic->intr[ENICPMD_LSC_INTR_OFFSET]); enic_link_update(enic); _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL); enic_log_q_error(enic); } +static int enic_rxq_intr_init(struct enic *enic) +{ + struct rte_intr_handle *intr_handle; + uint32_t rxq_intr_count, i; + int err; + + intr_handle = enic->rte_dev->intr_handle; + if (!enic->rte_dev->data->dev_conf.intr_conf.rxq) + return 0; + /* + * Rx queue interrupts only work when we have MSI-X interrupts, + * one per queue. Sharing one interrupt is technically + * possible with VIC, but it is not worth the complications it brings. 
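Two sizing details above deserve a worked example. enic_mtu_to_max_rx_pktlen() makes the unit conversion explicit: for the standard 1500-byte MTU, the ethdev packet length is 1500 + 14 (Ethernet header) + 4 (CRC) = 1518 bytes. And with scatter off, enic_alloc_rx_queue_mbufs() above shrinks the posted buffer size to max_rx_pkt_len so the hardware truncates oversized frames, which the Rx handler then drops. A sketch of that clamp, with the mbuf data room passed as a parameter rather than read from a mempool:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative only: non-scatter Rx buffer sizing, following
 * enic_alloc_rx_queue_mbufs(). mbuf_room is the mbuf data room size
 * minus RTE_PKTMBUF_HEADROOM. */
static uint16_t
rx_buf_len(uint32_t max_rx_pkt_len, uint16_t mbuf_room, bool scatter)
{
	if (max_rx_pkt_len < mbuf_room && !scatter)
		return (uint16_t)max_rx_pkt_len; /* longer frames truncated */
	return mbuf_room;
}

For example, with MTU 1500 (max_rx_pkt_len 1518) and a 2048-byte mbuf under the default 128-byte headroom, this posts 1518-byte buffers instead of 1920-byte ones.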
+ */ + if (!rte_intr_cap_multiple(intr_handle)) { + dev_err(enic, "Rx queue interrupts require MSI-X interrupts" + " (vfio-pci driver)\n"); + return -ENOTSUP; + } + rxq_intr_count = enic->intr_count - ENICPMD_RXQ_INTR_OFFSET; + err = rte_intr_efd_enable(intr_handle, rxq_intr_count); + if (err) { + dev_err(enic, "Failed to enable event fds for Rx queue" + " interrupts\n"); + return err; + } + intr_handle->intr_vec = rte_zmalloc("enic_intr_vec", + rxq_intr_count * sizeof(int), 0); + if (intr_handle->intr_vec == NULL) { + dev_err(enic, "Failed to allocate intr_vec\n"); + return -ENOMEM; + } + for (i = 0; i < rxq_intr_count; i++) + intr_handle->intr_vec[i] = i + ENICPMD_RXQ_INTR_OFFSET; + return 0; +} + +static void enic_rxq_intr_deinit(struct enic *enic) +{ + struct rte_intr_handle *intr_handle; + + intr_handle = enic->rte_dev->intr_handle; + rte_intr_efd_disable(intr_handle); + if (intr_handle->intr_vec != NULL) { + rte_free(intr_handle->intr_vec); + intr_handle->intr_vec = NULL; + } +} + int enic_enable(struct enic *enic) { unsigned int index; @@ -420,6 +508,9 @@ int enic_enable(struct enic *enic) if (eth_dev->data->dev_conf.intr_conf.lsc) vnic_dev_notify_set(enic->vdev, 0); + err = enic_rxq_intr_init(enic); + if (err) + return err; if (enic_clsf_init(enic)) dev_warning(enic, "Init of hash table for clsf failed."\ "Flow director feature will not work\n"); @@ -457,7 +548,8 @@ int enic_enable(struct enic *enic) enic_intr_handler, (void *)enic->rte_dev); rte_intr_enable(&(enic->pdev->intr_handle)); - vnic_intr_unmask(&enic->intr); + /* Unmask LSC interrupt */ + vnic_intr_unmask(&enic->intr[ENICPMD_LSC_INTR_OFFSET]); return 0; } @@ -465,17 +557,21 @@ int enic_enable(struct enic *enic) int enic_alloc_intr_resources(struct enic *enic) { int err; + unsigned int i; dev_info(enic, "vNIC resources used: "\ "wq %d rq %d cq %d intr %d\n", enic->wq_count, enic_vnic_rq_count(enic), enic->cq_count, enic->intr_count); - err = vnic_intr_alloc(enic->vdev, &enic->intr, 0); - if (err) - enic_free_vnic_resources(enic); - - return err; + for (i = 0; i < enic->intr_count; i++) { + err = vnic_intr_alloc(enic->vdev, &enic->intr[i], i); + if (err) { + enic_free_vnic_resources(enic); + return err; + } + } + return 0; } void enic_free_rq(void *rxq) @@ -539,10 +635,13 @@ void enic_start_rq(struct enic *enic, uint16_t queue_idx) rq_data = &enic->rq[rq_sop->data_queue_idx]; struct rte_eth_dev *eth_dev = enic->rte_dev; - if (rq_data->in_use) + if (rq_data->in_use) { vnic_rq_enable(rq_data); + enic_initial_post_rx(enic, rq_data); + } rte_mb(); vnic_rq_enable(rq_sop); + enic_initial_post_rx(enic, rq_sop); eth_dev->data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED; } @@ -581,7 +680,7 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx, unsigned int mbuf_size, mbufs_per_pkt; unsigned int nb_sop_desc, nb_data_desc; uint16_t min_sop, max_sop, min_data, max_data; - uint16_t mtu = enic->rte_dev->data->mtu; + uint32_t max_rx_pkt_len; rq_sop->is_sop = 1; rq_sop->data_queue_idx = data_queue_idx; @@ -599,22 +698,42 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx, mbuf_size = (uint16_t)(rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM); + /* max_rx_pkt_len includes the ethernet header and CRC. 
*/ + max_rx_pkt_len = enic->rte_dev->data->dev_conf.rxmode.max_rx_pkt_len; if (enic->rte_dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) { dev_info(enic, "Rq %u Scatter rx mode enabled\n", queue_idx); - /* ceil((mtu + ETHER_HDR_LEN + 4)/mbuf_size) */ - mbufs_per_pkt = ((mtu + ETHER_HDR_LEN + 4) + - (mbuf_size - 1)) / mbuf_size; + /* ceil((max pkt len)/mbuf_size) */ + mbufs_per_pkt = (max_rx_pkt_len + mbuf_size - 1) / mbuf_size; } else { dev_info(enic, "Scatter rx mode disabled\n"); mbufs_per_pkt = 1; + if (max_rx_pkt_len > mbuf_size) { + dev_warning(enic, "The maximum Rx packet size (%u) is" + " larger than the mbuf size (%u), and" + " scatter is disabled. Larger packets will" + " be truncated.\n", + max_rx_pkt_len, mbuf_size); + } } if (mbufs_per_pkt > 1) { dev_info(enic, "Rq %u Scatter rx mode in use\n", queue_idx); rq_sop->data_queue_enable = 1; rq_data->in_use = 1; + /* + * HW does not directly support rxmode.max_rx_pkt_len. HW always + * receives packet sizes up to the "max" MTU. + * If not using scatter, we can achieve the effect of dropping + * larger packets by reducing the size of posted buffers. + * See enic_alloc_rx_queue_mbufs(). + */ + if (max_rx_pkt_len < + enic_mtu_to_max_rx_pktlen(enic->max_mtu)) { + dev_warning(enic, "rxmode.max_rx_pkt_len is ignored" + " when scatter rx mode is in use.\n"); + } } else { dev_info(enic, "Rq %u Scatter rx mode not being used\n", queue_idx); @@ -654,8 +773,9 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx, nb_data_desc = max_data; } if (mbufs_per_pkt > 1) { - dev_info(enic, "For mtu %d and mbuf size %d valid rx descriptor range is %d to %d\n", - mtu, mbuf_size, min_sop + min_data, + dev_info(enic, "For max packet size %u and mbuf size %u valid" + " rx descriptor range is %u to %u\n", + max_rx_pkt_len, mbuf_size, min_sop + min_data, max_sop + max_data); } dev_info(enic, "Using %d rx descriptors (sop %d, data %d)\n", @@ -788,9 +908,8 @@ int enic_alloc_wq(struct enic *enic, uint16_t queue_idx, instance++); wq->cqmsg_rz = rte_memzone_reserve_aligned((const char *)name, - sizeof(uint32_t), - SOCKET_ID_ANY, 0, - ENIC_ALIGN); + sizeof(uint32_t), SOCKET_ID_ANY, + RTE_MEMZONE_IOVA_CONTIG, ENIC_ALIGN); if (!wq->cqmsg_rz) return -ENOMEM; @@ -802,8 +921,11 @@ int enic_disable(struct enic *enic) unsigned int i; int err; - vnic_intr_mask(&enic->intr); - (void)vnic_intr_masked(&enic->intr); /* flush write */ + for (i = 0; i < enic->intr_count; i++) { + vnic_intr_mask(&enic->intr[i]); + (void)vnic_intr_masked(&enic->intr[i]); /* flush write */ + } + enic_rxq_intr_deinit(enic); rte_intr_disable(&enic->pdev->intr_handle); rte_intr_callback_unregister(&enic->pdev->intr_handle, enic_intr_handler, @@ -846,7 +968,8 @@ int enic_disable(struct enic *enic) vnic_rq_clean(&enic->rq[i], enic_free_rq_buf); for (i = 0; i < enic->cq_count; i++) vnic_cq_clean(&enic->cq[i]); - vnic_intr_clean(&enic->intr); + for (i = 0; i < enic->intr_count; i++) + vnic_intr_clean(&enic->intr[i]); return 0; } @@ -879,9 +1002,10 @@ static int enic_dev_wait(struct vnic_dev *vdev, static int enic_dev_open(struct enic *enic) { int err; + int flags = CMD_OPENF_IG_DESCCACHE; err = enic_dev_wait(enic->vdev, vnic_dev_open, - vnic_dev_open_done, 0); + vnic_dev_open_done, flags); if (err) dev_err(enic_get_dev(enic), "vNIC device open failed, err %d\n", err); @@ -889,44 +1013,42 @@ static int enic_dev_open(struct enic *enic) return err; } -static int enic_set_rsskey(struct enic *enic) +static int enic_set_rsskey(struct enic *enic, uint8_t *user_key) { dma_addr_t rss_key_buf_pa; 
union vnic_rss_key *rss_key_buf_va = NULL; - static union vnic_rss_key rss_key = { - .key = { - [0] = {.b = {85, 67, 83, 97, 119, 101, 115, 111, 109, 101}}, - [1] = {.b = {80, 65, 76, 79, 117, 110, 105, 113, 117, 101}}, - [2] = {.b = {76, 73, 78, 85, 88, 114, 111, 99, 107, 115}}, - [3] = {.b = {69, 78, 73, 67, 105, 115, 99, 111, 111, 108}}, - } - }; - int err; + int err, i; u8 name[NAME_MAX]; + RTE_ASSERT(user_key != NULL); snprintf((char *)name, NAME_MAX, "rss_key-%s", enic->bdf_name); rss_key_buf_va = enic_alloc_consistent(enic, sizeof(union vnic_rss_key), &rss_key_buf_pa, name); if (!rss_key_buf_va) return -ENOMEM; - rte_memcpy(rss_key_buf_va, &rss_key, sizeof(union vnic_rss_key)); + for (i = 0; i < ENIC_RSS_HASH_KEY_SIZE; i++) + rss_key_buf_va->key[i / 10].b[i % 10] = user_key[i]; err = enic_set_rss_key(enic, rss_key_buf_pa, sizeof(union vnic_rss_key)); + /* Save for later queries */ + if (!err) { + rte_memcpy(&enic->rss_key, rss_key_buf_va, + sizeof(union vnic_rss_key)); + } enic_free_consistent(enic, sizeof(union vnic_rss_key), rss_key_buf_va, rss_key_buf_pa); return err; } -static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits) +int enic_set_rss_reta(struct enic *enic, union vnic_rss_cpu *rss_cpu) { dma_addr_t rss_cpu_buf_pa; union vnic_rss_cpu *rss_cpu_buf_va = NULL; - int i; int err; u8 name[NAME_MAX]; @@ -936,9 +1058,7 @@ static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits) if (!rss_cpu_buf_va) return -ENOMEM; - for (i = 0; i < (1 << rss_hash_bits); i++) - (*rss_cpu_buf_va).cpu[i / 4].b[i % 4] = - enic_rte_rq_idx_to_sop_idx(i % enic->rq_count); + rte_memcpy(rss_cpu_buf_va, rss_cpu, sizeof(union vnic_rss_cpu)); err = enic_set_rss_cpu(enic, rss_cpu_buf_pa, @@ -947,6 +1067,9 @@ static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits) enic_free_consistent(enic, sizeof(union vnic_rss_cpu), rss_cpu_buf_va, rss_cpu_buf_pa); + /* Save for later queries */ + if (!err) + rte_memcpy(&enic->rss_cpu, rss_cpu, sizeof(union vnic_rss_cpu)); return err; } @@ -956,8 +1079,6 @@ static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu, const u8 tso_ipid_split_en = 0; int err; - /* Enable VLAN tag stripping */ - err = enic_set_nic_cfg(enic, rss_default_cpu, rss_hash_type, rss_hash_bits, rss_base_cpu, @@ -967,47 +1088,50 @@ static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu, return err; } -int enic_set_rss_nic_cfg(struct enic *enic) +/* Initialize RSS with defaults, called from dev_configure */ +int enic_init_rss_nic_cfg(struct enic *enic) { - const u8 rss_default_cpu = 0; - const u8 rss_hash_type = NIC_CFG_RSS_HASH_TYPE_IPV4 | - NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 | - NIC_CFG_RSS_HASH_TYPE_IPV6 | - NIC_CFG_RSS_HASH_TYPE_TCP_IPV6; - const u8 rss_hash_bits = 7; - const u8 rss_base_cpu = 0; - u8 rss_enable = ENIC_SETTING(enic, RSS) && (enic->rq_count > 1); - - if (rss_enable) { - if (!enic_set_rsskey(enic)) { - if (enic_set_rsscpu(enic, rss_hash_bits)) { - rss_enable = 0; - dev_warning(enic, "RSS disabled, "\ - "Failed to set RSS cpu indirection table."); - } - } else { - rss_enable = 0; - dev_warning(enic, - "RSS disabled, Failed to set RSS key.\n"); + static uint8_t default_rss_key[] = { + 85, 67, 83, 97, 119, 101, 115, 111, 109, 101, + 80, 65, 76, 79, 117, 110, 105, 113, 117, 101, + 76, 73, 78, 85, 88, 114, 111, 99, 107, 115, + 69, 78, 73, 67, 105, 115, 99, 111, 111, 108, + }; + struct rte_eth_rss_conf rss_conf; + union vnic_rss_cpu rss_cpu; + int ret, i; + + rss_conf = enic->rte_dev->data->dev_conf.rx_adv_conf.rss_conf; + /* + * If setting key for the 
first time, and the user gives us none, then + * push the default key to NIC. + */ + if (rss_conf.rss_key == NULL) { + rss_conf.rss_key = default_rss_key; + rss_conf.rss_key_len = ENIC_RSS_HASH_KEY_SIZE; + } + ret = enic_set_rss_conf(enic, &rss_conf); + if (ret) { + dev_err(enic, "Failed to configure RSS\n"); + return ret; + } + if (enic->rss_enable) { + /* If enabling RSS, use the default reta */ + for (i = 0; i < ENIC_RSS_RETA_SIZE; i++) { + rss_cpu.cpu[i / 4].b[i % 4] = + enic_rte_rq_idx_to_sop_idx(i % enic->rq_count); } + ret = enic_set_rss_reta(enic, &rss_cpu); + if (ret) + dev_err(enic, "Failed to set RSS indirection table\n"); } - - return enic_set_niccfg(enic, rss_default_cpu, rss_hash_type, - rss_hash_bits, rss_base_cpu, rss_enable); + return ret; } int enic_setup_finish(struct enic *enic) { - int ret; - enic_init_soft_stats(enic); - ret = enic_set_rss_nic_cfg(enic); - if (ret) { - dev_err(enic, "Failed to config nic, aborting.\n"); - return -1; - } - /* Default conf */ vnic_dev_packet_filter(enic->vdev, 1 /* directed */, @@ -1022,6 +1146,112 @@ int enic_setup_finish(struct enic *enic) return 0; } +static int enic_rss_conf_valid(struct enic *enic, + struct rte_eth_rss_conf *rss_conf) +{ + /* RSS is disabled per VIC settings. Ignore rss_conf. */ + if (enic->flow_type_rss_offloads == 0) + return 0; + if (rss_conf->rss_key != NULL && + rss_conf->rss_key_len != ENIC_RSS_HASH_KEY_SIZE) { + dev_err(enic, "Given rss_key is %d bytes, it must be %d\n", + rss_conf->rss_key_len, ENIC_RSS_HASH_KEY_SIZE); + return -EINVAL; + } + if (rss_conf->rss_hf != 0 && + (rss_conf->rss_hf & enic->flow_type_rss_offloads) == 0) { + dev_err(enic, "Given rss_hf contains none of the supported" + " types\n"); + return -EINVAL; + } + return 0; +} + +/* Set hash type and key according to rss_conf */ +int enic_set_rss_conf(struct enic *enic, struct rte_eth_rss_conf *rss_conf) +{ + struct rte_eth_dev *eth_dev; + uint64_t rss_hf; + u8 rss_hash_type; + u8 rss_enable; + int ret; + + RTE_ASSERT(rss_conf != NULL); + ret = enic_rss_conf_valid(enic, rss_conf); + if (ret) { + dev_err(enic, "RSS configuration (rss_conf) is invalid\n"); + return ret; + } + + eth_dev = enic->rte_dev; + rss_hash_type = 0; + rss_hf = rss_conf->rss_hf & enic->flow_type_rss_offloads; + if (enic->rq_count > 1 && + (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) && + rss_hf != 0) { + rss_enable = 1; + if (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | + ETH_RSS_NONFRAG_IPV4_OTHER)) + rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_IPV4; + if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) + rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV4; + if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) { + rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_UDP_IPV4; + if (ENIC_SETTING(enic, RSSHASH_UDP_WEAK)) { + /* + * 'TCP' is not a typo. The "weak" version of + * UDP RSS requires both the TCP and UDP bits + * be set. It does enable TCP RSS as well. 
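The byte layouts used above are easy to misread. The 40-byte RSS key is stored on the vNIC as four 10-byte subkeys, addressed as key[i / 10].b[i % 10], and the default indirection table fills its 128 entries round-robin across the configured queues, four entries per packed word. A short sketch of both packing loops, assuming the ENIC_RSS_HASH_KEY_SIZE (40) and ENIC_RSS_RETA_SIZE (128) constants added to enic_res.h later in this patch, with the vnic_rss.h unions and the driver's queue-index helper in scope:

/* Illustrative only: the packing loops from enic_set_rsskey() and
 * enic_init_rss_nic_cfg(). */
for (i = 0; i < ENIC_RSS_HASH_KEY_SIZE; i++)	/* 40 key bytes */
	rss_key.key[i / 10].b[i % 10] = user_key[i];

for (i = 0; i < ENIC_RSS_RETA_SIZE; i++)	/* 128 RETA entries */
	rss_cpu.cpu[i / 4].b[i % 4] =
		enic_rte_rq_idx_to_sop_idx(i % rq_count);

With rq_count = 4, the default table reads 0, 1, 2, 3, 0, 1, ... (as SOP queue indexes), spreading flows evenly until the application installs its own mapping through reta_update.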
+ */ + rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV4; + } + } + if (rss_hf & (ETH_RSS_IPV6 | ETH_RSS_IPV6_EX | + ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER)) + rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_IPV6; + if (rss_hf & (ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX)) + rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV6; + if (rss_hf & (ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_IPV6_UDP_EX)) { + rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_UDP_IPV6; + if (ENIC_SETTING(enic, RSSHASH_UDP_WEAK)) + rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV6; + } + } else { + rss_enable = 0; + rss_hf = 0; + } + + /* Set the hash key if provided */ + if (rss_enable && rss_conf->rss_key) { + ret = enic_set_rsskey(enic, rss_conf->rss_key); + if (ret) { + dev_err(enic, "Failed to set RSS key\n"); + return ret; + } + } + + ret = enic_set_niccfg(enic, ENIC_RSS_DEFAULT_CPU, rss_hash_type, + ENIC_RSS_HASH_BITS, ENIC_RSS_BASE_CPU, + rss_enable); + if (!ret) { + enic->rss_hf = rss_hf; + enic->rss_hash_type = rss_hash_type; + enic->rss_enable = rss_enable; + } + return 0; +} + +int enic_set_vlan_strip(struct enic *enic) +{ + /* + * Unfortunately, VLAN strip on/off and RSS on/off are configured + * together. So, re-do niccfg, preserving the current RSS settings. + */ + return enic_set_niccfg(enic, ENIC_RSS_DEFAULT_CPU, enic->rss_hash_type, + ENIC_RSS_HASH_BITS, ENIC_RSS_BASE_CPU, + enic->rss_enable); +} + void enic_add_packet_filter(struct enic *enic) { /* Args -> directed, multicast, broadcast, promisc, allmulti */ @@ -1043,6 +1273,7 @@ static void enic_dev_deinit(struct enic *enic) rte_free(eth_dev->data->mac_addrs); rte_free(enic->cq); + rte_free(enic->intr); rte_free(enic->rq); rte_free(enic->wq); } @@ -1052,12 +1283,16 @@ int enic_set_vnic_res(struct enic *enic) { struct rte_eth_dev *eth_dev = enic->rte_dev; int rc = 0; - unsigned int required_rq, required_wq, required_cq; + unsigned int required_rq, required_wq, required_cq, required_intr; /* Always use two vNIC RQs per eth_dev RQ, regardless of Rx scatter. */ required_rq = eth_dev->data->nb_rx_queues * 2; required_wq = eth_dev->data->nb_tx_queues; required_cq = eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues; + required_intr = 1; /* 1 for LSC even if intr_conf.lsc is 0 */ + if (eth_dev->data->dev_conf.intr_conf.rxq) { + required_intr += eth_dev->data->nb_rx_queues; + } if (enic->conf_rq_count < required_rq) { dev_err(dev, "Not enough Receive queues. Requested:%u which uses %d RQs on VIC, Configured:%u\n", @@ -1076,11 +1311,18 @@ int enic_set_vnic_res(struct enic *enic) required_cq, enic->conf_cq_count); rc = -EINVAL; } + if (enic->conf_intr_count < required_intr) { + dev_err(dev, "Not enough Interrupts to support Rx queue" + " interrupts. Required:%u, Configured:%u\n", + required_intr, enic->conf_intr_count); + rc = -EINVAL; + } if (rc == 0) { enic->rq_count = eth_dev->data->nb_rx_queues; enic->wq_count = eth_dev->data->nb_tx_queues; enic->cq_count = enic->rq_count + enic->wq_count; + enic->intr_count = required_intr; } return rc; @@ -1176,20 +1418,26 @@ int enic_set_mtu(struct enic *enic, uint16_t new_mtu) "MTU (%u) is greater than value configured in NIC (%u)\n", new_mtu, config_mtu); - /* The easy case is when scatter is disabled. However if the MTU - * becomes greater than the mbuf data size, packet drops will ensue. + /* Update the MTU and maximum packet length */ + eth_dev->data->mtu = new_mtu; + eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = + enic_mtu_to_max_rx_pktlen(new_mtu); + + /* + * If the device has not started (enic_enable), nothing to do. 
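The interrupt arithmetic in enic_set_vnic_res() above ties back to the vector layout defined in enic.h at the top of this patch: vector 0 (ENICPMD_LSC_INTR_OFFSET) always carries link-state and error interrupts, and when Rx interrupts are configured, Rx queue q uses vector q + ENICPMD_RXQ_INTR_OFFSET. A small sketch of the accounting:

#include <stdbool.h>

/* Illustrative only: interrupt vector accounting, as in
 * enic_set_vnic_res(). With 4 Rx queues and intr_conf.rxq set this
 * returns 5: vector 0 for LSC/errors, vectors 1..4 for Rx queues 0..3. */
static unsigned int
required_interrupts(unsigned int nb_rx_queues, bool rxq_intr)
{
	unsigned int required = 1; /* LSC vector, reserved even if lsc=0 */

	if (rxq_intr)
		required += nb_rx_queues; /* one vector per Rx queue */
	return required;
}

The same offsets reappear in enicpmd_dev_rx_queue_intr_enable/disable(), which unmask or mask enic->intr[rx_queue_id + ENICPMD_RXQ_INTR_OFFSET], and in enic_rxq_intr_init(), which points intr_vec[i] at vector i + ENICPMD_RXQ_INTR_OFFSET.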
+ * Later, enic_enable() will set up RQs reflecting the new maximum + * packet length. */ - if (!(enic->rte_dev->data->dev_conf.rxmode.offloads & - DEV_RX_OFFLOAD_SCATTER)) { - eth_dev->data->mtu = new_mtu; + if (!eth_dev->data->dev_started) goto set_mtu_done; - } - /* Rx scatter is enabled so reconfigure RQ's on the fly. The point is to - * change Rx scatter mode if necessary for better performance. I.e. if - * MTU was greater than the mbuf size and now it's less, scatter Rx - * doesn't have to be used and vice versa. - */ + /* + * The device has started, re-do RQs on the fly. In the process, we + * pick up the new maximum packet length. + * + * Some applications rely on the ability to change MTU without stopping + * the device. So keep this behavior for now. + */ rte_spinlock_lock(&enic->mtu_lock); /* Stop traffic on all RQs */ @@ -1214,12 +1462,12 @@ int enic_set_mtu(struct enic *enic, uint16_t new_mtu) /* now it is safe to reconfigure the RQs */ - /* update the mtu */ - eth_dev->data->mtu = new_mtu; /* free and reallocate RQs with the new MTU */ for (rq_idx = 0; rq_idx < enic->rq_count; rq_idx++) { rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)]; + if (!rq->in_use) + continue; enic_free_rq(rq); rc = enic_alloc_rq(enic, rq_idx, rq->socket_id, rq->mp, @@ -1282,6 +1530,8 @@ static int enic_dev_init(struct enic *enic) /* Queue counts may be zeros. rte_zmalloc returns NULL in that case. */ enic->cq = rte_zmalloc("enic_vnic_cq", sizeof(struct vnic_cq) * enic->conf_cq_count, 8); + enic->intr = rte_zmalloc("enic_vnic_intr", sizeof(struct vnic_intr) * + enic->conf_intr_count, 8); enic->rq = rte_zmalloc("enic_vnic_rq", sizeof(struct vnic_rq) * enic->conf_rq_count, 8); enic->wq = rte_zmalloc("enic_vnic_wq", sizeof(struct vnic_wq) * @@ -1290,6 +1540,10 @@ static int enic_dev_init(struct enic *enic) dev_err(enic, "failed to allocate vnic_cq, aborting.\n"); return -1; } + if (enic->conf_intr_count > 0 && enic->intr == NULL) { + dev_err(enic, "failed to allocate vnic_intr, aborting.\n"); + return -1; + } if (enic->conf_rq_count > 0 && enic->rq == NULL) { dev_err(enic, "failed to allocate vnic_rq, aborting.\n"); return -1; @@ -1319,6 +1573,27 @@ static int enic_dev_init(struct enic *enic) /* set up link status checking */ vnic_dev_notify_set(enic->vdev, -1); /* No Intr for notify */ + enic->overlay_offload = false; + if (!enic->disable_overlay && enic->vxlan && + /* 'VXLAN feature' enables VXLAN, NVGRE, and GENEVE. */ + vnic_dev_overlay_offload_ctrl(enic->vdev, + OVERLAY_FEATURE_VXLAN, + OVERLAY_OFFLOAD_ENABLE) == 0) { + enic->tx_offload_capa |= + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | + DEV_TX_OFFLOAD_GENEVE_TNL_TSO | + DEV_TX_OFFLOAD_VXLAN_TNL_TSO; + /* + * Do not add PKT_TX_OUTER_{IPV4,IPV6} as they are not + * 'offload' flags (i.e. not part of PKT_TX_OFFLOAD_MASK). + */ + enic->tx_offload_mask |= + PKT_TX_OUTER_IP_CKSUM | + PKT_TX_TUNNEL_MASK; + enic->overlay_offload = true; + dev_info(enic, "Overlay offload is enabled\n"); + } + return 0; } @@ -1351,6 +1626,15 @@ int enic_probe(struct enic *enic) enic_alloc_consistent, enic_free_consistent); + /* + * Allocate the consistent memory for stats upfront so both primary and + * secondary processes can dump stats. 
+ */ + err = vnic_dev_alloc_stats_mem(enic->vdev); + if (err) { + dev_err(enic, "Failed to allocate cmd memory, aborting\n"); + goto err_out_unregister; + } /* Issue device open to get device in known state */ err = enic_dev_open(enic); if (err) { diff --git a/drivers/net/enic/enic_res.c b/drivers/net/enic/enic_res.c index c99d6183..6b404c3c 100644 --- a/drivers/net/enic/enic_res.c +++ b/drivers/net/enic/enic_res.c @@ -76,19 +76,24 @@ int enic_get_vnic_config(struct enic *enic) ? "" : "not ")); err = vnic_dev_capable_filter_mode(enic->vdev, &enic->flow_filter_mode, - &enic->filter_tags); + &enic->filter_actions); if (err) { dev_err(enic_get_dev(enic), "Error getting filter modes, %d\n", err); return err; } - dev_info(enic, "Flow api filter mode: %s, Filter tagging %savailable\n", + dev_info(enic, "Flow api filter mode: %s Actions: %s%s%s\n", ((enic->flow_filter_mode == FILTER_DPDK_1) ? "DPDK" : ((enic->flow_filter_mode == FILTER_USNIC_IP) ? "USNIC" : ((enic->flow_filter_mode == FILTER_IPV4_5TUPLE) ? "5TUPLE" : "NONE"))), - ((enic->filter_tags) ? "" : "not ")); + ((enic->filter_actions & FILTER_ACTION_RQ_STEERING_FLAG) ? + "steer " : ""), + ((enic->filter_actions & FILTER_ACTION_FILTER_ID_FLAG) ? + "tag " : ""), + ((enic->filter_actions & FILTER_ACTION_DROP_FLAG) ? + "drop " : "")); c->wq_desc_count = min_t(u32, ENIC_MAX_WQ_DESCS, @@ -117,7 +122,10 @@ int enic_get_vnic_config(struct enic *enic) "loopback tag 0x%04x\n", ENIC_SETTING(enic, TXCSUM) ? "yes" : "no", ENIC_SETTING(enic, RXCSUM) ? "yes" : "no", - ENIC_SETTING(enic, RSS) ? "yes" : "no", + ENIC_SETTING(enic, RSS) ? + (ENIC_SETTING(enic, RSSHASH_UDPIPV4) ? "+UDP" : + ((ENIC_SETTING(enic, RSSHASH_UDP_WEAK) ? "+udp" : + "yes"))) : "no", c->intr_mode == VENET_INTR_MODE_INTX ? "INTx" : c->intr_mode == VENET_INTR_MODE_MSI ? "MSI" : c->intr_mode == VENET_INTR_MODE_ANY ? "any" : @@ -128,6 +136,72 @@ int enic_get_vnic_config(struct enic *enic) c->intr_timer_usec, c->loop_tag); + /* RSS settings from vNIC */ + enic->reta_size = ENIC_RSS_RETA_SIZE; + enic->hash_key_size = ENIC_RSS_HASH_KEY_SIZE; + enic->flow_type_rss_offloads = 0; + if (ENIC_SETTING(enic, RSSHASH_IPV4)) + /* + * IPV4 hash type handles both non-frag and frag packet types. + * TCP/UDP is controlled via a separate flag below. + */ + enic->flow_type_rss_offloads |= ETH_RSS_IPV4 | + ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_OTHER; + if (ENIC_SETTING(enic, RSSHASH_TCPIPV4)) + enic->flow_type_rss_offloads |= ETH_RSS_NONFRAG_IPV4_TCP; + if (ENIC_SETTING(enic, RSSHASH_IPV6)) + /* + * The VIC adapter can perform RSS on IPv6 packets with and + * without extension headers. An IPv6 "fragment" is an IPv6 + * packet with the fragment extension header. 
+ */ + enic->flow_type_rss_offloads |= ETH_RSS_IPV6 | + ETH_RSS_IPV6_EX | ETH_RSS_FRAG_IPV6 | + ETH_RSS_NONFRAG_IPV6_OTHER; + if (ENIC_SETTING(enic, RSSHASH_TCPIPV6)) + enic->flow_type_rss_offloads |= ETH_RSS_NONFRAG_IPV6_TCP | + ETH_RSS_IPV6_TCP_EX; + if (ENIC_SETTING(enic, RSSHASH_UDP_WEAK)) + enic->flow_type_rss_offloads |= + ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP | + ETH_RSS_IPV6_UDP_EX; + if (ENIC_SETTING(enic, RSSHASH_UDPIPV4)) + enic->flow_type_rss_offloads |= ETH_RSS_NONFRAG_IPV4_UDP; + if (ENIC_SETTING(enic, RSSHASH_UDPIPV6)) + enic->flow_type_rss_offloads |= ETH_RSS_NONFRAG_IPV6_UDP | + ETH_RSS_IPV6_UDP_EX; + + /* Zero offloads if RSS is not enabled */ + if (!ENIC_SETTING(enic, RSS)) + enic->flow_type_rss_offloads = 0; + + enic->vxlan = ENIC_SETTING(enic, VXLAN) && + vnic_dev_capable_vxlan(enic->vdev); + /* + * Default hardware capabilities. enic_dev_init() may add additional + * flags if it enables overlay offloads. + */ + enic->tx_offload_capa = + DEV_TX_OFFLOAD_MULTI_SEGS | + DEV_TX_OFFLOAD_VLAN_INSERT | + DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM | + DEV_TX_OFFLOAD_TCP_TSO; + enic->rx_offload_capa = + DEV_RX_OFFLOAD_SCATTER | + DEV_RX_OFFLOAD_JUMBO_FRAME | + DEV_RX_OFFLOAD_CRC_STRIP | + DEV_RX_OFFLOAD_VLAN_STRIP | + DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM; + enic->tx_offload_mask = + PKT_TX_VLAN_PKT | + PKT_TX_IP_CKSUM | + PKT_TX_L4_MASK | + PKT_TX_TCP_SEG; + return 0; } @@ -202,7 +276,8 @@ void enic_free_vnic_resources(struct enic *enic) vnic_rq_free(&enic->rq[i]); for (i = 0; i < enic->cq_count; i++) vnic_cq_free(&enic->cq[i]); - vnic_intr_free(&enic->intr); + for (i = 0; i < enic->intr_count; i++) + vnic_intr_free(&enic->intr[i]); } void enic_get_res_counts(struct enic *enic) diff --git a/drivers/net/enic/enic_res.h b/drivers/net/enic/enic_res.h index cf3a6fde..e68f1307 100644 --- a/drivers/net/enic/enic_res.h +++ b/drivers/net/enic/enic_res.h @@ -31,6 +31,12 @@ #define ENIC_DEFAULT_RX_FREE_THRESH 32 #define ENIC_TX_XMIT_MAX 64 +#define ENIC_RSS_DEFAULT_CPU 0 +#define ENIC_RSS_BASE_CPU 0 +#define ENIC_RSS_HASH_BITS 7 +#define ENIC_RSS_RETA_SIZE (1 << ENIC_RSS_HASH_BITS) +#define ENIC_RSS_HASH_KEY_SIZE 40 + #define ENIC_SETTING(enic, f) ((enic->config.flags & VENETF_##f) ? 1 : 0) diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c index 2fe5a3fa..8853a204 100644 --- a/drivers/net/enic/enic_rxtx.c +++ b/drivers/net/enic/enic_rxtx.c @@ -15,15 +15,6 @@ #include #include -#define ENIC_TX_OFFLOAD_MASK ( \ - PKT_TX_VLAN_PKT | \ - PKT_TX_IP_CKSUM | \ - PKT_TX_L4_MASK | \ - PKT_TX_TCP_SEG) - -#define ENIC_TX_OFFLOAD_NOTSUP_MASK \ - (PKT_TX_OFFLOAD_MASK ^ ENIC_TX_OFFLOAD_MASK) - #define RTE_PMD_USE_PREFETCH #ifdef RTE_PMD_USE_PREFETCH @@ -130,30 +121,73 @@ enic_cq_rx_check_err(struct cq_desc *cqd) /* Lookup table to translate RX CQ flags to mbuf flags. */ static inline uint32_t -enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd) +enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd, uint8_t tnl) { struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd; uint8_t cqrd_flags = cqrd->flags; + /* + * Odd-numbered entries are for tunnel packets. All packet type info + * applies to the inner packet, and there is no info on the outer + * packet. The outer flags in these entries exist only to avoid + * changing enic_cq_rx_to_pkt_flags(). They are cleared from mbuf + * afterwards. 
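Moving the offload sets into struct enic has a knock-on effect in the enic_rxtx.c hunk just below: the compile-time ENIC_TX_OFFLOAD_NOTSUP_MASK is deleted, since overlay offloads can now widen tx_offload_mask at runtime. enic_init_vnic_resources() instead derives a per-WQ mask of the PKT_TX flags the device rejects, which enic_prep_pkts() checks per packet. A minimal sketch of the derivation, assuming the default (non-overlay) supported set above:

#include <rte_mbuf.h>

/* Illustrative only: the unsupported-flags derivation from
 * enic_init_vnic_resources(). PKT_TX_OFFLOAD_MASK (rte_mbuf.h) is the
 * union of all Tx offload flags, so XOR with the supported set leaves
 * exactly the flags the device cannot handle. */
static uint64_t
tx_notsup_mask(uint64_t supported)
{
	return PKT_TX_OFFLOAD_MASK ^ supported;
}

enic_prep_pkts() stops at the first mbuf whose ol_flags intersect this mask, setting rte_errno to ENOTSUP (the hunk below also corrects the old negative -ENOTSUP assignment, since rte_errno takes positive values) and returning how many packets were accepted.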
+ * + * Also, as there is no tunnel type info (VXLAN, NVGRE, or GENEVE), set + * RTE_PTYPE_TUNNEL_GRENAT.. + */ static const uint32_t cq_type_table[128] __rte_cache_aligned = { [0x00] = RTE_PTYPE_UNKNOWN, [0x20] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG, + [0x21] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, [0x22] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP, + [0x23] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, [0x24] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP, + [0x25] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, [0x60] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG, + [0x61] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, [0x62] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP, + [0x63] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, [0x64] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP, + [0x65] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, [0x10] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG, + [0x11] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, [0x12] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP, + [0x13] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, [0x14] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP, - [0x50] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG, - [0x52] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP, - [0x54] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP, + [0x15] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, /* All others reserved */ }; cqrd_flags &= CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT | CQ_ENET_RQ_DESC_FLAGS_IPV4 | CQ_ENET_RQ_DESC_FLAGS_IPV6 | CQ_ENET_RQ_DESC_FLAGS_TCP | CQ_ENET_RQ_DESC_FLAGS_UDP; - return cq_type_table[cqrd_flags]; + return cq_type_table[cqrd_flags + tnl]; } static inline void @@ -200,10 +234,18 @@ enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf) uint32_t l4_flags; l4_flags = mbuf->packet_type & RTE_PTYPE_L4_MASK; - if (enic_cq_rx_desc_ipv4_csum_ok(cqrd)) - pkt_flags |= PKT_RX_IP_CKSUM_GOOD; - else if (mbuf->packet_type & RTE_PTYPE_L3_IPV4) - pkt_flags |= PKT_RX_IP_CKSUM_BAD; + /* + * When overlay offload is enabled, the NIC may + * set ipv4_csum_ok=1 if the inner packet is IPv6.. + * So, explicitly check for IPv4 before checking + * ipv4_csum_ok. 
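The table indexing above relies on a small invariant: after masking, cqrd_flags can only contain the IPV4_FRAGMENT, IPV4, IPV6, TCP, and UDP flag bits, none of which occupy bit 0, so every plain-packet entry sits at an even index and cq_type_table[cqrd_flags + tnl] selects the adjacent odd entry when the packet is tunnelled. A worked example, assuming the table is in scope:

/* Illustrative only: an inner IPv4/UDP packet arriving over a tunnel.
 * 0x22 is the masked CQ flags value for IPv4 + UDP in the table above;
 * tnl is 1 because overlay offload is on and the CQ "fcoe" bit is set. */
uint8_t cqrd_flags = 0x22;
uint8_t tnl = 1;
uint32_t ptype = cq_type_table[cqrd_flags + tnl]; /* entry 0x23:
	RTE_PTYPE_TUNNEL_GRENAT plus the inner IPv4/UDP types */

enic_recv_pkts() later clears the outer RTE_PTYPE_L3/L4 bits for tunnelled packets; as the comment notes, those outer values exist only so enic_cq_rx_to_pkt_flags() needs no changes.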
+ */ + if (mbuf->packet_type & RTE_PTYPE_L3_IPV4) { + if (enic_cq_rx_desc_ipv4_csum_ok(cqrd)) + pkt_flags |= PKT_RX_IP_CKSUM_GOOD; + else + pkt_flags |= PKT_RX_IP_CKSUM_BAD; + } if (l4_flags == RTE_PTYPE_L4_UDP || l4_flags == RTE_PTYPE_L4_TCP) { @@ -245,6 +287,7 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, struct vnic_cq *cq; volatile struct cq_desc *cqd_ptr; uint8_t color; + uint8_t tnl; uint16_t seg_length; struct rte_mbuf *first_seg = sop_rq->pkt_first_seg; struct rte_mbuf *last_seg = sop_rq->pkt_last_seg; @@ -336,10 +379,21 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, continue; } + /* + * When overlay offload is enabled, CQ.fcoe indicates the + * packet is tunnelled. + */ + tnl = enic->overlay_offload && + (ciflags & CQ_ENET_RQ_DESC_FLAGS_FCOE) != 0; /* cq rx flags are only valid if eop bit is set */ - first_seg->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd); + first_seg->packet_type = + enic_cq_rx_flags_to_pkt_type(&cqd, tnl); enic_cq_rx_to_pkt_flags(&cqd, first_seg); - + /* Wipe the outer types set by enic_cq_rx_flags_to_pkt_type() */ + if (tnl) { + first_seg->packet_type &= ~(RTE_PTYPE_L3_MASK | + RTE_PTYPE_L4_MASK); + } if (unlikely(packet_error)) { rte_pktmbuf_free(first_seg); rte_atomic64_inc(&enic->soft_stats.rx_packet_errors); @@ -443,9 +497,10 @@ unsigned int enic_cleanup_wq(__rte_unused struct enic *enic, struct vnic_wq *wq) return 0; } -uint16_t enic_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, +uint16_t enic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) { + struct vnic_wq *wq = (struct vnic_wq *)tx_queue; int32_t ret; uint16_t i; uint64_t ol_flags; @@ -454,8 +509,8 @@ uint16_t enic_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, for (i = 0; i != nb_pkts; i++) { m = tx_pkts[i]; ol_flags = m->ol_flags; - if (ol_flags & ENIC_TX_OFFLOAD_NOTSUP_MASK) { - rte_errno = -ENOTSUP; + if (ol_flags & wq->tx_offload_notsup_mask) { + rte_errno = ENOTSUP; return i; } #ifdef RTE_LIBRTE_ETHDEV_DEBUG @@ -558,6 +613,11 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, offload_mode = WQ_ENET_OFFLOAD_MODE_TSO; mss = tx_pkt->tso_segsz; + /* For tunnel, need the size of outer+inner headers */ + if (ol_flags & PKT_TX_TUNNEL_MASK) { + header_len += tx_pkt->outer_l2_len + + tx_pkt->outer_l3_len; + } } if ((ol_flags & ol_flags_mask) && (header_len == 0)) { diff --git a/drivers/net/enic/meson.build b/drivers/net/enic/meson.build new file mode 100644 index 00000000..bfd4e237 --- /dev/null +++ b/drivers/net/enic/meson.build @@ -0,0 +1,19 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Cisco Systems, Inc. + +sources = files( + 'base/vnic_cq.c', + 'base/vnic_dev.c', + 'base/vnic_intr.c', + 'base/vnic_rq.c', + 'base/vnic_rss.c', + 'base/vnic_wq.c', + 'enic_clsf.c', + 'enic_ethdev.c', + 'enic_flow.c', + 'enic_main.c', + 'enic_res.c', + 'enic_rxtx.c', + ) +deps += ['hash'] +includes += include_directories('base') diff --git a/drivers/net/failsafe/Makefile b/drivers/net/failsafe/Makefile index bd2f0198..81802d09 100644 --- a/drivers/net/failsafe/Makefile +++ b/drivers/net/failsafe/Makefile @@ -1,33 +1,6 @@ -# BSD LICENSE -# -# Copyright 2017 6WIND S.A. -# Copyright 2017 Mellanox. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. 
-# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in -# the documentation and/or other materials provided with the -# distribution. -# * Neither the name of 6WIND S.A. nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# SPDX-License-Identifier: BSD-3-Clause +# Copyright 2017 6WIND S.A. +# Copyright 2017 Mellanox Technologies, Ltd include $(RTE_SDK)/mk/rte.vars.mk diff --git a/drivers/net/failsafe/failsafe.c b/drivers/net/failsafe/failsafe.c index c499bfb9..eafbb75d 100644 --- a/drivers/net/failsafe/failsafe.c +++ b/drivers/net/failsafe/failsafe.c @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2017 6WIND S.A. - * Copyright 2017 Mellanox. + * Copyright 2017 Mellanox Technologies, Ltd */ #include @@ -13,6 +13,8 @@ #include "failsafe_private.h" +int failsafe_logtype; + const char pmd_failsafe_driver_name[] = FAILSAFE_DRIVER_NAME; static const struct rte_eth_link eth_link = { .link_speed = ETH_SPEED_NUM_10G, @@ -202,16 +204,25 @@ fs_eth_dev_create(struct rte_vdev_device *vdev) } snprintf(priv->my_owner.name, sizeof(priv->my_owner.name), FAILSAFE_OWNER_NAME); + DEBUG("Failsafe port %u owner info: %s_%016"PRIX64, dev->data->port_id, + priv->my_owner.name, priv->my_owner.id); + ret = rte_eth_dev_callback_register(RTE_ETH_ALL, RTE_ETH_EVENT_NEW, + failsafe_eth_new_event_callback, + dev); + if (ret) { + ERROR("Failed to register NEW callback"); + goto free_args; + } ret = failsafe_eal_init(dev); if (ret) - goto free_args; + goto unregister_new_callback; ret = fs_mutex_init(priv); if (ret) - goto free_args; + goto unregister_new_callback; ret = failsafe_hotplug_alarm_install(dev); if (ret) { ERROR("Could not set up plug-in event detection"); - goto free_args; + goto unregister_new_callback; } mac = &dev->data->mac_addrs[0]; if (mac_from_arg) { @@ -224,7 +235,7 @@ fs_eth_dev_create(struct rte_vdev_device *vdev) mac); if (ret) { ERROR("Failed to set default MAC address"); - goto free_args; + goto cancel_alarm; } } } else { @@ -257,7 +268,13 @@ fs_eth_dev_create(struct rte_vdev_device *vdev) .fd = -1, .type = RTE_INTR_HANDLE_EXT, }; + rte_eth_dev_probing_finish(dev); return 0; +cancel_alarm: + failsafe_hotplug_alarm_cancel(dev); +unregister_new_callback: + rte_eth_dev_callback_unregister(RTE_ETH_ALL, RTE_ETH_EVENT_NEW, + failsafe_eth_new_event_callback, dev); free_args: failsafe_args_free(dev); free_subs: @@ -277,6 +294,8 @@ fs_rte_eth_free(const char *name) dev = rte_eth_dev_allocated(name); if (dev == NULL) return -ENODEV; + rte_eth_dev_callback_unregister(RTE_ETH_ALL, RTE_ETH_EVENT_NEW, + 
failsafe_eth_new_event_callback, dev); ret = failsafe_eal_uninit(dev); if (ret) ERROR("Error while uninitializing sub-EAL"); @@ -294,10 +313,25 @@ static int rte_pmd_failsafe_probe(struct rte_vdev_device *vdev) { const char *name; + struct rte_eth_dev *eth_dev; name = rte_vdev_device_name(vdev); INFO("Initializing " FAILSAFE_DRIVER_NAME " for %s", name); + + if (rte_eal_process_type() == RTE_PROC_SECONDARY && + strlen(rte_vdev_device_args(vdev)) == 0) { + eth_dev = rte_eth_dev_attach_secondary(name); + if (!eth_dev) { + ERROR("Failed to probe %s", name); + return -1; + } + /* TODO: request info from primary to set up Rx and Tx */ + eth_dev->dev_ops = &failsafe_ops; + rte_eth_dev_probing_finish(eth_dev); + return 0; + } + return fs_eth_dev_create(vdev); } @@ -318,3 +352,12 @@ static struct rte_vdev_driver failsafe_drv = { RTE_PMD_REGISTER_VDEV(net_failsafe, failsafe_drv); RTE_PMD_REGISTER_PARAM_STRING(net_failsafe, PMD_FAILSAFE_PARAM_STRING); + +RTE_INIT(failsafe_init_log); +static void +failsafe_init_log(void) +{ + failsafe_logtype = rte_log_register("pmd.net.failsafe"); + if (failsafe_logtype >= 0) + rte_log_set_level(failsafe_logtype, RTE_LOG_NOTICE); +} diff --git a/drivers/net/failsafe/failsafe_args.c b/drivers/net/failsafe/failsafe_args.c index 366dbea1..2c002b16 100644 --- a/drivers/net/failsafe/failsafe_args.c +++ b/drivers/net/failsafe/failsafe_args.c @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2017 6WIND S.A. - * Copyright 2017 Mellanox. + * Copyright 2017 Mellanox Technologies, Ltd */ #include @@ -14,6 +14,7 @@ #include #include #include +#include #include "failsafe_private.h" @@ -62,7 +63,7 @@ fs_parse_device(struct sub_device *sdev, char *args) d = &sdev->devargs; DEBUG("%s", args); - ret = rte_eal_devargs_parse(args, d); + ret = rte_devargs_parse(d, "%s", args); if (ret) { DEBUG("devargs parsing failed with code %d", ret); return ret; @@ -340,7 +341,7 @@ fs_remove_sub_devices_definition(char params[DEVARGS_MAXLEN]) a = b + 1; } out: - snprintf(params, DEVARGS_MAXLEN, "%s", buffer); + strlcpy(params, buffer, DEVARGS_MAXLEN); return 0; } @@ -392,7 +393,7 @@ failsafe_args_parse(struct rte_eth_dev *dev, const char *params) ret = 0; priv->subs_tx = FAILSAFE_MAX_ETHPORTS; /* default parameters */ - n = snprintf(mut_params, sizeof(mut_params), "%s", params); + n = strlcpy(mut_params, params, sizeof(mut_params)); if (n >= sizeof(mut_params)) { ERROR("Parameter string too long (>=%zu)", sizeof(mut_params)); diff --git a/drivers/net/failsafe/failsafe_eal.c b/drivers/net/failsafe/failsafe_eal.c index c3d67312..5672f396 100644 --- a/drivers/net/failsafe/failsafe_eal.c +++ b/drivers/net/failsafe/failsafe_eal.c @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2017 6WIND S.A. - * Copyright 2017 Mellanox. 
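The snprintf()-to-strlcpy() conversions in failsafe_args.c above preserve the existing truncation check: like snprintf(), strlcpy() returns the full length of the source string even when the copy was truncated, so comparing the return value against the buffer size still detects overflow. A minimal standalone sketch of the pattern (illustrative only, not part of the patch; fs_copy_params is a hypothetical name):

#include <stdio.h>
#include <rte_string_fns.h> /* provides strlcpy when libc lacks it */

static int
fs_copy_params(char *dst, size_t dst_sz, const char *params)
{
	/* strlcpy returns strlen(params), even if dst was truncated */
	size_t n = strlcpy(dst, params, dst_sz);

	if (n >= dst_sz) {
		fprintf(stderr, "parameter string too long (>=%zu)\n", dst_sz);
		return -1;
	}
	return 0;
}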
+ * Copyright 2017 Mellanox Technologies, Ltd */ #include @@ -18,8 +18,9 @@ fs_ethdev_portid_get(const char *name, uint16_t *port_id) return -EINVAL; } len = strlen(name); - RTE_ETH_FOREACH_DEV(pid) { - if (!strncmp(name, rte_eth_devices[pid].device->name, len)) { + for (pid = 0; pid < RTE_MAX_ETHPORTS; pid++) { + if (rte_eth_dev_is_valid_port(pid) && + !strncmp(name, rte_eth_devices[pid].device->name, len)) { *port_id = pid; return 0; } @@ -41,6 +42,8 @@ fs_bus_init(struct rte_eth_dev *dev) continue; da = &sdev->devargs; if (fs_ethdev_portid_get(da->name, &pid) != 0) { + struct rte_eth_dev_owner pid_owner; + ret = rte_eal_hotplug_add(da->bus->name, da->name, da->args); @@ -55,12 +58,26 @@ fs_bus_init(struct rte_eth_dev *dev) ERROR("sub_device %d init went wrong", i); return -ENODEV; } + /* + * The NEW callback tried to take ownership; check + * whether it succeeded or not. + */ + rte_eth_dev_owner_get(pid, &pid_owner); + if (pid_owner.id != PRIV(dev)->my_owner.id) { + INFO("sub_device %d owner (%s_%016"PRIX64") is not mine," + " expected owner (%s_%016"PRIX64"), will try again later", + i, pid_owner.name, pid_owner.id, + PRIV(dev)->my_owner.name, + PRIV(dev)->my_owner.id); + continue; + } } else { + /* The sub-device port was found. */ char devstr[DEVARGS_MAXLEN] = ""; struct rte_devargs *probed_da = rte_eth_devices[pid].device->devargs; - /* Take control of device probed by EAL options. */ + /* Take control of probed device. */ free(da->args); memset(da, 0, sizeof(*da)); if (probed_da != NULL) @@ -69,7 +86,7 @@ fs_bus_init(struct rte_eth_dev *dev) else snprintf(devstr, sizeof(devstr), "%s", rte_eth_devices[pid].device->name); - ret = rte_eal_devargs_parse(devstr, da); + ret = rte_devargs_parse(da, "%s", devstr); if (ret) { ERROR("Probed devargs parsing failed with code" " %d", ret); @@ -77,28 +94,28 @@ fs_bus_init(struct rte_eth_dev *dev) } INFO("Taking control of a probed sub device" " %d named %s", i, da->name); - } - ret = rte_eth_dev_owner_set(pid, &PRIV(dev)->my_owner); - if (ret < 0) { - INFO("sub_device %d owner set failed (%s)," - " will try again later", i, strerror(-ret)); - continue; - } else if (strncmp(rte_eth_devices[pid].device->name, da->name, - strlen(da->name)) != 0) { - /* - * The device probably was removed and its port id was - * reallocated before ownership set. - */ - rte_eth_dev_owner_unset(pid, PRIV(dev)->my_owner.id); - INFO("sub_device %d was probably removed before taking" - " ownership, will try again later", i); - continue; + ret = rte_eth_dev_owner_set(pid, &PRIV(dev)->my_owner); + if (ret < 0) { + INFO("sub_device %d owner set failed (%s), " + "will try again later", i, strerror(-ret)); + continue; + } else if (strncmp(rte_eth_devices[pid].device->name, + da->name, strlen(da->name)) != 0) { + /* + * The device probably was removed and its port + * id was reallocated before ownership set. + */ + rte_eth_dev_owner_unset(pid, + PRIV(dev)->my_owner.id); + INFO("sub_device %d was removed before taking" + " ownership, will try again later", i); + continue; + } } ETH(sdev) = &rte_eth_devices[pid]; SUB_ID(sdev) = i; sdev->fs_dev = dev; sdev->dev = ETH(sdev)->device; - ETH(sdev)->state = RTE_ETH_DEV_DEFERRED; sdev->state = DEV_PROBED; } return 0; } diff --git a/drivers/net/failsafe/failsafe_ether.c b/drivers/net/failsafe/failsafe_ether.c index 2c0bf936..5b5cb3b4 100644 --- a/drivers/net/failsafe/failsafe_ether.c +++ b/drivers/net/failsafe/failsafe_ether.c @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2017 6WIND S.A. - * Copyright 2017 Mellanox.
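With this rework, fs_bus_init() no longer races to claim hotplugged ports itself; the NEW-event callback takes ownership first, and the code above merely reads the result back and compares owner ids. A minimal sketch of that read-back check (illustrative only; fs_owner_matches is a hypothetical helper):

#include <stdbool.h>
#include <stdint.h>
#include <rte_ethdev.h>

static bool
fs_owner_matches(uint16_t port_id, const struct rte_eth_dev_owner *mine)
{
	struct rte_eth_dev_owner cur;

	/* Read back whoever ended up owning the port after probing */
	rte_eth_dev_owner_get(port_id, &cur);
	return cur.id == mine->id;
}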
+ * Copyright 2017 Mellanox Technologies, Ltd */ #include @@ -260,6 +260,7 @@ fs_dev_remove(struct sub_device *sdev) sdev->state = DEV_ACTIVE; /* fallthrough */ case DEV_ACTIVE: + failsafe_eth_dev_unregister_callbacks(sdev); rte_eth_dev_close(PORT_ID(sdev)); sdev->state = DEV_PROBED; /* fallthrough */ @@ -320,6 +321,35 @@ fs_rxtx_clean(struct sub_device *sdev) return 1; } +void +failsafe_eth_dev_unregister_callbacks(struct sub_device *sdev) +{ + int ret; + + if (sdev == NULL) + return; + if (sdev->rmv_callback) { + ret = rte_eth_dev_callback_unregister(PORT_ID(sdev), + RTE_ETH_EVENT_INTR_RMV, + failsafe_eth_rmv_event_callback, + sdev); + if (ret) + WARN("Failed to unregister RMV callback for sub_device" + " %d", SUB_ID(sdev)); + sdev->rmv_callback = 0; + } + if (sdev->lsc_callback) { + ret = rte_eth_dev_callback_unregister(PORT_ID(sdev), + RTE_ETH_EVENT_INTR_LSC, + failsafe_eth_lsc_event_callback, + sdev); + if (ret) + WARN("Failed to unregister LSC callback for sub_device" + " %d", SUB_ID(sdev)); + sdev->lsc_callback = 0; + } +} + void failsafe_dev_remove(struct rte_eth_dev *dev) { @@ -463,3 +493,26 @@ failsafe_eth_lsc_event_callback(uint16_t port_id __rte_unused, else return 0; } + +/* Take sub-device ownership before it becomes exposed to the application. */ +int +failsafe_eth_new_event_callback(uint16_t port_id, + enum rte_eth_event_type event __rte_unused, + void *cb_arg, void *out __rte_unused) +{ + struct rte_eth_dev *fs_dev = cb_arg; + struct sub_device *sdev; + struct rte_eth_dev *dev = &rte_eth_devices[port_id]; + uint8_t i; + + FOREACH_SUBDEV_STATE(sdev, i, fs_dev, DEV_PARSED) { + if (sdev->state >= DEV_PROBED) + continue; + if (strcmp(sdev->devargs.name, dev->device->name) != 0) + continue; + rte_eth_dev_owner_set(port_id, &PRIV(fs_dev)->my_owner); + /* The actual owner will be checked after the port probing. */ + break; + } + return 0; +} diff --git a/drivers/net/failsafe/failsafe_flow.c b/drivers/net/failsafe/failsafe_flow.c index ec8c909b..bfe42fce 100644 --- a/drivers/net/failsafe/failsafe_flow.c +++ b/drivers/net/failsafe/failsafe_flow.c @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2017 6WIND S.A. - * Copyright 2017 Mellanox. + * Copyright 2017 Mellanox Technologies, Ltd */ #include @@ -174,7 +174,7 @@ fs_flow_flush(struct rte_eth_dev *dev, static int fs_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow, - enum rte_flow_action_type type, + const struct rte_flow_action *action, void *arg, struct rte_flow_error *error) { @@ -185,7 +185,7 @@ fs_flow_query(struct rte_eth_dev *dev, if (sdev != NULL) { int ret = rte_flow_query(PORT_ID(sdev), flow->flows[SUB_ID(sdev)], - type, arg, error); + action, arg, error); if ((ret = fs_err(sdev, ret))) { fs_unlock(dev, 0); diff --git a/drivers/net/failsafe/failsafe_intr.c b/drivers/net/failsafe/failsafe_intr.c index 6b7f9c1a..fc6ec37f 100644 --- a/drivers/net/failsafe/failsafe_intr.c +++ b/drivers/net/failsafe/failsafe_intr.c @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause - * Copyright 2018 Mellanox Technologies, Ltd. + * Copyright 2018 Mellanox Technologies, Ltd */ /** diff --git a/drivers/net/failsafe/failsafe_ops.c b/drivers/net/failsafe/failsafe_ops.c index 057e435c..24e91c93 100644 --- a/drivers/net/failsafe/failsafe_ops.c +++ b/drivers/net/failsafe/failsafe_ops.c @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2017 6WIND S.A. - * Copyright 2017 Mellanox. 
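The rmv_callback/lsc_callback bits introduced by this patch make callback registration idempotent across reconfigurations: fs_dev_configure() sets a bit only when registration succeeds, and failsafe_eth_dev_unregister_callbacks() clears it again before a sub-device is closed. A condensed sketch of the registration side, assuming the failsafe_private.h definitions shown in this patch (fs_register_rmv is a hypothetical helper):

#include <rte_ethdev.h>
#include "failsafe_private.h"

static void
fs_register_rmv(struct sub_device *sdev)
{
	if (sdev->rmv_callback)
		return; /* already registered; do not register twice */
	if (rte_eth_dev_callback_register(PORT_ID(sdev),
					  RTE_ETH_EVENT_INTR_RMV,
					  failsafe_eth_rmv_event_callback,
					  sdev) == 0)
		sdev->rmv_callback = 1;
}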
+ * Copyright 2017 Mellanox Technologies, Ltd */ #include @@ -13,6 +13,7 @@ #include #include #include +#include #include "failsafe_private.h" @@ -81,30 +82,22 @@ static struct rte_eth_dev_info default_infos = { DEV_TX_OFFLOAD_MULTI_SEGS | DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM | - DEV_TX_OFFLOAD_TCP_CKSUM, - .flow_type_rss_offloads = 0x0, + DEV_TX_OFFLOAD_TCP_CKSUM | + DEV_TX_OFFLOAD_TCP_TSO, + .flow_type_rss_offloads = + ETH_RSS_IP | + ETH_RSS_UDP | + ETH_RSS_TCP, }; static int fs_dev_configure(struct rte_eth_dev *dev) { struct sub_device *sdev; - uint64_t supp_tx_offloads; - uint64_t tx_offloads; uint8_t i; int ret; fs_lock(dev, 0); - supp_tx_offloads = PRIV(dev)->infos.tx_offload_capa; - tx_offloads = dev->data->dev_conf.txmode.offloads; - if ((tx_offloads & supp_tx_offloads) != tx_offloads) { - rte_errno = ENOTSUP; - ERROR("Some Tx offloads are not supported, " - "requested 0x%" PRIx64 " supported 0x%" PRIx64, - tx_offloads, supp_tx_offloads); - fs_unlock(dev, 0); - return -rte_errno; - } FOREACH_SUBDEV(sdev, i, dev) { int rmv_interrupt = 0; int lsc_interrupt = 0; @@ -145,7 +138,7 @@ fs_dev_configure(struct rte_eth_dev *dev) fs_unlock(dev, 0); return ret; } - if (rmv_interrupt) { + if (rmv_interrupt && sdev->rmv_callback == 0) { ret = rte_eth_dev_callback_register(PORT_ID(sdev), RTE_ETH_EVENT_INTR_RMV, failsafe_eth_rmv_event_callback, @@ -153,9 +146,11 @@ fs_dev_configure(struct rte_eth_dev *dev) if (ret) WARN("Failed to register RMV callback for sub_device %d", SUB_ID(sdev)); + else + sdev->rmv_callback = 1; } dev->data->dev_conf.intr_conf.rmv = 0; - if (lsc_interrupt) { + if (lsc_interrupt && sdev->lsc_callback == 0) { ret = rte_eth_dev_callback_register(PORT_ID(sdev), RTE_ETH_EVENT_INTR_LSC, failsafe_eth_lsc_event_callback, @@ -163,6 +158,8 @@ fs_dev_configure(struct rte_eth_dev *dev) if (ret) WARN("Failed to register LSC callback for sub_device %d", SUB_ID(sdev)); + else + sdev->lsc_callback = 1; } dev->data->dev_conf.intr_conf.lsc = lsc_enabled; sdev->state = DEV_ACTIVE; @@ -289,6 +286,7 @@ fs_dev_close(struct rte_eth_dev *dev) PRIV(dev)->state = DEV_ACTIVE - 1; FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { DEBUG("Closing sub_device %d", i); + failsafe_eth_dev_unregister_callbacks(sdev); rte_eth_dev_close(PORT_ID(sdev)); sdev->state = DEV_ACTIVE - 1; } @@ -296,25 +294,6 @@ fs_dev_close(struct rte_eth_dev *dev) fs_unlock(dev, 0); } -static bool -fs_rxq_offloads_valid(struct rte_eth_dev *dev, uint64_t offloads) -{ - uint64_t port_offloads; - uint64_t queue_supp_offloads; - uint64_t port_supp_offloads; - - port_offloads = dev->data->dev_conf.rxmode.offloads; - queue_supp_offloads = PRIV(dev)->infos.rx_queue_offload_capa; - port_supp_offloads = PRIV(dev)->infos.rx_offload_capa; - if ((offloads & (queue_supp_offloads | port_supp_offloads)) != - offloads) - return false; - /* Verify we have no conflict with port offloads */ - if ((port_offloads ^ offloads) & port_supp_offloads) - return false; - return true; -} - static void fs_rx_queue_release(void *queue) { @@ -367,19 +346,6 @@ fs_rx_queue_setup(struct rte_eth_dev *dev, fs_rx_queue_release(rxq); dev->data->rx_queues[rx_queue_id] = NULL; } - /* Verify application offloads are valid for our port and queue. 
*/ - if (fs_rxq_offloads_valid(dev, rx_conf->offloads) == false) { - rte_errno = ENOTSUP; - ERROR("Rx queue offloads 0x%" PRIx64 - " don't match port offloads 0x%" PRIx64 - " or supported offloads 0x%" PRIx64, - rx_conf->offloads, - dev->data->dev_conf.rxmode.offloads, - PRIV(dev)->infos.rx_offload_capa | - PRIV(dev)->infos.rx_queue_offload_capa); - fs_unlock(dev, 0); - return -rte_errno; - } rxq = rte_zmalloc(NULL, sizeof(*rxq) + sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail, @@ -498,25 +464,6 @@ unlock: return rc; } -static bool -fs_txq_offloads_valid(struct rte_eth_dev *dev, uint64_t offloads) -{ - uint64_t port_offloads; - uint64_t queue_supp_offloads; - uint64_t port_supp_offloads; - - port_offloads = dev->data->dev_conf.txmode.offloads; - queue_supp_offloads = PRIV(dev)->infos.tx_queue_offload_capa; - port_supp_offloads = PRIV(dev)->infos.tx_offload_capa; - if ((offloads & (queue_supp_offloads | port_supp_offloads)) != - offloads) - return false; - /* Verify we have no conflict with port offloads */ - if ((port_offloads ^ offloads) & port_supp_offloads) - return false; - return true; -} - static void fs_tx_queue_release(void *queue) { @@ -556,24 +503,6 @@ fs_tx_queue_setup(struct rte_eth_dev *dev, fs_tx_queue_release(txq); dev->data->tx_queues[tx_queue_id] = NULL; } - /* - * Don't verify queue offloads for applications which - * use the old API. - */ - if (tx_conf != NULL && - (tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) && - fs_txq_offloads_valid(dev, tx_conf->offloads) == false) { - rte_errno = ENOTSUP; - ERROR("Tx queue offloads 0x%" PRIx64 - " don't match port offloads 0x%" PRIx64 - " or supported offloads 0x%" PRIx64, - tx_conf->offloads, - dev->data->dev_conf.txmode.offloads, - PRIV(dev)->infos.tx_offload_capa | - PRIV(dev)->infos.tx_queue_offload_capa); - fs_unlock(dev, 0); - return -rte_errno; - } txq = rte_zmalloc("ethdev TX queue", sizeof(*txq) + sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail, @@ -804,26 +733,29 @@ fs_dev_infos_get(struct rte_eth_dev *dev, } else { uint64_t rx_offload_capa; uint64_t rxq_offload_capa; + uint64_t rss_hf_offload_capa; rx_offload_capa = default_infos.rx_offload_capa; rxq_offload_capa = default_infos.rx_queue_offload_capa; + rss_hf_offload_capa = default_infos.flow_type_rss_offloads; FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) { rte_eth_dev_info_get(PORT_ID(sdev), &PRIV(dev)->infos); rx_offload_capa &= PRIV(dev)->infos.rx_offload_capa; rxq_offload_capa &= PRIV(dev)->infos.rx_queue_offload_capa; + rss_hf_offload_capa &= + PRIV(dev)->infos.flow_type_rss_offloads; } sdev = TX_SUBDEV(dev); rte_eth_dev_info_get(PORT_ID(sdev), &PRIV(dev)->infos); PRIV(dev)->infos.rx_offload_capa = rx_offload_capa; PRIV(dev)->infos.rx_queue_offload_capa = rxq_offload_capa; + PRIV(dev)->infos.flow_type_rss_offloads = rss_hf_offload_capa; PRIV(dev)->infos.tx_offload_capa &= default_infos.tx_offload_capa; PRIV(dev)->infos.tx_queue_offload_capa &= default_infos.tx_queue_offload_capa; - PRIV(dev)->infos.flow_type_rss_offloads &= - default_infos.flow_type_rss_offloads; } rte_memcpy(infos, &PRIV(dev)->infos, sizeof(*infos)); } @@ -997,16 +929,52 @@ fs_mac_addr_add(struct rte_eth_dev *dev, return 0; } -static void +static int fs_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr) { struct sub_device *sdev; uint8_t i; + int ret; fs_lock(dev, 0); - FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) - rte_eth_dev_default_mac_addr_set(PORT_ID(sdev), mac_addr); + FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { + ret = rte_eth_dev_default_mac_addr_set(PORT_ID(sdev), 
mac_addr); + ret = fs_err(sdev, ret); + if (ret) { + ERROR("Operation rte_eth_dev_mac_addr_set failed for sub_device %d with error %d", + i, ret); + fs_unlock(dev, 0); + return ret; + } + } + fs_unlock(dev, 0); + + return 0; +} + +static int +fs_rss_hash_update(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct sub_device *sdev; + uint8_t i; + int ret; + + fs_lock(dev, 0); + FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { + ret = rte_eth_dev_rss_hash_update(PORT_ID(sdev), rss_conf); + ret = fs_err(sdev, ret); + if (ret) { + ERROR("Operation rte_eth_dev_rss_hash_update" + " failed for sub_device %d with error %d", + i, ret); + fs_unlock(dev, 0); + return ret; + } + } fs_unlock(dev, 0); + + return 0; } static int @@ -1068,5 +1036,6 @@ const struct eth_dev_ops failsafe_ops = { .mac_addr_remove = fs_mac_addr_remove, .mac_addr_add = fs_mac_addr_add, .mac_addr_set = fs_mac_addr_set, + .rss_hash_update = fs_rss_hash_update, .filter_ctrl = fs_filter_ctrl, }; diff --git a/drivers/net/failsafe/failsafe_private.h b/drivers/net/failsafe/failsafe_private.h index 2d16ba4c..886af861 100644 --- a/drivers/net/failsafe/failsafe_private.h +++ b/drivers/net/failsafe/failsafe_private.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2017 6WIND S.A. - * Copyright 2017 Mellanox. + * Copyright 2017 Mellanox Technologies, Ltd */ #ifndef _RTE_ETH_FAILSAFE_PRIVATE_H_ @@ -119,6 +119,10 @@ struct sub_device { volatile unsigned int remove:1; /* flow isolation state */ int flow_isolated:1; + /* RMV callback registration state */ + unsigned int rmv_callback:1; + /* LSC callback registration state */ + unsigned int lsc_callback:1; }; struct fs_priv { @@ -211,6 +215,7 @@ int failsafe_eal_uninit(struct rte_eth_dev *dev); /* ETH_DEV */ int failsafe_eth_dev_state_sync(struct rte_eth_dev *dev); +void failsafe_eth_dev_unregister_callbacks(struct sub_device *sdev); void failsafe_dev_remove(struct rte_eth_dev *dev); void failsafe_stats_increment(struct rte_eth_stats *to, struct rte_eth_stats *from); @@ -220,6 +225,9 @@ int failsafe_eth_rmv_event_callback(uint16_t port_id, int failsafe_eth_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type event, void *cb_arg, void *out); +int failsafe_eth_new_event_callback(uint16_t port_id, + enum rte_eth_event_type event, + void *cb_arg, void *out); /* GLOBALS */ @@ -326,8 +334,12 @@ extern int mac_from_arg; #define FS_THREADID_FMT "lu" #endif -#define LOG__(level, m, ...) \ - RTE_LOG(level, PMD, "net_failsafe: " m "%c", __VA_ARGS__) +extern int failsafe_logtype; + +#define LOG__(l, m, ...) \ + rte_log(RTE_LOG_ ## l, failsafe_logtype, \ + "net_failsafe: " m "%c", __VA_ARGS__) + #define LOG_(level, ...) LOG__(level, __VA_ARGS__, '\n') #define DEBUG(...) LOG_(DEBUG, __VA_ARGS__) #define INFO(...) LOG_(INFO, __VA_ARGS__) diff --git a/drivers/net/failsafe/failsafe_rxtx.c b/drivers/net/failsafe/failsafe_rxtx.c index 363cf7ba..7bd0f963 100644 --- a/drivers/net/failsafe/failsafe_rxtx.c +++ b/drivers/net/failsafe/failsafe_rxtx.c @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2017 6WIND S.A. - * Copyright 2017 Mellanox. 
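The LOG__ rewrite above switches the driver from the static PMD logtype to the dynamic logtype registered in failsafe.c, so the driver's verbosity can be tuned per component at runtime via the EAL log-level options. A minimal sketch of the same mechanism in isolation, mirroring the registration shown in failsafe.c (my_logtype and MY_INFO are hypothetical names):

#include <rte_eal.h>
#include <rte_log.h>

static int my_logtype;

RTE_INIT(my_init_log);
static void
my_init_log(void)
{
	/* rte_log_register() returns a non-negative logtype id on success */
	my_logtype = rte_log_register("pmd.net.example");
	if (my_logtype >= 0)
		rte_log_set_level(my_logtype, RTE_LOG_NOTICE);
}

#define MY_INFO(m, ...) \
	rte_log(RTE_LOG_INFO, my_logtype, "net_example: " m "\n", ##__VA_ARGS__)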
+ * Copyright 2017 Mellanox Technologies, Ltd */ #include diff --git a/drivers/net/fm10k/Makefile b/drivers/net/fm10k/Makefile index b059a700..d657dff8 100644 --- a/drivers/net/fm10k/Makefile +++ b/drivers/net/fm10k/Makefile @@ -19,7 +19,8 @@ ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y) # # CFLAGS for icc # -CFLAGS_BASE_DRIVER = -wd174 -wd593 -wd869 -wd981 -wd2259 +CFLAGS_BASE_DRIVER = -diag-disable 174 -diag-disable 593 -diag-disable 869 +CFLAGS_BASE_DRIVER += -diag-disable 981 -diag-disable 2259 else ifeq ($(CONFIG_RTE_TOOLCHAIN_CLANG),y) diff --git a/drivers/net/fm10k/fm10k.h b/drivers/net/fm10k/fm10k.h index 30dad3e2..ef307809 100644 --- a/drivers/net/fm10k/fm10k.h +++ b/drivers/net/fm10k/fm10k.h @@ -180,6 +180,7 @@ struct fm10k_rx_queue { uint8_t drop_en; uint8_t rx_deferred_start; /* don't start this queue in dev start. */ uint16_t rx_ftag_en; /* indicates FTAG RX supported */ + uint64_t offloads; /* offloads of DEV_RX_OFFLOAD_* */ }; /* @@ -211,7 +212,7 @@ struct fm10k_tx_queue { uint16_t next_rs; /* Next pos to set RS flag */ uint16_t next_dd; /* Next pos to check DD flag */ volatile uint32_t *tail_ptr; - uint32_t txq_flags; /* Holds flags for this TXq */ + uint64_t offloads; /* Offloads of DEV_TX_OFFLOAD_* */ uint16_t nb_desc; uint16_t port_id; uint8_t tx_deferred_start; /** don't start this queue in dev start. */ diff --git a/drivers/net/fm10k/fm10k_ethdev.c b/drivers/net/fm10k/fm10k_ethdev.c index 94237610..3ff1b0e0 100644 --- a/drivers/net/fm10k/fm10k_ethdev.c +++ b/drivers/net/fm10k/fm10k_ethdev.c @@ -60,6 +60,13 @@ static void fm10k_set_tx_function(struct rte_eth_dev *dev); static int fm10k_check_ftag(struct rte_devargs *devargs); static int fm10k_link_update(struct rte_eth_dev *dev, int wait_to_complete); +static void fm10k_dev_infos_get(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info); +static uint64_t fm10k_get_rx_queue_offloads_capa(struct rte_eth_dev *dev); +static uint64_t fm10k_get_rx_port_offloads_capa(struct rte_eth_dev *dev); +static uint64_t fm10k_get_tx_queue_offloads_capa(struct rte_eth_dev *dev); +static uint64_t fm10k_get_tx_port_offloads_capa(struct rte_eth_dev *dev); + struct fm10k_xstats_name_off { char name[RTE_ETH_XSTATS_NAME_SIZE]; unsigned offset; @@ -444,8 +451,10 @@ fm10k_dev_configure(struct rte_eth_dev *dev) PMD_INIT_FUNC_TRACE(); - if (dev->data->dev_conf.rxmode.hw_strip_crc == 0) + if ((dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_CRC_STRIP) == 0) PMD_INIT_LOG(WARNING, "fm10k always strip CRC"); + /* multipe queue mode checking */ ret = fm10k_check_mq_mode(dev); if (ret != 0) { @@ -454,6 +463,8 @@ fm10k_dev_configure(struct rte_eth_dev *dev) return ret; } + dev->data->scattered_rx = 0; + return 0; } @@ -756,7 +767,7 @@ fm10k_dev_rx_init(struct rte_eth_dev *dev) /* It adds dual VLAN length for supporting dual VLAN */ if ((dev->data->dev_conf.rxmode.max_rx_pkt_len + 2 * FM10K_VLAN_TAG_SIZE) > buf_size || - dev->data->dev_conf.rxmode.enable_scatter) { + rxq->offloads & DEV_RX_OFFLOAD_SCATTER) { uint32_t reg; dev->data->scattered_rx = 1; reg = FM10K_READ_REG(hw, FM10K_SRRCTL(i)); @@ -1233,13 +1244,11 @@ fm10k_link_update(struct rte_eth_dev *dev, FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private); PMD_INIT_FUNC_TRACE(); - /* The speed is ~50Gbps per Gen3 x8 PCIe interface. For now, we - * leave the speed undefined since there is no 50Gbps Ethernet. 
- */ - dev->data->dev_link.link_speed = 0; + dev->data->dev_link.link_speed = ETH_SPEED_NUM_50G; dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX; dev->data->dev_link.link_status = dev_info->sm_down ? ETH_LINK_DOWN : ETH_LINK_UP; + dev->data->dev_link.link_autoneg = ETH_LINK_FIXED; return 0; } @@ -1377,7 +1386,6 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev, PMD_INIT_FUNC_TRACE(); - dev_info->pci_dev = pdev; dev_info->min_rx_bufsize = FM10K_MIN_RX_BUF_SIZE; dev_info->max_rx_pktlen = FM10K_MAX_PKT_SIZE; dev_info->max_rx_queues = hw->mac.max_queues; @@ -1389,17 +1397,12 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev, dev_info->vmdq_queue_base = 0; dev_info->max_vmdq_pools = ETH_32_POOLS; dev_info->vmdq_queue_num = FM10K_MAX_QUEUES_PF; - dev_info->rx_offload_capa = - DEV_RX_OFFLOAD_VLAN_STRIP | - DEV_RX_OFFLOAD_IPV4_CKSUM | - DEV_RX_OFFLOAD_UDP_CKSUM | - DEV_RX_OFFLOAD_TCP_CKSUM; - dev_info->tx_offload_capa = - DEV_TX_OFFLOAD_VLAN_INSERT | - DEV_TX_OFFLOAD_IPV4_CKSUM | - DEV_TX_OFFLOAD_UDP_CKSUM | - DEV_TX_OFFLOAD_TCP_CKSUM | - DEV_TX_OFFLOAD_TCP_TSO; + dev_info->rx_queue_offload_capa = fm10k_get_rx_queue_offloads_capa(dev); + dev_info->rx_offload_capa = fm10k_get_rx_port_offloads_capa(dev) | + dev_info->rx_queue_offload_capa; + dev_info->tx_queue_offload_capa = fm10k_get_tx_queue_offloads_capa(dev); + dev_info->tx_offload_capa = fm10k_get_tx_port_offloads_capa(dev) | + dev_info->tx_queue_offload_capa; dev_info->hash_key_size = FM10K_RSSRK_SIZE * sizeof(uint32_t); dev_info->reta_size = FM10K_MAX_RSS_INDICES; @@ -1412,6 +1415,7 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev, }, .rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(0), .rx_drop_en = 0, + .offloads = 0, }; dev_info->default_txconf = (struct rte_eth_txconf) { @@ -1422,7 +1426,7 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev, }, .tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(0), .tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(0), - .txq_flags = FM10K_SIMPLE_TX_FLAG, + .offloads = 0, }; dev_info->rx_desc_lim = (struct rte_eth_desc_lim) { @@ -1571,19 +1575,22 @@ static int fm10k_vlan_offload_set(struct rte_eth_dev *dev, int mask) { if (mask & ETH_VLAN_STRIP_MASK) { - if (!dev->data->dev_conf.rxmode.hw_vlan_strip) + if (!(dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_VLAN_STRIP)) PMD_INIT_LOG(ERR, "VLAN stripping is " "always on in fm10k"); } if (mask & ETH_VLAN_EXTEND_MASK) { - if (dev->data->dev_conf.rxmode.hw_vlan_extend) + if (dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_VLAN_EXTEND) PMD_INIT_LOG(ERR, "VLAN QinQ is not " "supported in fm10k"); } if (mask & ETH_VLAN_FILTER_MASK) { - if (!dev->data->dev_conf.rxmode.hw_vlan_filter) + if (!(dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_VLAN_FILTER)) PMD_INIT_LOG(ERR, "VLAN filter is always on in fm10k"); } @@ -1781,6 +1788,27 @@ mempool_element_size_valid(struct rte_mempool *mp) return 1; } +static uint64_t fm10k_get_rx_queue_offloads_capa(struct rte_eth_dev *dev) +{ + RTE_SET_USED(dev); + + return (uint64_t)(DEV_RX_OFFLOAD_SCATTER); +} + +static uint64_t fm10k_get_rx_port_offloads_capa(struct rte_eth_dev *dev) +{ + RTE_SET_USED(dev); + + return (uint64_t)(DEV_RX_OFFLOAD_VLAN_STRIP | + DEV_RX_OFFLOAD_VLAN_FILTER | + DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM | + DEV_RX_OFFLOAD_JUMBO_FRAME | + DEV_RX_OFFLOAD_CRC_STRIP | + DEV_RX_OFFLOAD_HEADER_SPLIT); +} + static int fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, uint16_t nb_desc, unsigned int socket_id, @@ -1791,9 +1819,12 @@ fm10k_rx_queue_setup(struct 
rte_eth_dev *dev, uint16_t queue_id, FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private); struct fm10k_rx_queue *q; const struct rte_memzone *mz; + uint64_t offloads; PMD_INIT_FUNC_TRACE(); + offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads; + /* make sure the mempool element size can account for alignment. */ if (!mempool_element_size_valid(mp)) { PMD_INIT_LOG(ERR, "Error : Mempool element size is too small"); @@ -1838,6 +1869,7 @@ fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, q->queue_id = queue_id; q->tail_ptr = (volatile uint32_t *) &((uint32_t *)hw->hw_addr)[FM10K_RDT(queue_id)]; + q->offloads = offloads; if (handle_rxconf(q, conf)) return -EINVAL; @@ -1947,6 +1979,24 @@ handle_txconf(struct fm10k_tx_queue *q, const struct rte_eth_txconf *conf) return 0; } +static uint64_t fm10k_get_tx_queue_offloads_capa(struct rte_eth_dev *dev) +{ + RTE_SET_USED(dev); + + return 0; +} + +static uint64_t fm10k_get_tx_port_offloads_capa(struct rte_eth_dev *dev) +{ + RTE_SET_USED(dev); + + return (uint64_t)(DEV_TX_OFFLOAD_VLAN_INSERT | + DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM | + DEV_TX_OFFLOAD_TCP_TSO); +} + static int fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, uint16_t nb_desc, unsigned int socket_id, @@ -1955,9 +2005,12 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct fm10k_tx_queue *q; const struct rte_memzone *mz; + uint64_t offloads; PMD_INIT_FUNC_TRACE(); + offloads = conf->offloads | dev->data->dev_conf.txmode.offloads; + /* make sure a valid number of descriptors have been requested */ if (check_nb_desc(FM10K_MIN_TX_DESC, FM10K_MAX_TX_DESC, FM10K_MULT_TX_DESC, nb_desc)) { @@ -1994,7 +2047,7 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, q->nb_desc = nb_desc; q->port_id = dev->data->port_id; q->queue_id = queue_id; - q->txq_flags = conf->txq_flags; + q->offloads = offloads; q->ops = &def_txq_ops; q->tail_ptr = (volatile uint32_t *) &((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)]; @@ -2860,7 +2913,7 @@ fm10k_set_tx_function(struct rte_eth_dev *dev) uint16_t tx_ftag_en = 0; if (rte_eal_process_type() != RTE_PROC_PRIMARY) { - /* primary process has set the ftag flag and txq_flags */ + /* primary process has set the ftag flag and offloads */ txq = dev->data->tx_queues[0]; if (fm10k_tx_vec_condition_check(txq)) { dev->tx_pkt_burst = fm10k_xmit_pkts; diff --git a/drivers/net/fm10k/fm10k_rxtx_vec.c b/drivers/net/fm10k/fm10k_rxtx_vec.c index 498a1781..005fda63 100644 --- a/drivers/net/fm10k/fm10k_rxtx_vec.c +++ b/drivers/net/fm10k/fm10k_rxtx_vec.c @@ -210,7 +210,7 @@ fm10k_rx_vec_condition_check(struct rte_eth_dev *dev) #ifndef RTE_FM10K_RX_OLFLAGS_ENABLE /* whithout rx ol_flags, no VP flag report */ - if (rxmode->hw_vlan_extend != 0) + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) return -1; #endif @@ -219,7 +219,7 @@ fm10k_rx_vec_condition_check(struct rte_eth_dev *dev) return -1; /* no header split support */ - if (rxmode->header_split == 1) + if (rxmode->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT) return -1; return 0; @@ -695,7 +695,7 @@ int __attribute__((cold)) fm10k_tx_vec_condition_check(struct fm10k_tx_queue *txq) { /* Vector TX can't offload any features yet */ - if ((txq->txq_flags & FM10K_SIMPLE_TX_FLAG) != FM10K_SIMPLE_TX_FLAG) + if (txq->offloads != 0) return -1; if (txq->tx_ftag_en) diff --git a/drivers/net/i40e/Makefile b/drivers/net/i40e/Makefile index 5663f5b1..3f869a8d 
100644 --- a/drivers/net/i40e/Makefile +++ b/drivers/net/i40e/Makefile @@ -11,6 +11,8 @@ LIB = librte_pmd_i40e.a CFLAGS += -O3 CFLAGS += $(WERROR_FLAGS) -DPF_DRIVER -DVF_DRIVER -DINTEGRATED_VF CFLAGS += -DX722_A0_SUPPORT +CFLAGS += -DALLOW_EXPERIMENTAL_API + LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_hash LDLIBS += -lrte_bus_pci @@ -24,7 +26,7 @@ LIBABIVER := 2 # to disable warnings # ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y) -CFLAGS_BASE_DRIVER = -wd593 -wd188 +CFLAGS_BASE_DRIVER = -diag-disable 593 else ifeq ($(CONFIG_RTE_TOOLCHAIN_CLANG),y) CFLAGS_BASE_DRIVER += -Wno-sign-compare CFLAGS_BASE_DRIVER += -Wno-unused-value @@ -85,6 +87,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_fdir.c SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_flow.c SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += rte_pmd_i40e.c SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_tm.c +SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_vf_representor.c ifeq ($(findstring RTE_MACHINE_CPUFLAG_AVX2,$(CFLAGS)),RTE_MACHINE_CPUFLAG_AVX2) CC_AVX2_SUPPORT=1 diff --git a/drivers/net/i40e/base/i40e_register.h b/drivers/net/i40e/base/i40e_register.h index a482ab90..df66e76a 100644 --- a/drivers/net/i40e/base/i40e_register.h +++ b/drivers/net/i40e/base/i40e_register.h @@ -90,7 +90,7 @@ POSSIBILITY OF SUCH DAMAGE. #define I40E_PF_ARQLEN_ARQCRIT_SHIFT 30 #define I40E_PF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQCRIT_SHIFT) #define I40E_PF_ARQLEN_ARQENABLE_SHIFT 31 -#define I40E_PF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQENABLE_SHIFT) +#define I40E_PF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1u, I40E_PF_ARQLEN_ARQENABLE_SHIFT) #define I40E_PF_ARQT 0x00080480 /* Reset: EMPR */ #define I40E_PF_ARQT_ARQT_SHIFT 0 #define I40E_PF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_PF_ARQT_ARQT_SHIFT) @@ -113,7 +113,7 @@ POSSIBILITY OF SUCH DAMAGE. #define I40E_PF_ATQLEN_ATQCRIT_SHIFT 30 #define I40E_PF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQCRIT_SHIFT) #define I40E_PF_ATQLEN_ATQENABLE_SHIFT 31 -#define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQENABLE_SHIFT) +#define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1u, I40E_PF_ATQLEN_ATQENABLE_SHIFT) #define I40E_PF_ATQT 0x00080400 /* Reset: EMPR */ #define I40E_PF_ATQT_ATQT_SHIFT 0 #define I40E_PF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_PF_ATQT_ATQT_SHIFT) @@ -140,7 +140,7 @@ POSSIBILITY OF SUCH DAMAGE. #define I40E_VF_ARQLEN_ARQCRIT_SHIFT 30 #define I40E_VF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQCRIT_SHIFT) #define I40E_VF_ARQLEN_ARQENABLE_SHIFT 31 -#define I40E_VF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQENABLE_SHIFT) +#define I40E_VF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ARQLEN_ARQENABLE_SHIFT) #define I40E_VF_ARQT(_VF) (0x00082C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ #define I40E_VF_ARQT_MAX_INDEX 127 #define I40E_VF_ARQT_ARQT_SHIFT 0 @@ -168,7 +168,7 @@ POSSIBILITY OF SUCH DAMAGE. #define I40E_VF_ATQLEN_ATQCRIT_SHIFT 30 #define I40E_VF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQCRIT_SHIFT) #define I40E_VF_ATQLEN_ATQENABLE_SHIFT 31 -#define I40E_VF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQENABLE_SHIFT) +#define I40E_VF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ATQLEN_ATQENABLE_SHIFT) #define I40E_VF_ATQT(_VF) (0x00082800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ #define I40E_VF_ATQT_MAX_INDEX 127 #define I40E_VF_ATQT_ATQT_SHIFT 0 @@ -291,7 +291,7 @@ POSSIBILITY OF SUCH DAMAGE. 
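The 0x1 to 0x1u changes throughout this register header all target masks for bit 31. Assuming I40E_MASK(mask, shift) expands to a plain left shift of mask, shifting the signed int constant 0x1 left by 31 overflows the positive range of a 32-bit int, which is undefined behaviour in C; the u suffix makes the operand unsigned and the shift well defined. In isolation:

/* Undefined behaviour: 0x1 is a signed int, and 1 << 31 overflows it */
#define BIT31_BAD  (0x1 << 31)
/* Well defined: the shift is performed on an unsigned int */
#define BIT31_OK   (0x1u << 31)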
#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT 30 #define I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT) #define I40E_PRTDCB_RETSTCC_ETSTC_SHIFT 31 -#define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) +#define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1u, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) #define I40E_PRTDCB_RPPMC 0x001223A0 /* Reset: CORER */ #define I40E_PRTDCB_RPPMC_LANRPPM_SHIFT 0 #define I40E_PRTDCB_RPPMC_LANRPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_LANRPPM_SHIFT) @@ -535,7 +535,7 @@ POSSIBILITY OF SUCH DAMAGE. #define I40E_GLGEN_MSCA_MDICMD_SHIFT 30 #define I40E_GLGEN_MSCA_MDICMD_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDICMD_SHIFT) #define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31 -#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT) +#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK I40E_MASK(0x1u, I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT) #define I40E_GLGEN_MSRWD(_i) (0x0008819C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ #define I40E_GLGEN_MSRWD_MAX_INDEX 3 #define I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT 0 @@ -1274,14 +1274,14 @@ POSSIBILITY OF SUCH DAMAGE. #define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT 30 #define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT) #define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT 31 -#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT) +#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK I40E_MASK(0x1u, I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT) #define I40E_PFLAN_QALLOC 0x001C0400 /* Reset: CORER */ #define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0 #define I40E_PFLAN_QALLOC_FIRSTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_FIRSTQ_SHIFT) #define I40E_PFLAN_QALLOC_LASTQ_SHIFT 16 #define I40E_PFLAN_QALLOC_LASTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_LASTQ_SHIFT) #define I40E_PFLAN_QALLOC_VALID_SHIFT 31 -#define I40E_PFLAN_QALLOC_VALID_MASK I40E_MASK(0x1, I40E_PFLAN_QALLOC_VALID_SHIFT) +#define I40E_PFLAN_QALLOC_VALID_MASK I40E_MASK(0x1u, I40E_PFLAN_QALLOC_VALID_SHIFT) #define I40E_QRX_ENA(_Q) (0x00120000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */ #define I40E_QRX_ENA_MAX_INDEX 1535 #define I40E_QRX_ENA_QENA_REQ_SHIFT 0 @@ -1692,7 +1692,7 @@ POSSIBILITY OF SUCH DAMAGE. #define I40E_GLNVM_SRCTL_START_SHIFT 30 #define I40E_GLNVM_SRCTL_START_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_START_SHIFT) #define I40E_GLNVM_SRCTL_DONE_SHIFT 31 -#define I40E_GLNVM_SRCTL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_DONE_SHIFT) +#define I40E_GLNVM_SRCTL_DONE_MASK I40E_MASK(0x1u, I40E_GLNVM_SRCTL_DONE_SHIFT) #define I40E_GLNVM_SRDATA 0x000B6114 /* Reset: POR */ #define I40E_GLNVM_SRDATA_WRDATA_SHIFT 0 #define I40E_GLNVM_SRDATA_WRDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_WRDATA_SHIFT) @@ -3059,7 +3059,7 @@ POSSIBILITY OF SUCH DAMAGE. #define I40E_PF_VT_PFALLOC_LASTVF_SHIFT 8 #define I40E_PF_VT_PFALLOC_LASTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_LASTVF_SHIFT) #define I40E_PF_VT_PFALLOC_VALID_SHIFT 31 -#define I40E_PF_VT_PFALLOC_VALID_MASK I40E_MASK(0x1, I40E_PF_VT_PFALLOC_VALID_SHIFT) +#define I40E_PF_VT_PFALLOC_VALID_MASK I40E_MASK(0x1u, I40E_PF_VT_PFALLOC_VALID_SHIFT) #define I40E_VP_MDET_RX(_VF) (0x0012A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ #define I40E_VP_MDET_RX_MAX_INDEX 127 #define I40E_VP_MDET_RX_VALID_SHIFT 0 @@ -3196,7 +3196,7 @@ POSSIBILITY OF SUCH DAMAGE. 
#define I40E_VF_ARQLEN1_ARQCRIT_SHIFT 30 #define I40E_VF_ARQLEN1_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQCRIT_SHIFT) #define I40E_VF_ARQLEN1_ARQENABLE_SHIFT 31 -#define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQENABLE_SHIFT) +#define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ARQLEN1_ARQENABLE_SHIFT) #define I40E_VF_ARQT1 0x00007000 /* Reset: EMPR */ #define I40E_VF_ARQT1_ARQT_SHIFT 0 #define I40E_VF_ARQT1_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT1_ARQT_SHIFT) @@ -3219,7 +3219,7 @@ POSSIBILITY OF SUCH DAMAGE. #define I40E_VF_ATQLEN1_ATQCRIT_SHIFT 30 #define I40E_VF_ATQLEN1_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQCRIT_SHIFT) #define I40E_VF_ATQLEN1_ATQENABLE_SHIFT 31 -#define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQENABLE_SHIFT) +#define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ATQLEN1_ATQENABLE_SHIFT) #define I40E_VF_ATQT1 0x00008400 /* Reset: EMPR */ #define I40E_VF_ATQT1_ATQT_SHIFT 0 #define I40E_VF_ATQT1_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT1_ATQT_SHIFT) diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c index 508b4171..13c5d329 100644 --- a/drivers/net/i40e/i40e_ethdev.c +++ b/drivers/net/i40e/i40e_ethdev.c @@ -11,6 +11,7 @@ #include #include +#include #include #include #include @@ -213,7 +214,7 @@ /* Bit mask of Extended Tag enable/disable */ #define PCI_DEV_CTRL_EXT_TAG_MASK (1 << PCI_DEV_CTRL_EXT_TAG_SHIFT) -static int eth_i40e_dev_init(struct rte_eth_dev *eth_dev); +static int eth_i40e_dev_init(struct rte_eth_dev *eth_dev, void *init_params); static int eth_i40e_dev_uninit(struct rte_eth_dev *eth_dev); static int i40e_dev_configure(struct rte_eth_dev *dev); static int i40e_dev_start(struct rte_eth_dev *dev); @@ -369,7 +370,12 @@ static int i40e_get_eeprom_length(struct rte_eth_dev *dev); static int i40e_get_eeprom(struct rte_eth_dev *dev, struct rte_dev_eeprom_info *eeprom); -static void i40e_set_default_mac_addr(struct rte_eth_dev *dev, +static int i40e_get_module_info(struct rte_eth_dev *dev, + struct rte_eth_dev_module_info *modinfo); +static int i40e_get_module_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *info); + +static int i40e_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr); static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); @@ -489,6 +495,8 @@ static const struct eth_dev_ops i40e_eth_dev_ops = { .get_reg = i40e_get_regs, .get_eeprom_length = i40e_get_eeprom_length, .get_eeprom = i40e_get_eeprom, + .get_module_info = i40e_get_module_info, + .get_module_eeprom = i40e_get_module_eeprom, .mac_addr_set = i40e_set_default_mac_addr, .mtu_set = i40e_dev_mtu_set, .tm_ops_get = i40e_tm_ops_get, @@ -607,16 +615,74 @@ static const struct rte_i40e_xstats_name_off rte_i40e_txq_prio_strings[] = { #define I40E_NB_TXQ_PRIO_XSTATS (sizeof(rte_i40e_txq_prio_strings) / \ sizeof(rte_i40e_txq_prio_strings[0])) -static int eth_i40e_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, +static int +eth_i40e_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, struct rte_pci_device *pci_dev) { - return rte_eth_dev_pci_generic_probe(pci_dev, - sizeof(struct i40e_adapter), eth_i40e_dev_init); + char name[RTE_ETH_NAME_MAX_LEN]; + struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 }; + int i, retval; + + if (pci_dev->device.devargs) { + retval = rte_eth_devargs_parse(pci_dev->device.devargs->args, + ð_da); + if (retval) + return retval; + } + + retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name, + 
sizeof(struct i40e_adapter), + eth_dev_pci_specific_init, pci_dev, + eth_i40e_dev_init, NULL); + + if (retval || eth_da.nb_representor_ports < 1) + return retval; + + /* probe VF representor ports */ + struct rte_eth_dev *pf_ethdev = rte_eth_dev_allocated( + pci_dev->device.name); + + if (pf_ethdev == NULL) + return -ENODEV; + + for (i = 0; i < eth_da.nb_representor_ports; i++) { + struct i40e_vf_representor representor = { + .vf_id = eth_da.representor_ports[i], + .switch_domain_id = I40E_DEV_PRIVATE_TO_PF( + pf_ethdev->data->dev_private)->switch_domain_id, + .adapter = I40E_DEV_PRIVATE_TO_ADAPTER( + pf_ethdev->data->dev_private) + }; + + /* representor port net_bdf_port */ + snprintf(name, sizeof(name), "net_%s_representor_%d", + pci_dev->device.name, eth_da.representor_ports[i]); + + retval = rte_eth_dev_create(&pci_dev->device, name, + sizeof(struct i40e_vf_representor), NULL, NULL, + i40e_vf_representor_init, &representor); + + if (retval) + PMD_DRV_LOG(ERR, "failed to create i40e vf " + "representor %s.", name); + } + + return 0; } static int eth_i40e_pci_remove(struct rte_pci_device *pci_dev) { - return rte_eth_dev_pci_generic_remove(pci_dev, eth_i40e_dev_uninit); + struct rte_eth_dev *ethdev; + + ethdev = rte_eth_dev_allocated(pci_dev->device.name); + if (!ethdev) + return -ENODEV; + + + if (ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR) + return rte_eth_dev_destroy(ethdev, i40e_vf_representor_uninit); + else + return rte_eth_dev_destroy(ethdev, eth_i40e_dev_uninit); } static struct rte_pci_driver rte_i40e_pmd = { @@ -627,41 +693,17 @@ static struct rte_pci_driver rte_i40e_pmd = { .remove = eth_i40e_pci_remove, }; -static inline int -rte_i40e_dev_atomic_read_link_status(struct rte_eth_dev *dev, - struct rte_eth_link *link) - { - struct rte_eth_link *dst = link; - struct rte_eth_link *src = &(dev->data->dev_link); - - if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, - *(uint64_t *)src) == 0) - return -1; - - return 0; -} - -static inline int -rte_i40e_dev_atomic_write_link_status(struct rte_eth_dev *dev, - struct rte_eth_link *link) -{ - struct rte_eth_link *dst = &(dev->data->dev_link); - struct rte_eth_link *src = link; - - if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, - *(uint64_t *)src) == 0) - return -1; - - return 0; -} - static inline void -i40e_write_global_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val) +i40e_write_global_rx_ctl(struct i40e_hw *hw, uint32_t reg_addr, + uint32_t reg_val) { + uint32_t ori_reg_val; + + ori_reg_val = i40e_read_rx_ctl(hw, reg_addr); i40e_write_rx_ctl(hw, reg_addr, reg_val); - PMD_DRV_LOG(DEBUG, "Global register 0x%08x is modified " - "with value 0x%08x", - reg_addr, reg_val); + PMD_DRV_LOG(DEBUG, + "Global register [0x%08x] original: 0x%08x, after: 0x%08x", + reg_addr, ori_reg_val, reg_val); } RTE_PMD_REGISTER_PCI(net_i40e, rte_i40e_pmd); @@ -1118,7 +1160,31 @@ i40e_support_multi_driver(struct rte_eth_dev *dev) } static int -eth_i40e_dev_init(struct rte_eth_dev *dev) +i40e_aq_debug_write_global_register(struct i40e_hw *hw, + uint32_t reg_addr, uint64_t reg_val, + struct i40e_asq_cmd_details *cmd_details) +{ + uint64_t ori_reg_val; + int ret; + + ret = i40e_aq_debug_read_register(hw, reg_addr, &ori_reg_val, NULL); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, + "Failed to debug read from 0x%08x", + reg_addr); + return -EIO; + } + + PMD_DRV_LOG(DEBUG, + "Global register [0x%08x] original: 0x%"PRIx64 + ", after: 0x%"PRIx64, + reg_addr, ori_reg_val, reg_val); + + return i40e_aq_debug_write_register(hw, reg_addr,
reg_val, cmd_details); +} + +static int +eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused) { struct rte_pci_device *pci_dev; struct rte_intr_handle *intr_handle; @@ -1224,7 +1290,8 @@ eth_i40e_dev_init(struct rte_eth_dev *dev) /* initialise the L3_MAP register */ if (!pf->support_multi_driver) { - ret = i40e_aq_debug_write_register(hw, I40E_GLQF_L3_MAP(40), + ret = i40e_aq_debug_write_global_register(hw, + I40E_GLQF_L3_MAP(40), 0x00000028, NULL); if (ret) PMD_INIT_LOG(ERR, "Failed to write L3 MAP register %d", @@ -1533,6 +1600,7 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev) struct rte_flow *p_flow; int ret; uint8_t aq_fail = 0; + int retries = 0; PMD_INIT_FUNC_TRACE(); @@ -1544,6 +1612,10 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev) pci_dev = RTE_ETH_DEV_TO_PCI(dev); intr_handle = &pci_dev->intr_handle; + ret = rte_eth_switch_domain_free(pf->switch_domain_id); + if (ret) + PMD_INIT_LOG(WARNING, "failed to free switch domain: %d", ret); + if (hw->adapter_stopped == 0) i40e_dev_close(dev); @@ -1574,9 +1646,20 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev) /* disable uio intr before callback unregister */ rte_intr_disable(intr_handle); - /* register callback func to eal lib */ - rte_intr_callback_unregister(intr_handle, - i40e_dev_interrupt_handler, dev); + /* unregister callback func to eal lib */ + do { + ret = rte_intr_callback_unregister(intr_handle, + i40e_dev_interrupt_handler, dev); + if (ret >= 0) { + break; + } else if (ret != -EAGAIN) { + PMD_INIT_LOG(ERR, + "intr callback unregister failed: %d", + ret); + return ret; + } + i40e_msec_delay(500); + } while (retries++ < 5); i40e_rm_ethtype_filter_list(pf); i40e_rm_tunnel_filter_list(pf); @@ -2286,6 +2369,8 @@ i40e_dev_close(struct rte_eth_dev *dev) i40e_pf_disable_irq0(hw); rte_intr_disable(intr_handle); + i40e_fdir_teardown(pf); + /* shutdown and destroy the HMC */ i40e_shutdown_lan_hmc(hw); @@ -2297,7 +2382,6 @@ i40e_dev_close(struct rte_eth_dev *dev) pf->vmdq = NULL; /* release all the existing VSIs and VEBs */ - i40e_fdir_teardown(pf); i40e_vsi_release(pf->main_vsi); /* shutdown the adminq */ @@ -2339,7 +2423,7 @@ i40e_dev_reset(struct rte_eth_dev *dev) if (ret) return ret; - ret = eth_i40e_dev_init(dev); + ret = eth_i40e_dev_init(dev, NULL); return ret; } @@ -2437,84 +2521,143 @@ i40e_dev_set_link_down(struct rte_eth_dev *dev) return i40e_phy_conf_link(hw, abilities, speed, false); } -int -i40e_dev_link_update(struct rte_eth_dev *dev, - int wait_to_complete) +static __rte_always_inline void +update_link_reg(struct i40e_hw *hw, struct rte_eth_link *link) +{ +/* Link status registers and values*/ +#define I40E_PRTMAC_LINKSTA 0x001E2420 +#define I40E_REG_LINK_UP 0x40000080 +#define I40E_PRTMAC_MACC 0x001E24E0 +#define I40E_REG_MACC_25GB 0x00020000 +#define I40E_REG_SPEED_MASK 0x38000000 +#define I40E_REG_SPEED_100MB 0x00000000 +#define I40E_REG_SPEED_1GB 0x08000000 +#define I40E_REG_SPEED_10GB 0x10000000 +#define I40E_REG_SPEED_20GB 0x20000000 +#define I40E_REG_SPEED_25_40GB 0x18000000 + uint32_t link_speed; + uint32_t reg_val; + + reg_val = I40E_READ_REG(hw, I40E_PRTMAC_LINKSTA); + link_speed = reg_val & I40E_REG_SPEED_MASK; + reg_val &= I40E_REG_LINK_UP; + link->link_status = (reg_val == I40E_REG_LINK_UP) ? 
1 : 0; + + if (unlikely(link->link_status == 0)) + return; + + /* Parse the link status */ + switch (link_speed) { + case I40E_REG_SPEED_100MB: + link->link_speed = ETH_SPEED_NUM_100M; + break; + case I40E_REG_SPEED_1GB: + link->link_speed = ETH_SPEED_NUM_1G; + break; + case I40E_REG_SPEED_10GB: + link->link_speed = ETH_SPEED_NUM_10G; + break; + case I40E_REG_SPEED_20GB: + link->link_speed = ETH_SPEED_NUM_20G; + break; + case I40E_REG_SPEED_25_40GB: + reg_val = I40E_READ_REG(hw, I40E_PRTMAC_MACC); + + if (reg_val & I40E_REG_MACC_25GB) + link->link_speed = ETH_SPEED_NUM_25G; + else + link->link_speed = ETH_SPEED_NUM_40G; + + break; + default: + PMD_DRV_LOG(ERR, "Unknown link speed info %u", link_speed); + break; + } +} + +static __rte_always_inline void +update_link_aq(struct i40e_hw *hw, struct rte_eth_link *link, + bool enable_lse, int wait_to_complete) { -#define CHECK_INTERVAL 100 /* 100ms */ -#define MAX_REPEAT_TIME 10 /* 1s (10 * 100ms) in total */ - struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); +#define CHECK_INTERVAL 100 /* 100ms */ +#define MAX_REPEAT_TIME 10 /* 1s (10 * 100ms) in total */ + uint32_t rep_cnt = MAX_REPEAT_TIME; struct i40e_link_status link_status; - struct rte_eth_link link, old; int status; - unsigned rep_cnt = MAX_REPEAT_TIME; - bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false; - memset(&link, 0, sizeof(link)); - memset(&old, 0, sizeof(old)); memset(&link_status, 0, sizeof(link_status)); - rte_i40e_dev_atomic_read_link_status(dev, &old); do { + memset(&link_status, 0, sizeof(link_status)); + /* Get link status information from hardware */ status = i40e_aq_get_link_info(hw, enable_lse, &link_status, NULL); - if (status != I40E_SUCCESS) { - link.link_speed = ETH_SPEED_NUM_100M; - link.link_duplex = ETH_LINK_FULL_DUPLEX; + if (unlikely(status != I40E_SUCCESS)) { + link->link_speed = ETH_SPEED_NUM_100M; + link->link_duplex = ETH_LINK_FULL_DUPLEX; PMD_DRV_LOG(ERR, "Failed to get link info"); - goto out; + return; } - link.link_status = link_status.link_info & I40E_AQ_LINK_UP; - if (!wait_to_complete || link.link_status) + link->link_status = link_status.link_info & I40E_AQ_LINK_UP; + if (!wait_to_complete || link->link_status) break; rte_delay_ms(CHECK_INTERVAL); } while (--rep_cnt); - if (!link.link_status) - goto out; - - /* i40e uses full duplex only */ - link.link_duplex = ETH_LINK_FULL_DUPLEX; - /* Parse the link status */ switch (link_status.link_speed) { case I40E_LINK_SPEED_100MB: - link.link_speed = ETH_SPEED_NUM_100M; + link->link_speed = ETH_SPEED_NUM_100M; break; case I40E_LINK_SPEED_1GB: - link.link_speed = ETH_SPEED_NUM_1G; + link->link_speed = ETH_SPEED_NUM_1G; break; case I40E_LINK_SPEED_10GB: - link.link_speed = ETH_SPEED_NUM_10G; + link->link_speed = ETH_SPEED_NUM_10G; break; case I40E_LINK_SPEED_20GB: - link.link_speed = ETH_SPEED_NUM_20G; + link->link_speed = ETH_SPEED_NUM_20G; break; case I40E_LINK_SPEED_25GB: - link.link_speed = ETH_SPEED_NUM_25G; + link->link_speed = ETH_SPEED_NUM_25G; break; case I40E_LINK_SPEED_40GB: - link.link_speed = ETH_SPEED_NUM_40G; + link->link_speed = ETH_SPEED_NUM_40G; break; default: - link.link_speed = ETH_SPEED_NUM_100M; + link->link_speed = ETH_SPEED_NUM_100M; break; } +} +int +i40e_dev_link_update(struct rte_eth_dev *dev, + int wait_to_complete) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_eth_link link; + bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? 
true : false; + int ret; + + memset(&link, 0, sizeof(link)); + + /* i40e uses full duplex only */ + link.link_duplex = ETH_LINK_FULL_DUPLEX; link.link_autoneg = !(dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED); -out: - rte_i40e_dev_atomic_write_link_status(dev, &link); - if (link.link_status == old.link_status) - return -1; + if (!wait_to_complete && !enable_lse) + update_link_reg(hw, &link); + else + update_link_aq(hw, &link, enable_lse, wait_to_complete); + ret = rte_eth_linkstatus_set(dev, &link); i40e_notify_all_vfs_link_status(dev); - return 0; + return ret; } /* Get all the statistics of a VSI */ @@ -3169,13 +3312,13 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) struct i40e_vsi *vsi = pf->main_vsi; struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); - dev_info->pci_dev = pci_dev; dev_info->max_rx_queues = vsi->nb_qps; dev_info->max_tx_queues = vsi->nb_qps; dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN; dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX; dev_info->max_mac_addrs = vsi->max_macaddrs; dev_info->max_vfs = pci_dev->max_vfs; + dev_info->rx_queue_offload_capa = 0; dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP | DEV_RX_OFFLOAD_QINQ_STRIP | @@ -3183,7 +3326,12 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | - DEV_RX_OFFLOAD_CRC_STRIP; + DEV_RX_OFFLOAD_CRC_STRIP | + DEV_RX_OFFLOAD_VLAN_EXTEND | + DEV_RX_OFFLOAD_VLAN_FILTER | + DEV_RX_OFFLOAD_JUMBO_FRAME; + + dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE; dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT | DEV_TX_OFFLOAD_QINQ_INSERT | @@ -3196,7 +3344,13 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) DEV_TX_OFFLOAD_VXLAN_TNL_TSO | DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_IPIP_TNL_TSO | - DEV_TX_OFFLOAD_GENEVE_TNL_TSO; + DEV_TX_OFFLOAD_GENEVE_TNL_TSO | + DEV_TX_OFFLOAD_MULTI_SEGS | + dev_info->tx_queue_offload_capa; + dev_info->dev_capa = + RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP | + RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP; + dev_info->hash_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t); dev_info->reta_size = pf->hash_lut_size; @@ -3210,6 +3364,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) }, .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH, .rx_drop_en = 0, + .offloads = 0, }; dev_info->default_txconf = (struct rte_eth_txconf) { @@ -3220,8 +3375,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) }, .tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH, .tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH, - .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | - ETH_TXQ_FLAGS_NOOFFLOADS, + .offloads = 0, }; dev_info->rx_desc_lim = (struct rte_eth_desc_lim) { @@ -3248,15 +3402,42 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->max_tx_queues += dev_info->vmdq_queue_num; } - if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) + if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) { /* For XL710 */ dev_info->speed_capa = ETH_LINK_SPEED_40G; - else if (I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) + dev_info->default_rxportconf.nb_queues = 2; + dev_info->default_txportconf.nb_queues = 2; + if (dev->data->nb_rx_queues == 1) + dev_info->default_rxportconf.ring_size = 2048; + else + dev_info->default_rxportconf.ring_size = 1024; + if (dev->data->nb_tx_queues == 1) + dev_info->default_txportconf.ring_size = 1024; + else + 
dev_info->default_txportconf.ring_size = 512; + + } else if (I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) { /* For XXV710 */ dev_info->speed_capa = ETH_LINK_SPEED_25G; - else + dev_info->default_rxportconf.nb_queues = 1; + dev_info->default_txportconf.nb_queues = 1; + dev_info->default_rxportconf.ring_size = 256; + dev_info->default_txportconf.ring_size = 256; + } else { /* For X710 */ dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G; + dev_info->default_rxportconf.nb_queues = 1; + dev_info->default_txportconf.nb_queues = 1; + if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_10G) { + dev_info->default_rxportconf.ring_size = 512; + dev_info->default_txportconf.ring_size = 256; + } else { + dev_info->default_rxportconf.ring_size = 256; + dev_info->default_txportconf.ring_size = 256; + } + } + dev_info->default_rxportconf.burst_size = 32; + dev_info->default_txportconf.burst_size = 32; } static int @@ -3307,7 +3488,8 @@ i40e_vlan_tpid_set_by_registers(struct rte_eth_dev *dev, return 0; } - ret = i40e_aq_debug_write_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id), + ret = i40e_aq_debug_write_global_register(hw, + I40E_GL_SWT_L2TAGCTRL(reg_id), reg_w, NULL); if (ret != I40E_SUCCESS) { PMD_DRV_LOG(ERR, @@ -3319,6 +3501,8 @@ i40e_vlan_tpid_set_by_registers(struct rte_eth_dev *dev, "Global register 0x%08x is changed with value 0x%08x", I40E_GL_SWT_L2TAGCTRL(reg_id), (uint32_t)reg_w); + i40e_global_cfg_warning(I40E_WARNING_TPID); + return 0; } @@ -3329,7 +3513,8 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev, { struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); - int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend; + int qinq = dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_VLAN_EXTEND; int ret = 0; if ((vlan_type != ETH_VLAN_TYPE_INNER && @@ -3367,7 +3552,6 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev, /* If NVM API < 1.7, keep the register setting */ ret = i40e_vlan_tpid_set_by_registers(dev, vlan_type, tpid, qinq); - i40e_global_cfg_warning(I40E_WARNING_TPID); return ret; } @@ -3377,9 +3561,11 @@ i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask) { struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); struct i40e_vsi *vsi = pf->main_vsi; + struct rte_eth_rxmode *rxmode; + rxmode = &dev->data->dev_conf.rxmode; if (mask & ETH_VLAN_FILTER_MASK) { - if (dev->data->dev_conf.rxmode.hw_vlan_filter) + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) i40e_vsi_config_vlan_filter(vsi, TRUE); else i40e_vsi_config_vlan_filter(vsi, FALSE); @@ -3387,14 +3573,14 @@ i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask) if (mask & ETH_VLAN_STRIP_MASK) { /* Enable or disable VLAN stripping */ - if (dev->data->dev_conf.rxmode.hw_vlan_strip) + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) i40e_vsi_config_vlan_stripping(vsi, TRUE); else i40e_vsi_config_vlan_stripping(vsi, FALSE); } if (mask & ETH_VLAN_EXTEND_MASK) { - if (dev->data->dev_conf.rxmode.hw_vlan_extend) { + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) { i40e_vsi_config_double_vlan(vsi, TRUE); /* Set global registers with default ethertype. 
*/ i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER, @@ -3641,6 +3827,7 @@ i40e_macaddr_add(struct rte_eth_dev *dev, struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); struct i40e_mac_filter_info mac_filter; struct i40e_vsi *vsi; + struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode; int ret; /* If VMDQ not enabled or configured, return */ @@ -3659,7 +3846,7 @@ i40e_macaddr_add(struct rte_eth_dev *dev, } rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN); - if (dev->data->dev_conf.rxmode.hw_vlan_filter) + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH; else mac_filter.filter_type = RTE_MAC_PERFECT_MATCH; @@ -4010,8 +4197,8 @@ i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw, return I40E_ERR_PARAM; snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, rte_rand()); - mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY, 0, - alignment, RTE_PGSIZE_2M); + mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY, + RTE_MEMZONE_IOVA_CONTIG, alignment, RTE_PGSIZE_2M); if (!mz) return I40E_ERR_NO_MEMORY; @@ -5669,6 +5856,12 @@ i40e_pf_setup(struct i40e_pf *pf) PMD_DRV_LOG(ERR, "Could not get switch config, err %d", ret); return ret; } + + ret = rte_eth_switch_domain_alloc(&pf->switch_domain_id); + if (ret) + PMD_INIT_LOG(WARNING, + "failed to allocate switch domain for device %d", ret); + if (pf->flags & I40E_FLAG_FDIR) { /* make queue allocated first, let FDIR use queue pair 0*/ ret = i40e_res_pool_alloc(&pf->qp_pool, I40E_DEFAULT_QP_NUM_FDIR); @@ -8194,7 +8387,8 @@ i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len) } if (reg != val) { - ret = i40e_aq_debug_write_register(hw, I40E_GL_PRS_FVBM(2), + ret = i40e_aq_debug_write_global_register(hw, + I40E_GL_PRS_FVBM(2), reg, NULL); if (ret != 0) return ret; @@ -9087,11 +9281,11 @@ i40e_check_write_global_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val) { uint32_t reg = i40e_read_rx_ctl(hw, addr); - PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x", addr, reg); if (reg != val) - i40e_write_global_rx_ctl(hw, addr, val); - PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x", addr, - (uint32_t)i40e_read_rx_ctl(hw, addr)); + i40e_write_rx_ctl(hw, addr, val); + PMD_DRV_LOG(DEBUG, + "Global register [0x%08x] original: 0x%08x, after: 0x%08x", + addr, reg, (uint32_t)i40e_read_rx_ctl(hw, addr)); } static void @@ -10321,9 +10515,8 @@ i40e_start_timecounters(struct rte_eth_dev *dev) uint32_t tsync_inc_h; /* Get current link speed. */ - memset(&link, 0, sizeof(link)); i40e_dev_link_update(dev, 1); - rte_i40e_dev_atomic_read_link_status(dev, &link); + rte_eth_linkstatus_get(dev, &link); switch (link.link_speed) { case ETH_SPEED_NUM_40G: @@ -11249,8 +11442,148 @@ static int i40e_get_eeprom(struct rte_eth_dev *dev, return 0; } -static void i40e_set_default_mac_addr(struct rte_eth_dev *dev, - struct ether_addr *mac_addr) +static int i40e_get_module_info(struct rte_eth_dev *dev, + struct rte_eth_dev_module_info *modinfo) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t sff8472_comp = 0; + uint32_t sff8472_swap = 0; + uint32_t sff8636_rev = 0; + i40e_status status; + uint32_t type = 0; + + /* Check if firmware supports reading module EEPROM. */ + if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE)) { + PMD_DRV_LOG(ERR, + "Module EEPROM memory read not supported. 
" + "Please update the NVM image.\n"); + return -EINVAL; + } + + status = i40e_update_link_info(hw); + if (status) + return -EIO; + + if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_EMPTY) { + PMD_DRV_LOG(ERR, + "Cannot read module EEPROM memory. " + "No module connected.\n"); + return -EINVAL; + } + + type = hw->phy.link_info.module_type[0]; + + switch (type) { + case I40E_MODULE_TYPE_SFP: + status = i40e_aq_get_phy_register(hw, + I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE, + I40E_I2C_EEPROM_DEV_ADDR, + I40E_MODULE_SFF_8472_COMP, + &sff8472_comp, NULL); + if (status) + return -EIO; + + status = i40e_aq_get_phy_register(hw, + I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE, + I40E_I2C_EEPROM_DEV_ADDR, + I40E_MODULE_SFF_8472_SWAP, + &sff8472_swap, NULL); + if (status) + return -EIO; + + /* Check if the module requires address swap to access + * the other EEPROM memory page. + */ + if (sff8472_swap & I40E_MODULE_SFF_ADDR_MODE) { + PMD_DRV_LOG(WARNING, + "Module address swap to access " + "page 0xA2 is not supported.\n"); + modinfo->type = RTE_ETH_MODULE_SFF_8079; + modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN; + } else if (sff8472_comp == 0x00) { + /* Module is not SFF-8472 compliant */ + modinfo->type = RTE_ETH_MODULE_SFF_8079; + modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN; + } else { + modinfo->type = RTE_ETH_MODULE_SFF_8472; + modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN; + } + break; + case I40E_MODULE_TYPE_QSFP_PLUS: + /* Read from memory page 0. */ + status = i40e_aq_get_phy_register(hw, + I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE, + 0, + I40E_MODULE_REVISION_ADDR, + &sff8636_rev, NULL); + if (status) + return -EIO; + /* Determine revision compliance byte */ + if (sff8636_rev > 0x02) { + /* Module is SFF-8636 compliant */ + modinfo->type = RTE_ETH_MODULE_SFF_8636; + modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN; + } else { + modinfo->type = RTE_ETH_MODULE_SFF_8436; + modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN; + } + break; + case I40E_MODULE_TYPE_QSFP28: + modinfo->type = RTE_ETH_MODULE_SFF_8636; + modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN; + break; + default: + PMD_DRV_LOG(ERR, "Module type unrecognized\n"); + return -EINVAL; + } + return 0; +} + +static int i40e_get_module_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *info) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + bool is_sfp = false; + i40e_status status; + uint8_t *data = info->data; + uint32_t value = 0; + uint32_t i; + + if (!info || !info->length || !data) + return -EINVAL; + + if (hw->phy.link_info.module_type[0] == I40E_MODULE_TYPE_SFP) + is_sfp = true; + + for (i = 0; i < info->length; i++) { + u32 offset = i + info->offset; + u32 addr = is_sfp ? I40E_I2C_EEPROM_DEV_ADDR : 0; + + /* Check if we need to access the other memory page */ + if (is_sfp) { + if (offset >= RTE_ETH_MODULE_SFF_8079_LEN) { + offset -= RTE_ETH_MODULE_SFF_8079_LEN; + addr = I40E_I2C_EEPROM_DEV_ADDR2; + } + } else { + while (offset >= RTE_ETH_MODULE_SFF_8436_LEN) { + /* Compute memory page number and offset. 
*/ + offset -= RTE_ETH_MODULE_SFF_8436_LEN / 2; + addr++; + } + } + status = i40e_aq_get_phy_register(hw, + I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE, + addr, offset, &value, NULL); + if (status) + return -EIO; + data[i] = (uint8_t)value; + } + return 0; +} + +static int i40e_set_default_mac_addr(struct rte_eth_dev *dev, + struct ether_addr *mac_addr) { struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); @@ -11261,7 +11594,7 @@ static void i40e_set_default_mac_addr(struct rte_eth_dev *dev, if (!is_valid_assigned_ether_addr(mac_addr)) { PMD_DRV_LOG(ERR, "Tried to set invalid MAC address."); - return; + return -EINVAL; } TAILQ_FOREACH(f, &vsi->mac_list, next) { @@ -11271,25 +11604,31 @@ static void i40e_set_default_mac_addr(struct rte_eth_dev *dev, if (f == NULL) { PMD_DRV_LOG(ERR, "Failed to find filter for default mac"); - return; + return -EIO; } mac_filter = f->mac_info; ret = i40e_vsi_delete_mac(vsi, &mac_filter.mac_addr); if (ret != I40E_SUCCESS) { PMD_DRV_LOG(ERR, "Failed to delete mac filter"); - return; + return -EIO; } memcpy(&mac_filter.mac_addr, mac_addr, ETH_ADDR_LEN); ret = i40e_vsi_add_mac(vsi, &mac_filter); if (ret != I40E_SUCCESS) { PMD_DRV_LOG(ERR, "Failed to add mac filter"); - return; + return -EIO; } memcpy(&pf->dev_addr, mac_addr, ETH_ADDR_LEN); - i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL, - mac_addr->addr_bytes, NULL); + ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL, + mac_addr->addr_bytes, NULL); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to change mac"); + return -EIO; + } + + return 0; } static int @@ -11312,9 +11651,11 @@ i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) } if (frame_size > ETHER_MAX_LEN) - dev_data->dev_conf.rxmode.jumbo_frame = 1; + dev_data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; else - dev_data->dev_conf.rxmode.jumbo_frame = 0; + dev_data->dev_conf.rxmode.offloads &= + ~DEV_RX_OFFLOAD_JUMBO_FRAME; dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size; @@ -11413,7 +11754,7 @@ i40e_rss_filter_restore(struct i40e_pf *pf) { struct i40e_rte_flow_rss_conf *conf = &pf->rss_info; - if (conf->num) + if (conf->conf.queue_num) i40e_config_rss_filter(pf, conf, TRUE); } @@ -11456,7 +11797,8 @@ i40e_find_customized_pctype(struct i40e_pf *pf, uint8_t index) static int i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg, uint32_t pkg_size, uint32_t proto_num, - struct rte_pmd_i40e_proto_info *proto) + struct rte_pmd_i40e_proto_info *proto, + enum rte_pmd_i40e_package_op op) { struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); uint32_t pctype_num; @@ -11469,6 +11811,12 @@ i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg, uint32_t i, j, n; int ret; + if (op != RTE_PMD_I40E_PKG_OP_WR_ADD && + op != RTE_PMD_I40E_PKG_OP_WR_DEL) { + PMD_DRV_LOG(ERR, "Unsupported operation."); + return -1; + } + ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size, (uint8_t *)&pctype_num, sizeof(pctype_num), RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM); @@ -11531,8 +11879,13 @@ i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg, i40e_find_customized_pctype(pf, I40E_CUSTOMIZED_GTPU); if (new_pctype) { - new_pctype->pctype = pctype_value; - new_pctype->valid = true; + if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) { + new_pctype->pctype = pctype_value; + new_pctype->valid = true; + } else { + new_pctype->pctype = I40E_FILTER_PCTYPE_INVALID; + new_pctype->valid = false; + } } } @@ -11542,8 
+11895,9 @@ i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg, static int i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg, - uint32_t pkg_size, uint32_t proto_num, - struct rte_pmd_i40e_proto_info *proto) + uint32_t pkg_size, uint32_t proto_num, + struct rte_pmd_i40e_proto_info *proto, + enum rte_pmd_i40e_package_op op) { struct rte_pmd_i40e_ptype_mapping *ptype_mapping; uint16_t port_id = dev->data->port_id; @@ -11556,6 +11910,17 @@ i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg, bool in_tunnel; int ret; + if (op != RTE_PMD_I40E_PKG_OP_WR_ADD && + op != RTE_PMD_I40E_PKG_OP_WR_DEL) { + PMD_DRV_LOG(ERR, "Unsupported operation."); + return -1; + } + + if (op == RTE_PMD_I40E_PKG_OP_WR_DEL) { + rte_pmd_i40e_ptype_mapping_reset(port_id); + return 0; + } + /* get information about new ptype num */ ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size, (uint8_t *)&ptype_num, sizeof(ptype_num), @@ -11728,7 +12093,7 @@ i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg, void i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg, - uint32_t pkg_size) + uint32_t pkg_size, enum rte_pmd_i40e_package_op op) { struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); uint32_t proto_num; @@ -11737,6 +12102,12 @@ i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg, uint32_t i; int ret; + if (op != RTE_PMD_I40E_PKG_OP_WR_ADD && + op != RTE_PMD_I40E_PKG_OP_WR_DEL) { + PMD_DRV_LOG(ERR, "Unsupported operation."); + return; + } + /* get information about protocol number */ ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size, (uint8_t *)&proto_num, sizeof(proto_num), @@ -11770,20 +12141,23 @@ i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg, /* Check if GTP is supported. 
*/ for (i = 0; i < proto_num; i++) { if (!strncmp(proto[i].name, "GTP", 3)) { - pf->gtp_support = true; + if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) + pf->gtp_support = true; + else + pf->gtp_support = false; break; } } /* Update customized pctype info */ ret = i40e_update_customized_pctype(dev, pkg, pkg_size, - proto_num, proto); + proto_num, proto, op); if (ret) PMD_DRV_LOG(INFO, "No pctype is updated."); /* Update customized ptype info */ ret = i40e_update_customized_ptype(dev, pkg, pkg_size, - proto_num, proto); + proto_num, proto, op); if (ret) PMD_DRV_LOG(INFO, "No ptype is updated."); @@ -11911,6 +12285,40 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf) return ret; } +int +i40e_rss_conf_init(struct i40e_rte_flow_rss_conf *out, + const struct rte_flow_action_rss *in) +{ + if (in->key_len > RTE_DIM(out->key) || + in->queue_num > RTE_DIM(out->queue)) + return -EINVAL; + out->conf = (struct rte_flow_action_rss){ + .func = in->func, + .level = in->level, + .types = in->types, + .key_len = in->key_len, + .queue_num = in->queue_num, + .key = memcpy(out->key, in->key, in->key_len), + .queue = memcpy(out->queue, in->queue, + sizeof(*in->queue) * in->queue_num), + }; + return 0; +} + +int +i40e_action_rss_same(const struct rte_flow_action_rss *comp, + const struct rte_flow_action_rss *with) +{ + return (comp->func == with->func && + comp->level == with->level && + comp->types == with->types && + comp->key_len == with->key_len && + comp->queue_num == with->queue_num && + !memcmp(comp->key, with->key, with->key_len) && + !memcmp(comp->queue, with->queue, + sizeof(*with->queue) * with->queue_num)); +} + int i40e_config_rss_filter(struct i40e_pf *pf, struct i40e_rte_flow_rss_conf *conf, bool add) @@ -11918,12 +12326,16 @@ i40e_config_rss_filter(struct i40e_pf *pf, struct i40e_hw *hw = I40E_PF_TO_HW(pf); uint32_t i, lut = 0; uint16_t j, num; - struct rte_eth_rss_conf rss_conf = conf->rss_conf; + struct rte_eth_rss_conf rss_conf = { + .rss_key = conf->conf.key_len ? + (void *)(uintptr_t)conf->conf.key : NULL, + .rss_key_len = conf->conf.key_len, + .rss_hf = conf->conf.types, + }; struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info; if (!add) { - if (memcmp(conf, rss_info, - sizeof(struct i40e_rte_flow_rss_conf)) == 0) { + if (i40e_action_rss_same(&rss_info->conf, &conf->conf)) { i40e_pf_disable_rss(pf); memset(rss_info, 0, sizeof(struct i40e_rte_flow_rss_conf)); @@ -11932,7 +12344,7 @@ i40e_config_rss_filter(struct i40e_pf *pf, return -EINVAL; } - if (rss_info->num) + if (rss_info->conf.queue_num) return -EINVAL; /* If both VMDQ and RSS enabled, not all of PF queues are configured. 
@@ -11943,7 +12355,7 @@ i40e_config_rss_filter(struct i40e_pf *pf, else num = pf->dev_data->nb_rx_queues; - num = RTE_MIN(num, conf->num); + num = RTE_MIN(num, conf->conf.queue_num); PMD_DRV_LOG(INFO, "Max of contiguous %u PF queues are configured", num); @@ -11956,7 +12368,7 @@ i40e_config_rss_filter(struct i40e_pf *pf, for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) { if (j == num) j = 0; - lut = (lut << 8) | (conf->queue[j] & ((0x1 << + lut = (lut << 8) | (conf->conf.queue[j] & ((0x1 << hw->func_caps.rss_table_entry_width) - 1)); if ((i & 3) == 3) I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut); @@ -11981,8 +12393,8 @@ i40e_config_rss_filter(struct i40e_pf *pf, i40e_hw_rss_hash_set(pf, &rss_conf); - rte_memcpy(rss_info, - conf, sizeof(struct i40e_rte_flow_rss_conf)); + if (i40e_rss_conf_init(rss_info, &conf->conf)) + return -EINVAL; return 0; } diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h index 99efb670..11c4c76b 100644 --- a/drivers/net/i40e/i40e_ethdev.h +++ b/drivers/net/i40e/i40e_ethdev.h @@ -5,12 +5,18 @@ #ifndef _I40E_ETHDEV_H_ #define _I40E_ETHDEV_H_ +#include + #include #include #include #include +#include #include #include +#include "rte_pmd_i40e.h" + +#include "base/i40e_register.h" #define I40E_VLAN_TAG_SIZE 4 @@ -80,11 +86,13 @@ #define I40E_WRITE_GLB_REG(hw, reg, value) \ do { \ + uint32_t ori_val; \ + ori_val = I40E_READ_REG((hw), (reg)); \ I40E_PCI_REG_WRITE(I40E_PCI_REG_ADDR((hw), \ (reg)), (value)); \ - PMD_DRV_LOG(DEBUG, "Global register 0x%08x is modified " \ - "with value 0x%08x", \ - (reg), (value)); \ + PMD_DRV_LOG(DEBUG, "global register [0x%08x] " \ + "original: 0x%08x, after: 0x%08x ", \ + (reg), (ori_val), (value)); \ } while (0) /* index flex payload per layer */ @@ -877,9 +885,11 @@ struct i40e_customized_pctype { }; struct i40e_rte_flow_rss_conf { - struct rte_eth_rss_conf rss_conf; /**< RSS parameters. */ + struct rte_flow_action_rss conf; /**< RSS parameters. */ uint16_t queue_region_conf; /**< Queue region config flag */ - uint16_t num; /**< Number of entries in queue[]. */ + uint8_t key[(I40E_VFQF_HKEY_MAX_INDEX > I40E_PFQF_HKEY_MAX_INDEX ? + I40E_VFQF_HKEY_MAX_INDEX : I40E_PFQF_HKEY_MAX_INDEX + 1) * + sizeof(uint32_t)]; /* Hash key. */ uint16_t queue[I40E_MAX_Q_PER_TC]; /**< Queues indices to use. 
*/ }; @@ -956,6 +966,8 @@ struct i40e_pf { bool gtp_support; /* 1 - support GTP-C and GTP-U */ /* customer customized pctype */ struct i40e_customized_pctype customized_pctype[I40E_CUSTOMIZED_MAX]; + /* Switch Domain Id */ + uint16_t switch_domain_id; }; enum pending_msg { @@ -1005,6 +1017,9 @@ struct i40e_vf { uint16_t promisc_flags; /* Promiscuous setting */ uint32_t vlan[I40E_VFTA_SIZE]; /* VLAN bit map */ + struct ether_addr mc_addrs[I40E_NUM_MACADDR_MAX]; /* Multicast addrs */ + uint16_t mc_addrs_num; /* Multicast mac addresses number */ + /* Event from pf */ bool dev_closed; bool link_up; @@ -1058,6 +1073,20 @@ struct i40e_adapter { uint64_t pctypes_mask; }; +/** + * Structure to store private data for each VF representor instance + */ +struct i40e_vf_representor { + uint16_t switch_domain_id; + /**< Switch domain ID */ + uint16_t vf_id; + /**< Virtual Function ID */ + struct i40e_adapter *adapter; + /**< Private data store of associated physical function */ + struct i40e_eth_stats stats_offset; + /**< Zero-point of VF statistics */ +}; + extern const struct rte_flow_ops i40e_flow_ops; union i40e_filter_t { @@ -1206,7 +1235,8 @@ void i40e_tm_conf_uninit(struct rte_eth_dev *dev); struct i40e_customized_pctype* i40e_find_customized_pctype(struct i40e_pf *pf, uint8_t index); void i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg, - uint32_t pkg_size); + uint32_t pkg_size, + enum rte_pmd_i40e_package_op op); int i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb); int i40e_flush_queue_region_all_conf(struct rte_eth_dev *dev, struct i40e_hw *hw, struct i40e_pf *pf, uint16_t on); @@ -1214,8 +1244,14 @@ void i40e_init_queue_region_conf(struct rte_eth_dev *dev); void i40e_flex_payload_reg_set_default(struct i40e_hw *hw); int i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len); int i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size); +int i40e_rss_conf_init(struct i40e_rte_flow_rss_conf *out, + const struct rte_flow_action_rss *in); +int i40e_action_rss_same(const struct rte_flow_action_rss *comp, + const struct rte_flow_action_rss *with); int i40e_config_rss_filter(struct i40e_pf *pf, struct i40e_rte_flow_rss_conf *conf, bool add); +int i40e_vf_representor_init(struct rte_eth_dev *ethdev, void *init_params); +int i40e_vf_representor_uninit(struct rte_eth_dev *ethdev); #define I40E_DEV_TO_PCI(eth_dev) \ RTE_DEV_TO_PCI((eth_dev)->device) diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c index fd003fe0..804e4453 100644 --- a/drivers/net/i40e/i40e_ethdev_vf.c +++ b/drivers/net/i40e/i40e_ethdev_vf.c @@ -120,7 +120,7 @@ static int i40evf_dev_rss_hash_update(struct rte_eth_dev *dev, static int i40evf_dev_rss_hash_conf_get(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf); static int i40evf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); -static void i40evf_set_default_mac_addr(struct rte_eth_dev *dev, +static int i40evf_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr); static int i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id); @@ -130,6 +130,14 @@ static void i40evf_handle_pf_event(struct rte_eth_dev *dev, uint8_t *msg, uint16_t msglen); +static int +i40evf_add_del_mc_addr_list(struct rte_eth_dev *dev, + struct ether_addr *mc_addr_set, + uint32_t nb_mc_addr, bool add); +static int +i40evf_set_mc_addr_list(struct rte_eth_dev *dev, struct ether_addr *mc_addr_set, + uint32_t nb_mc_addr); + /* Default hash key buffer for RSS */ static 
uint32_t rss_key_default[I40E_VFQF_HKEY_MAX_INDEX + 1]; @@ -195,6 +203,7 @@ static const struct eth_dev_ops i40evf_eth_dev_ops = { .txq_info_get = i40e_txq_info_get, .mac_addr_add = i40evf_add_mac_addr, .mac_addr_remove = i40evf_del_mac_addr, + .set_mc_addr_list = i40evf_set_mc_addr_list, .reta_update = i40evf_dev_rss_reta_update, .reta_query = i40evf_dev_rss_reta_query, .rss_hash_update = i40evf_dev_rss_hash_update, @@ -1036,20 +1045,6 @@ static const struct rte_pci_id pci_id_i40evf_map[] = { { .vendor_id = 0, /* sentinel */ }, }; -static inline int -i40evf_dev_atomic_write_link_status(struct rte_eth_dev *dev, - struct rte_eth_link *link) -{ - struct rte_eth_link *dst = &(dev->data->dev_link); - struct rte_eth_link *src = link; - - if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, - *(uint64_t *)src) == 0) - return -1; - - return 0; -} - /* Disable IRQ0 */ static inline void i40evf_disable_irq0(struct i40e_hw *hw) @@ -1541,7 +1536,7 @@ i40evf_dev_configure(struct rte_eth_dev *dev) /* For non-DPDK PF drivers, VF has no ability to disable HW * CRC strip, and is implicitly enabled by the PF. */ - if (!conf->rxmode.hw_strip_crc) { + if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP)) { vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); if ((vf->version_major == VIRTCHNL_VERSION_MAJOR) && (vf->version_minor <= VIRTCHNL_VERSION_MINOR)) { @@ -1575,7 +1570,7 @@ i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask) /* Vlan stripping setting */ if (mask & ETH_VLAN_STRIP_MASK) { /* Enable or disable VLAN stripping */ - if (dev_conf->rxmode.hw_vlan_strip) + if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP) i40evf_enable_vlan_strip(dev); else i40evf_disable_vlan_strip(dev); @@ -1732,7 +1727,7 @@ i40evf_rxq_init(struct rte_eth_dev *dev, struct i40e_rx_queue *rxq) /** * Check if the jumbo frame and maximum packet length are set correctly */ - if (dev_data->dev_conf.rxmode.jumbo_frame == 1) { + if (dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) { if (rxq->max_pkt_len <= ETHER_MAX_LEN || rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) { PMD_DRV_LOG(ERR, "maximum packet length must be " @@ -1752,7 +1747,7 @@ i40evf_rxq_init(struct rte_eth_dev *dev, struct i40e_rx_queue *rxq) } } - if (dev_data->dev_conf.rxmode.enable_scatter || + if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) || (rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size) { dev_data->scattered_rx = 1; } @@ -2012,6 +2007,9 @@ i40evf_dev_start(struct rte_eth_dev *dev) /* Set all mac addrs */ i40evf_add_del_all_mac_addr(dev, TRUE); + /* Set all multicast addresses */ + i40evf_add_del_mc_addr_list(dev, vf->mc_addrs, vf->mc_addrs_num, + TRUE); if (i40evf_start_queues(dev) != 0) { PMD_DRV_LOG(ERR, "enable queues failed"); @@ -2036,6 +2034,8 @@ i40evf_dev_start(struct rte_eth_dev *dev) err_mac: i40evf_add_del_all_mac_addr(dev, FALSE); + i40evf_add_del_mc_addr_list(dev, vf->mc_addrs, vf->mc_addrs_num, + FALSE); err_queue: return -1; } @@ -2046,6 +2046,7 @@ i40evf_dev_stop(struct rte_eth_dev *dev) struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); PMD_INIT_FUNC_TRACE(); @@ -2063,6 +2064,9 @@ i40evf_dev_stop(struct rte_eth_dev *dev) } /* remove all mac addrs */ i40evf_add_del_all_mac_addr(dev, FALSE); + /* remove all multicast addresses */ + i40evf_add_del_mc_addr_list(dev, vf->mc_addrs, 
vf->mc_addrs_num, + FALSE); hw->adapter_stopped = 1; } @@ -2078,6 +2082,7 @@ i40evf_dev_link_update(struct rte_eth_dev *dev, * while Linux driver does not */ + memset(&new_link, 0, sizeof(new_link)); /* Linux driver PF host */ switch (vf->link_speed) { case I40E_LINK_SPEED_100MB: @@ -2107,11 +2112,9 @@ i40evf_dev_link_update(struct rte_eth_dev *dev, new_link.link_status = vf->link_up ? ETH_LINK_UP : ETH_LINK_DOWN; new_link.link_autoneg = - dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED; + !(dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED); - i40evf_dev_atomic_write_link_status(dev, &new_link); - - return 0; + return rte_eth_linkstatus_set(dev, &new_link); } static void @@ -2180,7 +2183,6 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); memset(dev_info, 0, sizeof(*dev_info)); - dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev); dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs; dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs; dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN; @@ -2189,6 +2191,7 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->reta_size = ETH_RSS_RETA_SIZE_64; dev_info->flow_type_rss_offloads = vf->adapter->flow_types_mask; dev_info->max_mac_addrs = I40E_NUM_MACADDR_MAX; + dev_info->rx_queue_offload_capa = 0; dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP | DEV_RX_OFFLOAD_QINQ_STRIP | @@ -2196,7 +2199,12 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | - DEV_RX_OFFLOAD_CRC_STRIP; + DEV_RX_OFFLOAD_CRC_STRIP | + DEV_RX_OFFLOAD_SCATTER | + DEV_RX_OFFLOAD_JUMBO_FRAME | + DEV_RX_OFFLOAD_VLAN_FILTER; + + dev_info->tx_queue_offload_capa = 0; dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT | DEV_TX_OFFLOAD_QINQ_INSERT | @@ -2209,7 +2217,8 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) DEV_TX_OFFLOAD_VXLAN_TNL_TSO | DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_IPIP_TNL_TSO | - DEV_TX_OFFLOAD_GENEVE_TNL_TSO; + DEV_TX_OFFLOAD_GENEVE_TNL_TSO | + DEV_TX_OFFLOAD_MULTI_SEGS; dev_info->default_rxconf = (struct rte_eth_rxconf) { .rx_thresh = { @@ -2219,6 +2228,7 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) }, .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH, .rx_drop_en = 0, + .offloads = 0, }; dev_info->default_txconf = (struct rte_eth_txconf) { @@ -2229,8 +2239,7 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) }, .tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH, .tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH, - .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | - ETH_TXQ_FLAGS_NOOFFLOADS, + .offloads = 0, }; dev_info->rx_desc_lim = (struct rte_eth_desc_lim) { @@ -2281,6 +2290,14 @@ i40evf_dev_close(struct rte_eth_dev *dev) i40evf_dev_stop(dev); i40e_dev_free_queues(dev); + /* + * Disable promiscuous mode before resetting the VF; + * this is a workaround when working with the kernel driver + * and is not the normal path. + */ + i40evf_dev_promiscuous_disable(dev); + i40evf_dev_allmulticast_disable(dev); + i40evf_reset_vf(hw); i40e_shutdown_adminq(hw); /* disable uio intr before callback unregister */ @@ -2649,16 +2666,17 @@ i40evf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) } if (frame_size > ETHER_MAX_LEN) - dev_data->dev_conf.rxmode.jumbo_frame = 1; + dev_data->dev_conf.rxmode.offloads |= + 
DEV_RX_OFFLOAD_JUMBO_FRAME; else - dev_data->dev_conf.rxmode.jumbo_frame = 0; - + dev_data->dev_conf.rxmode.offloads &= + ~DEV_RX_OFFLOAD_JUMBO_FRAME; dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size; return ret; } -static void +static int i40evf_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr) { @@ -2667,15 +2685,99 @@ i40evf_set_default_mac_addr(struct rte_eth_dev *dev, if (!is_valid_assigned_ether_addr(mac_addr)) { PMD_DRV_LOG(ERR, "Tried to set invalid MAC address."); - return; + return -EINVAL; } if (vf->flags & I40E_FLAG_VF_MAC_BY_PF) - return; + return -EPERM; i40evf_del_mac_addr_by_addr(dev, (struct ether_addr *)hw->mac.addr); - i40evf_add_mac_addr(dev, mac_addr, 0, 0); + if (i40evf_add_mac_addr(dev, mac_addr, 0, 0) != 0) + return -EIO; ether_addr_copy(mac_addr, (struct ether_addr *)hw->mac.addr); + return 0; +} + +static int +i40evf_add_del_mc_addr_list(struct rte_eth_dev *dev, + struct ether_addr *mc_addrs, + uint32_t mc_addrs_num, bool add) +{ + struct virtchnl_ether_addr_list *list; + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + uint8_t cmd_buffer[sizeof(struct virtchnl_ether_addr_list) + + (I40E_NUM_MACADDR_MAX * sizeof(struct virtchnl_ether_addr))]; + uint32_t i; + int err; + struct vf_cmd_info args; + + if (mc_addrs == NULL || mc_addrs_num == 0) + return 0; + + if (mc_addrs_num > I40E_NUM_MACADDR_MAX) + return -EINVAL; + + list = (struct virtchnl_ether_addr_list *)cmd_buffer; + list->vsi_id = vf->vsi_res->vsi_id; + list->num_elements = mc_addrs_num; + + for (i = 0; i < mc_addrs_num; i++) { + if (!I40E_IS_MULTICAST(mc_addrs[i].addr_bytes)) { + PMD_DRV_LOG(ERR, "Invalid mac:%x:%x:%x:%x:%x:%x", + mc_addrs[i].addr_bytes[0], + mc_addrs[i].addr_bytes[1], + mc_addrs[i].addr_bytes[2], + mc_addrs[i].addr_bytes[3], + mc_addrs[i].addr_bytes[4], + mc_addrs[i].addr_bytes[5]); + return -EINVAL; + } + + memcpy(list->list[i].addr, mc_addrs[i].addr_bytes, + sizeof(list->list[i].addr)); + } + + args.ops = add ? VIRTCHNL_OP_ADD_ETH_ADDR : VIRTCHNL_OP_DEL_ETH_ADDR; + args.in_args = cmd_buffer; + args.in_args_size = sizeof(struct virtchnl_ether_addr_list) + + i * sizeof(struct virtchnl_ether_addr); + args.out_buffer = vf->aq_resp; + args.out_size = I40E_AQ_BUF_SZ; + err = i40evf_execute_vf_cmd(dev, &args); + if (err) { + PMD_DRV_LOG(ERR, "fail to execute command %s", + add ? 
"OP_ADD_ETH_ADDR" : "OP_DEL_ETH_ADDR"); + return err; + } + + return 0; +} + +static int +i40evf_set_mc_addr_list(struct rte_eth_dev *dev, struct ether_addr *mc_addrs, + uint32_t mc_addrs_num) +{ + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + int err; + + /* flush previous addresses */ + err = i40evf_add_del_mc_addr_list(dev, vf->mc_addrs, vf->mc_addrs_num, + FALSE); + if (err) + return err; + + vf->mc_addrs_num = 0; + + /* add new ones */ + err = i40evf_add_del_mc_addr_list(dev, mc_addrs, mc_addrs_num, + TRUE); + if (err) + return err; + + vf->mc_addrs_num = mc_addrs_num; + memcpy(vf->mc_addrs, mc_addrs, mc_addrs_num * sizeof(*mc_addrs)); + + return 0; } diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c index b83a0cff..a4a61d1c 100644 --- a/drivers/net/i40e/i40e_fdir.c +++ b/drivers/net/i40e/i40e_fdir.c @@ -525,7 +525,7 @@ i40e_set_flx_pld_cfg(struct i40e_pf *pf, flx_ort = (1 << I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT) | (num << I40E_GLQF_ORT_FIELD_CNT_SHIFT) | (layer_idx * I40E_MAX_FLXPLD_FIED); - I40E_WRITE_REG(hw, I40E_GLQF_ORT(33 + layer_idx), flx_ort); + I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33 + layer_idx), flx_ort); i40e_global_cfg_warning(I40E_WARNING_ENA_FLX_PLD); } diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c index 16c47cf7..89de6a59 100644 --- a/drivers/net/i40e/i40e_flow.c +++ b/drivers/net/i40e/i40e_flow.c @@ -10,6 +10,7 @@ #include #include +#include #include #include #include @@ -53,6 +54,7 @@ static int i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev, struct rte_flow_error *error, struct rte_eth_ethertype_filter *filter); static int i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, const struct rte_flow_item *pattern, struct rte_flow_error *error, struct i40e_fdir_filter_conf *filter); @@ -1939,7 +1941,8 @@ static uint16_t i40e_get_outer_vlan(struct rte_eth_dev *dev) { struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); - int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend; + int qinq = dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_VLAN_EXTEND; uint64_t reg_r = 0; uint16_t reg_id; uint16_t tpid; @@ -2259,7 +2262,7 @@ i40e_flow_set_fdir_flex_pit(struct i40e_pf *pf, flx_ort = (1 << I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT) | (raw_id << I40E_GLQF_ORT_FIELD_CNT_SHIFT) | (layer_idx * I40E_MAX_FLXPLD_FIED); - I40E_WRITE_REG(hw, I40E_GLQF_ORT(33 + layer_idx), flx_ort); + I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33 + layer_idx), flx_ort); i40e_global_cfg_warning(I40E_WARNING_ENA_FLX_PLD); } @@ -2400,7 +2403,7 @@ i40e_flow_fdir_get_pctype_value(struct i40e_pf *pf, break; } - if (cus_pctype) + if (cus_pctype && cus_pctype->valid) return cus_pctype->pctype; return I40E_FILTER_PCTYPE_INVALID; @@ -2419,6 +2422,7 @@ i40e_flow_fdir_get_pctype_value(struct i40e_pf *pf, */ static int i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, const struct rte_flow_item *pattern, struct rte_flow_error *error, struct i40e_fdir_filter_conf *filter) @@ -2490,16 +2494,22 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, "Invalid MAC_addr mask."); return -rte_errno; } + } + if (eth_spec && eth_mask && eth_mask->type) { + enum rte_flow_item_type next = (item + 1)->type; - if ((eth_mask->type & UINT16_MAX) == - UINT16_MAX) { - input_set |= I40E_INSET_LAST_ETHER_TYPE; - filter->input.flow.l2_flow.ether_type = - eth_spec->type; + if (eth_mask->type != RTE_BE16(0xffff)) { + rte_flow_error_set(error, EINVAL, + 
RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid type mask."); + return -rte_errno; } ether_type = rte_be_to_cpu_16(eth_spec->type); - if (ether_type == ETHER_TYPE_IPv4 || + + if (next == RTE_FLOW_ITEM_TYPE_VLAN || + ether_type == ETHER_TYPE_IPv4 || ether_type == ETHER_TYPE_IPv6 || ether_type == ETHER_TYPE_ARP || ether_type == outer_tpid) { @@ -2509,6 +2519,9 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, "Unsupported ether_type."); return -rte_errno; } + input_set |= I40E_INSET_LAST_ETHER_TYPE; + filter->input.flow.l2_flow.ether_type = + eth_spec->type; } pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD; @@ -2518,6 +2531,8 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, case RTE_FLOW_ITEM_TYPE_VLAN: vlan_spec = item->spec; vlan_mask = item->mask; + + RTE_ASSERT(!(input_set & I40E_INSET_LAST_ETHER_TYPE)); if (vlan_spec && vlan_mask) { if (vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK)) { @@ -2526,6 +2541,33 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, vlan_spec->tci; } } + if (vlan_spec && vlan_mask && vlan_mask->inner_type) { + if (vlan_mask->inner_type != RTE_BE16(0xffff)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid inner_type" + " mask."); + return -rte_errno; + } + + ether_type = + rte_be_to_cpu_16(vlan_spec->inner_type); + + if (ether_type == ETHER_TYPE_IPv4 || + ether_type == ETHER_TYPE_IPv6 || + ether_type == ETHER_TYPE_ARP || + ether_type == outer_tpid) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Unsupported inner_type."); + return -rte_errno; + } + input_set |= I40E_INSET_LAST_ETHER_TYPE; + filter->input.flow.l2_flow.ether_type = + vlan_spec->inner_type; + } pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD; layer_idx = I40E_FLXPLD_L2_IDX; @@ -2918,6 +2960,16 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, break; case RTE_FLOW_ITEM_TYPE_VF: vf_spec = item->spec; + if (!attr->transfer) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Matching VF traffic" + " without affecting it" + " (transfer attribute)" + " is unsupported"); + return -rte_errno; + } filter->input.flow_ext.is_vf = 1; filter->input.flow_ext.dst_id = vf_spec->id; if (filter->input.flow_ext.is_vf && @@ -3080,7 +3132,8 @@ i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev, &filter->fdir_filter; int ret; - ret = i40e_flow_parse_fdir_pattern(dev, pattern, error, fdir_filter); + ret = i40e_flow_parse_fdir_pattern(dev, attr, pattern, error, + fdir_filter); if (ret) return ret; @@ -3284,7 +3337,8 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev, case RTE_FLOW_ITEM_TYPE_VLAN: vlan_spec = item->spec; vlan_mask = item->mask; - if (!(vlan_spec && vlan_mask)) { + if (!(vlan_spec && vlan_mask) || + vlan_mask->inner_type) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, @@ -3514,7 +3568,8 @@ i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev, case RTE_FLOW_ITEM_TYPE_VLAN: vlan_spec = item->spec; vlan_mask = item->mask; - if (!(vlan_spec && vlan_mask)) { + if (!(vlan_spec && vlan_mask) || + vlan_mask->inner_type) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, @@ -4022,7 +4077,8 @@ i40e_flow_parse_qinq_pattern(__rte_unused struct rte_eth_dev *dev, vlan_spec = item->spec; vlan_mask = item->mask; - if (!(vlan_spec && vlan_mask)) { + if (!(vlan_spec && vlan_mask) || + vlan_mask->inner_type) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, @@ -4150,7 +4206,8 @@ i40e_flow_parse_rss_pattern(__rte_unused struct rte_eth_dev 
*dev, if (vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK)) { info->region[0].user_priority[0] = - (vlan_spec->tci >> 13) & 0x7; + (rte_be_to_cpu_16( + vlan_spec->tci) >> 13) & 0x7; info->region[0].user_priority_num = 1; info->queue_region_number = 1; *action_flag = 0; @@ -4169,6 +4226,19 @@ i40e_flow_parse_rss_pattern(__rte_unused struct rte_eth_dev *dev, return 0; } +/** + * This function parses the RSS queue indices, the total queue number and + * the hash functions. If the purpose of this configuration is queue region + * configuration, it sets the queue_region_conf flag to TRUE, else to FALSE. + * For queue region configuration, it also needs to parse the hardware + * flowtype and user_priority from the configuration, and it checks the + * validity of these parameters. For example, the queue region sizes must + * be one of the following values: 1, 2, 4, 8, 16, 32, 64; the maximum + * hw_flowtype or PCTYPE index is 63; the maximum user priority index is 7; + * and so on. Also, the queue indices must form a continuous sequence, and + * the queue region indices must be part of the RSS queue indices for this + * port. + */ static int i40e_flow_parse_rss_action(struct rte_eth_dev *dev, const struct rte_flow_action *actions, @@ -4205,7 +4275,7 @@ i40e_flow_parse_rss_action(struct rte_eth_dev *dev, if (action_flag) { for (n = 0; n < 64; n++) { - if (rss->rss_conf->rss_hf & (hf_bit << n)) { + if (rss->types & (hf_bit << n)) { conf_info->region[0].hw_flowtype[0] = n; conf_info->region[0].flowtype_num = 1; conf_info->queue_region_number = 1; @@ -4214,41 +4284,73 @@ } } + /** + * Check the queue-region-related parameters to ensure that + * the queue indices for a queue region form a continuous + * sequence and are also part of the RSS + * queue indices for this port. 
+ */ + if (conf_info->queue_region_number) { + for (i = 0; i < rss->queue_num; i++) { + for (j = 0; j < rss_info->conf.queue_num; j++) { + if (rss->queue[i] == rss_info->conf.queue[j]) + break; + } + if (j == rss_info->conf.queue_num) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "no valid queues"); + return -rte_errno; + } + } + + for (i = 0; i < rss->queue_num - 1; i++) { + if (rss->queue[i + 1] != rss->queue[i] + 1) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "no valid queues"); + return -rte_errno; + } + } + } + + /* Parse queue region related parameters from configuration */ for (n = 0; n < conf_info->queue_region_number; n++) { if (conf_info->region[n].user_priority_num || conf_info->region[n].flowtype_num) { - if (!((rte_is_power_of_2(rss->num)) && - rss->num <= 64)) { - PMD_DRV_LOG(ERR, "The region sizes should be any of the following values: 1, 2, 4, 8, 16, 32, 64 as long as the " - "total number of queues do not exceed the VSI allocation"); + if (!((rte_is_power_of_2(rss->queue_num)) && + rss->queue_num <= 64)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "The region sizes should be any of the following values: 1, 2, 4, 8, 16, 32, 64 as long as the " + "total number of queues do not exceed the VSI allocation"); return -rte_errno; } if (conf_info->region[n].user_priority[n] >= I40E_MAX_USER_PRIORITY) { - PMD_DRV_LOG(ERR, "the user priority max index is 7"); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "the user priority max index is 7"); return -rte_errno; } if (conf_info->region[n].hw_flowtype[n] >= I40E_FILTER_PCTYPE_MAX) { - PMD_DRV_LOG(ERR, "the hw_flowtype or PCTYPE max index is 63"); - return -rte_errno; - } - - if (rss_info->num < rss->num || - rss_info->queue[0] < rss->queue[0] || - (rss->queue[0] + rss->num > - rss_info->num + rss_info->queue[0])) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act, - "no valid queues"); + "the hw_flowtype or PCTYPE max index is 63"); return -rte_errno; } for (i = 0; i < info->queue_region_number; i++) { - if (info->region[i].queue_num == rss->num && + if (info->region[i].queue_num == + rss->queue_num && info->region[i].queue_start_index == rss->queue[0]) break; @@ -4256,12 +4358,15 @@ i40e_flow_parse_rss_action(struct rte_eth_dev *dev, if (i == info->queue_region_number) { if (i > I40E_REGION_MAX_INDEX) { - PMD_DRV_LOG(ERR, "the queue region max index is 7"); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "the queue region max index is 7"); return -rte_errno; } info->region[i].queue_num = - rss->num; + rss->queue_num; info->region[i].queue_start_index = rss->queue[0]; info->region[i].region_id = @@ -4301,10 +4406,13 @@ i40e_flow_parse_rss_action(struct rte_eth_dev *dev, rss_config->queue_region_conf = TRUE; } + /** + * Return function if this flow is used for queue region configuration + */ if (rss_config->queue_region_conf) return 0; - if (!rss || !rss->num) { + if (!rss || !rss->queue_num) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act, @@ -4312,7 +4420,7 @@ i40e_flow_parse_rss_action(struct rte_eth_dev *dev, return -rte_errno; } - for (n = 0; n < rss->num; n++) { + for (n = 0; n < rss->queue_num; n++) { if (rss->queue[n] >= dev->data->nb_rx_queues) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, @@ -4321,15 +4429,29 @@ i40e_flow_parse_rss_action(struct rte_eth_dev *dev, return -rte_errno; } } - if (rss->rss_conf) - 
rss_config->rss_conf = *rss->rss_conf; - else - rss_config->rss_conf.rss_hf = - pf->adapter->flow_types_mask; - for (n = 0; n < rss->num; ++n) - rss_config->queue[n] = rss->queue[n]; - rss_config->num = rss->num; + /* Parse RSS related parameters from configuration */ + if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act, + "non-default RSS hash functions are not supported"); + if (rss->level) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act, + "a nonzero RSS encapsulation level is not supported"); + if (rss->key_len && rss->key_len > RTE_DIM(rss_config->key)) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act, + "RSS hash key too large"); + if (rss->queue_num > RTE_DIM(rss_config->queue)) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act, + "too many queues for RSS context"); + if (i40e_rss_conf_init(rss_config, rss)) + return rte_flow_error_set + (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act, + "RSS context initialization failure"); + index++; /* check if the next not void action is END */ @@ -4385,14 +4507,15 @@ i40e_config_rss_filter_set(struct rte_eth_dev *dev, { struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int ret; if (conf->queue_region_conf) { - i40e_flush_queue_region_all_conf(dev, hw, pf, 1); + ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 1); conf->queue_region_conf = 0; } else { - i40e_config_rss_filter(pf, conf, 1); + ret = i40e_config_rss_filter(pf, conf, 1); } - return 0; + return ret; } static int @@ -4545,6 +4668,8 @@ i40e_flow_create(struct rte_eth_dev *dev, case RTE_ETH_FILTER_HASH: ret = i40e_config_rss_filter_set(dev, &cons_filter.rss_conf); + if (ret) + goto free_flow; flow->rule = &pf->rss_info; break; default: @@ -4846,7 +4971,7 @@ i40e_flow_flush_rss_filter(struct rte_eth_dev *dev) ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 0); - if (rss_info->num) + if (rss_info->conf.queue_num) ret = i40e_config_rss_filter(pf, rss_info, FALSE); return ret; } diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c index 1217e5a6..6032d554 100644 --- a/drivers/net/i40e/i40e_rxtx.c +++ b/drivers/net/i40e/i40e_rxtx.c @@ -40,9 +40,6 @@ /* Base address of the HW descriptor ring should be 128B aligned. 
*/ #define I40E_RING_BASE_ALIGN 128 -#define I40E_SIMPLE_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \ - ETH_TXQ_FLAGS_NOOFFLOADS) - #define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS) #ifdef RTE_LIBRTE_IEEE1588 @@ -1240,7 +1237,7 @@ i40e_tx_free_bufs(struct i40e_tx_queue *txq) for (i = 0; i < txq->tx_rs_thresh; i++) rte_prefetch0((txep + i)->mbuf); - if (txq->txq_flags & (uint32_t)ETH_TXQ_FLAGS_NOREFCOUNT) { + if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) { for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) { rte_mempool_put(txep->mbuf->pool, txep->mbuf); txep->mbuf = NULL; @@ -1692,6 +1689,75 @@ i40e_dev_supported_ptypes_get(struct rte_eth_dev *dev) return NULL; } +static int +i40e_dev_first_queue(uint16_t idx, void **queues, int num) +{ + uint16_t i; + + for (i = 0; i < num; i++) { + if (i != idx && queues[i]) + return 0; + } + + return 1; +} + +static int +i40e_dev_rx_queue_setup_runtime(struct rte_eth_dev *dev, + struct i40e_rx_queue *rxq) +{ + struct i40e_adapter *ad = + I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + int use_def_burst_func = + check_rx_burst_bulk_alloc_preconditions(rxq); + uint16_t buf_size = + (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) - + RTE_PKTMBUF_HEADROOM); + int use_scattered_rx = + ((rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size); + + if (i40e_rx_queue_init(rxq) != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, + "Failed to do RX queue initialization"); + return -EINVAL; + } + + if (i40e_dev_first_queue(rxq->queue_id, + dev->data->rx_queues, + dev->data->nb_rx_queues)) { + /** + * If it is the first queue to setup, + * set all flags to default and call + * i40e_set_rx_function. + */ + ad->rx_bulk_alloc_allowed = true; + ad->rx_vec_allowed = true; + dev->data->scattered_rx = use_scattered_rx; + if (use_def_burst_func) + ad->rx_bulk_alloc_allowed = false; + i40e_set_rx_function(dev); + return 0; + } + + /* check bulk alloc conflict */ + if (ad->rx_bulk_alloc_allowed && use_def_burst_func) { + PMD_DRV_LOG(ERR, "Can't use default burst."); + return -EINVAL; + } + /* check scattered conflict */ + if (!dev->data->scattered_rx && use_scattered_rx) { + PMD_DRV_LOG(ERR, "Scattered rx is required."); + return -EINVAL; + } + /* check vector conflict */ + if (ad->rx_vec_allowed && i40e_rxq_vec_setup(rxq)) { + PMD_DRV_LOG(ERR, "Failed vector rx setup."); + return -EINVAL; + } + + return 0; +} + int i40e_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, @@ -1712,6 +1778,9 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t len, i; uint16_t reg_idx, base, bsf, tc_mapping; int q_offset, use_def_burst_func = 1; + uint64_t offloads; + + offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads; if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) { vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); @@ -1760,11 +1829,12 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev, rxq->queue_id = queue_idx; rxq->reg_idx = reg_idx; rxq->port_id = dev->data->port_id; - rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? - 0 : ETHER_CRC_LEN); + rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_CRC_STRIP) ? 0 : ETHER_CRC_LEN); rxq->drop_en = rx_conf->rx_drop_en; rxq->vsi = vsi; rxq->rx_deferred_start = rx_conf->rx_deferred_start; + rxq->offloads = offloads; /* Allocate the maximum number of RX ring hardware descriptors. 
*/ len = I40E_MAX_RING_DESC; @@ -1808,25 +1878,6 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev, i40e_reset_rx_queue(rxq); rxq->q_set = TRUE; - dev->data->rx_queues[queue_idx] = rxq; - - use_def_burst_func = check_rx_burst_bulk_alloc_preconditions(rxq); - - if (!use_def_burst_func) { -#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC - PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are " - "satisfied. Rx Burst Bulk Alloc function will be " - "used on port=%d, queue=%d.", - rxq->port_id, rxq->queue_id); -#endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */ - } else { - PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are " - "not satisfied, Scattered Rx is requested, " - "or RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC is " - "not enabled on port=%d, queue=%d.", - rxq->port_id, rxq->queue_id); - ad->rx_bulk_alloc_allowed = false; - } for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { if (!(vsi->enabled_tc & (1 << i))) @@ -1841,6 +1892,34 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev, rxq->dcb_tc = i; } + if (dev->data->dev_started) { + if (i40e_dev_rx_queue_setup_runtime(dev, rxq)) { + i40e_dev_rx_queue_release(rxq); + return -EINVAL; + } + } else { + use_def_burst_func = + check_rx_burst_bulk_alloc_preconditions(rxq); + if (!use_def_burst_func) { +#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC + PMD_INIT_LOG(DEBUG, + "Rx Burst Bulk Alloc Preconditions are " + "satisfied. Rx Burst Bulk Alloc function will be " + "used on port=%d, queue=%d.", + rxq->port_id, rxq->queue_id); +#endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */ + } else { + PMD_INIT_LOG(DEBUG, + "Rx Burst Bulk Alloc Preconditions are " + "not satisfied, Scattered Rx is requested, " + "or RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC is " + "not enabled on port=%d, queue=%d.", + rxq->port_id, rxq->queue_id); + ad->rx_bulk_alloc_allowed = false; + } + } + + dev->data->rx_queues[queue_idx] = rxq; return 0; } @@ -1972,6 +2051,52 @@ i40e_dev_tx_descriptor_status(void *tx_queue, uint16_t offset) return RTE_ETH_TX_DESC_FULL; } +static int +i40e_dev_tx_queue_setup_runtime(struct rte_eth_dev *dev, + struct i40e_tx_queue *txq) +{ + struct i40e_adapter *ad = + I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + + if (i40e_tx_queue_init(txq) != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, + "Failed to do TX queue initialization"); + return -EINVAL; + } + + if (i40e_dev_first_queue(txq->queue_id, + dev->data->tx_queues, + dev->data->nb_tx_queues)) { + /** + * If it is the first queue to setup, + * set all flags and call + * i40e_set_tx_function. 
+ */ + i40e_set_tx_function_flag(dev, txq); + i40e_set_tx_function(dev); + return 0; + } + + /* check vector conflict */ + if (ad->tx_vec_allowed) { + if (txq->tx_rs_thresh > RTE_I40E_TX_MAX_FREE_BUF_SZ || + i40e_txq_vec_setup(txq)) { + PMD_DRV_LOG(ERR, "Failed vector tx setup."); + return -EINVAL; + } + } + /* check simple tx conflict */ + if (ad->tx_simple_allowed) { + if (txq->offloads != 0 || + txq->tx_rs_thresh < RTE_PMD_I40E_TX_MAX_BURST) { + PMD_DRV_LOG(ERR, "No-simple tx is required."); + return -EINVAL; + } + } + + return 0; +} + int i40e_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, @@ -1989,6 +2114,9 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_rs_thresh, tx_free_thresh; uint16_t reg_idx, i, base, bsf, tc_mapping; int q_offset; + uint64_t offloads; + + offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads; if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) { vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); @@ -2123,7 +2251,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev, txq->queue_id = queue_idx; txq->reg_idx = reg_idx; txq->port_id = dev->data->port_id; - txq->txq_flags = tx_conf->txq_flags; + txq->offloads = offloads; txq->vsi = vsi; txq->tx_deferred_start = tx_conf->tx_deferred_start; @@ -2144,10 +2272,6 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev, i40e_reset_tx_queue(txq); txq->q_set = TRUE; - dev->data->tx_queues[queue_idx] = txq; - - /* Use a simple TX queue without offloads or multi segs if possible */ - i40e_set_tx_function_flag(dev, txq); for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { if (!(vsi->enabled_tc & (1 << i))) @@ -2162,6 +2286,20 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev, txq->dcb_tc = i; } + if (dev->data->dev_started) { + if (i40e_dev_tx_queue_setup_runtime(dev, txq)) { + i40e_dev_tx_queue_release(txq); + return -EINVAL; + } + } else { + /** + * Use a simple TX queue without offloads or + * multi segs if possible + */ + i40e_set_tx_function_flag(dev, txq); + } + dev->data->tx_queues[queue_idx] = txq; + return 0; } @@ -2189,8 +2327,8 @@ i40e_memzone_reserve(const char *name, uint32_t len, int socket_id) if (mz) return mz; - mz = rte_memzone_reserve_aligned(name, len, - socket_id, 0, I40E_RING_BASE_ALIGN); + mz = rte_memzone_reserve_aligned(name, len, socket_id, + RTE_MEMZONE_IOVA_CONTIG, I40E_RING_BASE_ALIGN); return mz; } @@ -2469,7 +2607,7 @@ i40e_rx_queue_config(struct i40e_rx_queue *rxq) len = hw->func_caps.rx_buf_chain_len * rxq->rx_buf_len; rxq->max_pkt_len = RTE_MIN(len, data->dev_conf.rxmode.max_rx_pkt_len); - if (data->dev_conf.rxmode.jumbo_frame == 1) { + if (data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) { if (rxq->max_pkt_len <= ETHER_MAX_LEN || rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) { PMD_DRV_LOG(ERR, "maximum packet length must " @@ -2747,6 +2885,7 @@ i40e_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, qinfo->conf.rx_free_thresh = rxq->rx_free_thresh; qinfo->conf.rx_drop_en = rxq->drop_en; qinfo->conf.rx_deferred_start = rxq->rx_deferred_start; + qinfo->conf.offloads = rxq->offloads; } void @@ -2765,8 +2904,8 @@ i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, qinfo->conf.tx_free_thresh = txq->tx_free_thresh; qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh; - qinfo->conf.txq_flags = txq->txq_flags; qinfo->conf.tx_deferred_start = txq->tx_deferred_start; + qinfo->conf.offloads = txq->offloads; } void __attribute__((cold)) @@ -2889,19 +3028,24 @@ i40e_set_tx_function_flag(struct rte_eth_dev *dev, struct i40e_tx_queue 
*txq) struct i40e_adapter *ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); - /* Use a simple Tx queue (no offloads, no multi segs) if possible */ - if (((txq->txq_flags & I40E_SIMPLE_FLAGS) == I40E_SIMPLE_FLAGS) - && (txq->tx_rs_thresh >= RTE_PMD_I40E_TX_MAX_BURST)) { - if (txq->tx_rs_thresh <= RTE_I40E_TX_MAX_FREE_BUF_SZ) { - PMD_INIT_LOG(DEBUG, "Vector tx" - " can be enabled on this txq."); - - } else { - ad->tx_vec_allowed = false; - } - } else { - ad->tx_simple_allowed = false; - } + /* Use a simple Tx queue if possible (only fast free is allowed) */ + ad->tx_simple_allowed = + (txq->offloads == + (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) && + txq->tx_rs_thresh >= RTE_PMD_I40E_TX_MAX_BURST); + ad->tx_vec_allowed = (ad->tx_simple_allowed && + txq->tx_rs_thresh <= RTE_I40E_TX_MAX_FREE_BUF_SZ); + + if (ad->tx_vec_allowed) + PMD_INIT_LOG(DEBUG, "Vector Tx can be enabled on Tx queue %u.", + txq->queue_id); + else if (ad->tx_simple_allowed) + PMD_INIT_LOG(DEBUG, "Simple Tx can be enabled on Tx queue %u.", + txq->queue_id); + else + PMD_INIT_LOG(DEBUG, + "Neither simple nor vector Tx enabled on Tx queue %u\n", + txq->queue_id); } void __attribute__((cold)) diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h index 34cd7923..ea73a8a1 100644 --- a/drivers/net/i40e/i40e_rxtx.h +++ b/drivers/net/i40e/i40e_rxtx.h @@ -107,6 +107,7 @@ struct i40e_rx_queue { bool rx_deferred_start; /**< don't start this queue in dev start */ uint16_t rx_using_sse; /**header_split == 1) + if (rxmode->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT) return -1; /* no QinQ support */ - if (rxmode->hw_vlan_extend == 1) + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) return -1; return 0; diff --git a/drivers/net/i40e/i40e_rxtx_vec_neon.c b/drivers/net/i40e/i40e_rxtx_vec_neon.c index e549d1e8..83572ef8 100644 --- a/drivers/net/i40e/i40e_rxtx_vec_neon.c +++ b/drivers/net/i40e/i40e_rxtx_vec_neon.c @@ -1,35 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2010-2015 Intel Corporation. All rights reserved. - * Copyright(c) 2016, Linaro Limited - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
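The rewritten i40e_set_tx_function_flag() above condenses the old txq_flags test into two predicates: the simple Tx path requires that no offload beyond mbuf fast free is requested and that tx_rs_thresh is at least the maximum burst size, and the vector path additionally caps tx_rs_thresh at the free-buffer limit. The same test as a pure function; the threshold values are stand-ins for the RTE_* constants.

#include <stdbool.h>
#include <stdint.h>

#define OFFLOAD_MBUF_FAST_FREE (1ULL << 0)	/* stand-in */
#define TX_MAX_BURST 32				/* stand-in */
#define TX_MAX_FREE_BUF_SZ 64			/* stand-in */

struct txq_caps {
	bool simple_allowed;
	bool vec_allowed;
};

static struct txq_caps
classify_txq(uint64_t offloads, uint16_t tx_rs_thresh)
{
	struct txq_caps caps;

	/* nothing but (at most) fast free requested, and a large rs threshold */
	caps.simple_allowed =
		offloads == (offloads & OFFLOAD_MBUF_FAST_FREE) &&
		tx_rs_thresh >= TX_MAX_BURST;
	/* vector Tx further bounds the rs threshold from above */
	caps.vec_allowed = caps.simple_allowed &&
		tx_rs_thresh <= TX_MAX_FREE_BUF_SZ;
	return caps;
}

int
main(void)
{
	struct txq_caps caps = classify_txq(0, 32);

	return caps.simple_allowed && caps.vec_allowed ? 0 : 1;
}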
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2015 Intel Corporation. + * Copyright(c) 2016-2018, Linaro Limited. */ #include diff --git a/drivers/net/i40e/i40e_vf_representor.c b/drivers/net/i40e/i40e_vf_representor.c new file mode 100644 index 00000000..f9f13161 --- /dev/null +++ b/drivers/net/i40e/i40e_vf_representor.c @@ -0,0 +1,531 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Intel Corporation. + */ + +#include +#include +#include +#include + +#include "base/i40e_type.h" +#include "base/virtchnl.h" +#include "i40e_ethdev.h" +#include "i40e_rxtx.h" +#include "rte_pmd_i40e.h" + +static int +i40e_vf_representor_link_update(struct rte_eth_dev *ethdev, + int wait_to_complete) +{ + struct i40e_vf_representor *representor = ethdev->data->dev_private; + + return i40e_dev_link_update(representor->adapter->eth_dev, + wait_to_complete); +} +static void +i40e_vf_representor_dev_infos_get(struct rte_eth_dev *ethdev, + struct rte_eth_dev_info *dev_info) +{ + struct i40e_vf_representor *representor = ethdev->data->dev_private; + + /* get dev info for the vdev */ + dev_info->device = ethdev->device; + + dev_info->max_rx_queues = ethdev->data->nb_rx_queues; + dev_info->max_tx_queues = ethdev->data->nb_tx_queues; + + dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN; + dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX; + dev_info->hash_key_size = (I40E_VFQF_HKEY_MAX_INDEX + 1) * + sizeof(uint32_t); + dev_info->reta_size = ETH_RSS_RETA_SIZE_64; + dev_info->flow_type_rss_offloads = I40E_RSS_OFFLOAD_ALL; + dev_info->max_mac_addrs = I40E_NUM_MACADDR_MAX; + dev_info->rx_offload_capa = + DEV_RX_OFFLOAD_VLAN_STRIP | + DEV_RX_OFFLOAD_QINQ_STRIP | + DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM; + dev_info->tx_offload_capa = + DEV_TX_OFFLOAD_VLAN_INSERT | + DEV_TX_OFFLOAD_QINQ_INSERT | + DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM | + DEV_TX_OFFLOAD_SCTP_CKSUM | + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | + DEV_TX_OFFLOAD_TCP_TSO | + DEV_TX_OFFLOAD_VXLAN_TNL_TSO | + DEV_TX_OFFLOAD_GRE_TNL_TSO | + DEV_TX_OFFLOAD_IPIP_TNL_TSO | + DEV_TX_OFFLOAD_GENEVE_TNL_TSO; + + dev_info->default_rxconf = (struct rte_eth_rxconf) { + .rx_thresh = { + .pthresh = I40E_DEFAULT_RX_PTHRESH, + .hthresh = I40E_DEFAULT_RX_HTHRESH, + .wthresh = I40E_DEFAULT_RX_WTHRESH, + }, + .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH, + .rx_drop_en = 0, + .offloads = 0, + }; + + dev_info->default_txconf = (struct rte_eth_txconf) { + .tx_thresh = { + .pthresh = I40E_DEFAULT_TX_PTHRESH, + .hthresh = I40E_DEFAULT_TX_HTHRESH, + .wthresh = I40E_DEFAULT_TX_WTHRESH, + }, + .tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH, + .tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH, + .offloads = 0, + }; + + dev_info->rx_desc_lim = (struct rte_eth_desc_lim) { + .nb_max = I40E_MAX_RING_DESC, + .nb_min = I40E_MIN_RING_DESC, + .nb_align = I40E_ALIGN_RING_DESC, + }; + + dev_info->tx_desc_lim = (struct rte_eth_desc_lim) { + .nb_max = 
I40E_MAX_RING_DESC, + .nb_min = I40E_MIN_RING_DESC, + .nb_align = I40E_ALIGN_RING_DESC, + }; + + dev_info->switch_info.name = + representor->adapter->eth_dev->device->name; + dev_info->switch_info.domain_id = representor->switch_domain_id; + dev_info->switch_info.port_id = representor->vf_id; +} + +static int +i40e_vf_representor_dev_configure(__rte_unused struct rte_eth_dev *dev) +{ + return 0; +} + +static int +i40e_vf_representor_dev_start(__rte_unused struct rte_eth_dev *dev) +{ + return 0; +} + +static void +i40e_vf_representor_dev_stop(__rte_unused struct rte_eth_dev *dev) +{ +} + +static int +i40e_vf_representor_rx_queue_setup(__rte_unused struct rte_eth_dev *dev, + __rte_unused uint16_t rx_queue_id, + __rte_unused uint16_t nb_rx_desc, + __rte_unused unsigned int socket_id, + __rte_unused const struct rte_eth_rxconf *rx_conf, + __rte_unused struct rte_mempool *mb_pool) +{ + return 0; +} + +static int +i40e_vf_representor_tx_queue_setup(__rte_unused struct rte_eth_dev *dev, + __rte_unused uint16_t rx_queue_id, + __rte_unused uint16_t nb_rx_desc, + __rte_unused unsigned int socket_id, + __rte_unused const struct rte_eth_txconf *tx_conf) +{ + return 0; +} + +static void +i40evf_stat_update_48(uint64_t *offset, + uint64_t *stat) +{ + if (*stat >= *offset) + *stat = *stat - *offset; + else + *stat = (uint64_t)((*stat + + ((uint64_t)1 << I40E_48_BIT_WIDTH)) - *offset); + + *stat &= I40E_48_BIT_MASK; +} + +static void +i40evf_stat_update_32(uint64_t *offset, + uint64_t *stat) +{ + if (*stat >= *offset) + *stat = (uint64_t)(*stat - *offset); + else + *stat = (uint64_t)((*stat + + ((uint64_t)1 << I40E_32_BIT_WIDTH)) - *offset); +} + +static int +rte_pmd_i40e_get_vf_native_stats(uint16_t port, + uint16_t vf_id, + struct i40e_eth_stats *stats) +{ + struct rte_eth_dev *dev; + struct i40e_pf *pf; + struct i40e_vsi *vsi; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_i40e_supported(dev)) + return -ENOTSUP; + + pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + + if (vf_id >= pf->vf_num || !pf->vfs) { + PMD_DRV_LOG(ERR, "Invalid VF ID."); + return -EINVAL; + } + + vsi = pf->vfs[vf_id].vsi; + if (!vsi) { + PMD_DRV_LOG(ERR, "Invalid VSI."); + return -EINVAL; + } + + i40e_update_vsi_stats(vsi); + memcpy(stats, &vsi->eth_stats, sizeof(vsi->eth_stats)); + + return 0; +} + +static int +i40e_vf_representor_stats_get(struct rte_eth_dev *ethdev, + struct rte_eth_stats *stats) +{ + struct i40e_vf_representor *representor = ethdev->data->dev_private; + struct i40e_eth_stats native_stats; + int ret; + + ret = rte_pmd_i40e_get_vf_native_stats( + representor->adapter->eth_dev->data->port_id, + representor->vf_id, &native_stats); + if (ret == 0) { + i40evf_stat_update_48( + &representor->stats_offset.rx_bytes, + &native_stats.rx_bytes); + i40evf_stat_update_48( + &representor->stats_offset.rx_unicast, + &native_stats.rx_unicast); + i40evf_stat_update_48( + &representor->stats_offset.rx_multicast, + &native_stats.rx_multicast); + i40evf_stat_update_48( + &representor->stats_offset.rx_broadcast, + &native_stats.rx_broadcast); + i40evf_stat_update_32( + &representor->stats_offset.rx_discards, + &native_stats.rx_discards); + i40evf_stat_update_32( + &representor->stats_offset.rx_unknown_protocol, + &native_stats.rx_unknown_protocol); + i40evf_stat_update_48( + &representor->stats_offset.tx_bytes, + &native_stats.tx_bytes); + i40evf_stat_update_48( + &representor->stats_offset.tx_unicast, + &native_stats.tx_unicast); + i40evf_stat_update_48( + 
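i40evf_stat_update_48() above compensates for rollover of the 48-bit hardware counters: when the current reading is below the recorded offset the counter has wrapped, so one full 2^48 period is added back before subtracting, and the result is masked to 48 bits. Standalone, with a quick self-test:

#include <assert.h>
#include <stdint.h>

#define BIT_WIDTH_48 48
#define MASK_48 ((1ULL << BIT_WIDTH_48) - 1)

static uint64_t
stat_delta_48(uint64_t offset, uint64_t stat)
{
	if (stat >= offset)
		stat = stat - offset;
	else
		/* the counter wrapped between the two readings */
		stat = (stat + (1ULL << BIT_WIDTH_48)) - offset;
	return stat & MASK_48;
}

int
main(void)
{
	assert(stat_delta_48(100, 250) == 150);		/* no wrap */
	assert(stat_delta_48(MASK_48 - 9, 10) == 20);	/* wrapped */
	return 0;
}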
&representor->stats_offset.tx_multicast, + &native_stats.tx_multicast); + i40evf_stat_update_48( + &representor->stats_offset.tx_broadcast, + &native_stats.tx_broadcast); + i40evf_stat_update_32( + &representor->stats_offset.tx_errors, + &native_stats.tx_errors); + i40evf_stat_update_32( + &representor->stats_offset.tx_discards, + &native_stats.tx_discards); + + stats->ipackets = native_stats.rx_unicast + + native_stats.rx_multicast + + native_stats.rx_broadcast; + stats->opackets = native_stats.tx_unicast + + native_stats.tx_multicast + + native_stats.tx_broadcast; + stats->ibytes = native_stats.rx_bytes; + stats->obytes = native_stats.tx_bytes; + stats->ierrors = native_stats.rx_discards; + stats->oerrors = native_stats.tx_errors + native_stats.tx_discards; + } + return ret; +} + +static void +i40e_vf_representor_stats_reset(struct rte_eth_dev *ethdev) +{ + struct i40e_vf_representor *representor = ethdev->data->dev_private; + + rte_pmd_i40e_get_vf_native_stats( + representor->adapter->eth_dev->data->port_id, + representor->vf_id, &representor->stats_offset); +} + +static void +i40e_vf_representor_promiscuous_enable(struct rte_eth_dev *ethdev) +{ + struct i40e_vf_representor *representor = ethdev->data->dev_private; + + rte_pmd_i40e_set_vf_unicast_promisc( + representor->adapter->eth_dev->data->port_id, + representor->vf_id, 1); +} + +static void +i40e_vf_representor_promiscuous_disable(struct rte_eth_dev *ethdev) +{ + struct i40e_vf_representor *representor = ethdev->data->dev_private; + + rte_pmd_i40e_set_vf_unicast_promisc( + representor->adapter->eth_dev->data->port_id, + representor->vf_id, 0); +} + +static void +i40e_vf_representor_allmulticast_enable(struct rte_eth_dev *ethdev) +{ + struct i40e_vf_representor *representor = ethdev->data->dev_private; + + rte_pmd_i40e_set_vf_multicast_promisc( + representor->adapter->eth_dev->data->port_id, + representor->vf_id, 1); +} + +static void +i40e_vf_representor_allmulticast_disable(struct rte_eth_dev *ethdev) +{ + struct i40e_vf_representor *representor = ethdev->data->dev_private; + + rte_pmd_i40e_set_vf_multicast_promisc( + representor->adapter->eth_dev->data->port_id, + representor->vf_id, 0); +} + +static void +i40e_vf_representor_mac_addr_remove(struct rte_eth_dev *ethdev, uint32_t index) +{ + struct i40e_vf_representor *representor = ethdev->data->dev_private; + + rte_pmd_i40e_remove_vf_mac_addr( + representor->adapter->eth_dev->data->port_id, + representor->vf_id, &ethdev->data->mac_addrs[index]); +} + +static int +i40e_vf_representor_mac_addr_set(struct rte_eth_dev *ethdev, + struct ether_addr *mac_addr) +{ + struct i40e_vf_representor *representor = ethdev->data->dev_private; + + return rte_pmd_i40e_set_vf_mac_addr( + representor->adapter->eth_dev->data->port_id, + representor->vf_id, mac_addr); +} + +static int +i40e_vf_representor_vlan_filter_set(struct rte_eth_dev *ethdev, + uint16_t vlan_id, int on) +{ + struct i40e_vf_representor *representor = ethdev->data->dev_private; + uint64_t vf_mask = 1ULL << representor->vf_id; + + return rte_pmd_i40e_set_vf_vlan_filter( + representor->adapter->eth_dev->data->port_id, + vlan_id, vf_mask, on); +} + +static int +i40e_vf_representor_vlan_offload_set(struct rte_eth_dev *ethdev, int mask) +{ + struct i40e_vf_representor *representor = ethdev->data->dev_private; + struct rte_eth_dev *pdev; + struct i40e_pf_vf *vf; + struct i40e_vsi *vsi; + struct i40e_pf *pf; + uint32_t vfid; + + pdev = representor->adapter->eth_dev; + vfid = representor->vf_id; + + if (!is_i40e_supported(pdev)) { +
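Note the reset scheme in i40e_vf_representor_stats_reset() above: it never clears the hardware counters, it snapshots them into stats_offset, and stats_get() then reports current minus snapshot (with the 48/32-bit wrap handling shown earlier). Reduced to its essence, with illustrative field names:

#include <stdint.h>

struct counters {
	uint64_t rx_bytes;
	uint64_t tx_bytes;
};

static struct counters hw_counters;	/* stand-in for the VSI counters */
static struct counters offset;		/* baseline taken at reset time */

static void
stats_reset(void)
{
	offset = hw_counters;	/* remember the current absolute values */
}

static void
stats_get(struct counters *out)
{
	out->rx_bytes = hw_counters.rx_bytes - offset.rx_bytes;
	out->tx_bytes = hw_counters.tx_bytes - offset.tx_bytes;
}

int
main(void)
{
	struct counters now;

	hw_counters.rx_bytes = 1000;
	stats_reset();
	hw_counters.rx_bytes = 1500;
	stats_get(&now);
	return now.rx_bytes == 500 ? 0 : 1;
}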
PMD_DRV_LOG(ERR, "Invalid PF dev."); + return -EINVAL; + } + + pf = I40E_DEV_PRIVATE_TO_PF(pdev->data->dev_private); + + if (vfid >= pf->vf_num || !pf->vfs) { + PMD_DRV_LOG(ERR, "Invalid VF ID."); + return -EINVAL; + } + + vf = &pf->vfs[vfid]; + vsi = vf->vsi; + if (!vsi) { + PMD_DRV_LOG(ERR, "Invalid VSI."); + return -EINVAL; + } + + if (mask & ETH_VLAN_FILTER_MASK) { + /* Enable or disable VLAN filtering offload */ + if (ethdev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_VLAN_FILTER) + return i40e_vsi_config_vlan_filter(vsi, TRUE); + else + return i40e_vsi_config_vlan_filter(vsi, FALSE); + } + + if (mask & ETH_VLAN_STRIP_MASK) { + /* Enable or disable VLAN stripping offload */ + if (ethdev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_VLAN_STRIP) + return i40e_vsi_config_vlan_stripping(vsi, TRUE); + else + return i40e_vsi_config_vlan_stripping(vsi, FALSE); + } + + return -EINVAL; +} + +static void +i40e_vf_representor_vlan_strip_queue_set(struct rte_eth_dev *ethdev, + __rte_unused uint16_t rx_queue_id, int on) +{ + struct i40e_vf_representor *representor = ethdev->data->dev_private; + + rte_pmd_i40e_set_vf_vlan_stripq( + representor->adapter->eth_dev->data->port_id, + representor->vf_id, on); +} + +static int +i40e_vf_representor_vlan_pvid_set(struct rte_eth_dev *ethdev, uint16_t vlan_id, + __rte_unused int on) +{ + struct i40e_vf_representor *representor = ethdev->data->dev_private; + + return rte_pmd_i40e_set_vf_vlan_insert( + representor->adapter->eth_dev->data->port_id, + representor->vf_id, vlan_id); +} + +struct eth_dev_ops i40e_representor_dev_ops = { + .dev_infos_get = i40e_vf_representor_dev_infos_get, + + .dev_start = i40e_vf_representor_dev_start, + .dev_configure = i40e_vf_representor_dev_configure, + .dev_stop = i40e_vf_representor_dev_stop, + + .rx_queue_setup = i40e_vf_representor_rx_queue_setup, + .tx_queue_setup = i40e_vf_representor_tx_queue_setup, + + .link_update = i40e_vf_representor_link_update, + + .stats_get = i40e_vf_representor_stats_get, + .stats_reset = i40e_vf_representor_stats_reset, + + .promiscuous_enable = i40e_vf_representor_promiscuous_enable, + .promiscuous_disable = i40e_vf_representor_promiscuous_disable, + + .allmulticast_enable = i40e_vf_representor_allmulticast_enable, + .allmulticast_disable = i40e_vf_representor_allmulticast_disable, + + .mac_addr_remove = i40e_vf_representor_mac_addr_remove, + .mac_addr_set = i40e_vf_representor_mac_addr_set, + + .vlan_filter_set = i40e_vf_representor_vlan_filter_set, + .vlan_offload_set = i40e_vf_representor_vlan_offload_set, + .vlan_strip_queue_set = i40e_vf_representor_vlan_strip_queue_set, + .vlan_pvid_set = i40e_vf_representor_vlan_pvid_set + +}; + +static uint16_t +i40e_vf_representor_rx_burst(__rte_unused void *rx_queue, + __rte_unused struct rte_mbuf **rx_pkts, __rte_unused uint16_t nb_pkts) +{ + return 0; +} + +static uint16_t +i40e_vf_representor_tx_burst(__rte_unused void *tx_queue, + __rte_unused struct rte_mbuf **tx_pkts, __rte_unused uint16_t nb_pkts) +{ + return 0; +} + +int +i40e_vf_representor_init(struct rte_eth_dev *ethdev, void *init_params) +{ + struct i40e_vf_representor *representor = ethdev->data->dev_private; + + struct i40e_pf *pf; + struct i40e_pf_vf *vf; + struct rte_eth_link *link; + + representor->vf_id = + ((struct i40e_vf_representor *)init_params)->vf_id; + representor->switch_domain_id = + ((struct i40e_vf_representor *)init_params)->switch_domain_id; + representor->adapter = + ((struct i40e_vf_representor *)init_params)->adapter; + + pf = I40E_DEV_PRIVATE_TO_PF( + 
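The representor publishes an eth_dev_ops table plus stub Rx/Tx burst callbacks that report zero packets, so generic tooling such as testpmd can poll the port even though it has no data path. A cut-down model of that function-pointer pattern, with illustrative names:

#include <stdint.h>

struct pkt;	/* opaque packet type */

struct dev_ops {
	int (*start)(void *dev);
	void (*stop)(void *dev);
};

static uint16_t
stub_burst(void *queue, struct pkt **pkts, uint16_t n)
{
	(void)queue; (void)pkts; (void)n;
	return 0;	/* no data path: nothing sent or received */
}

static int stub_start(void *dev) { (void)dev; return 0; }
static void stub_stop(void *dev) { (void)dev; }

static const struct dev_ops representor_ops = {
	.start = stub_start,
	.stop = stub_stop,
};

int
main(void)
{
	struct pkt *p = 0;

	/* rx_pkt_burst and tx_pkt_burst would both point at stub_burst */
	return representor_ops.start(0) + stub_burst(0, &p, 8);
}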
representor->adapter->eth_dev->data->dev_private); + + if (representor->vf_id >= pf->vf_num) + return -ENODEV; + + /** representor shares the same driver as it's PF device */ + ethdev->device->driver = representor->adapter->eth_dev->device->driver; + + /* Set representor device ops */ + ethdev->dev_ops = &i40e_representor_dev_ops; + + /* No data-path, but need stub Rx/Tx functions to avoid crash + * when testing with the likes of testpmd. + */ + ethdev->rx_pkt_burst = i40e_vf_representor_rx_burst; + ethdev->tx_pkt_burst = i40e_vf_representor_tx_burst; + + vf = &pf->vfs[representor->vf_id]; + + if (!vf->vsi) { + PMD_DRV_LOG(ERR, "Invalid VSI."); + return -ENODEV; + } + + ethdev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR; + + /* Setting the number queues allocated to the VF */ + ethdev->data->nb_rx_queues = vf->vsi->nb_qps; + ethdev->data->nb_tx_queues = vf->vsi->nb_qps; + + ethdev->data->mac_addrs = &vf->mac_addr; + + /* Link state. Inherited from PF */ + link = &representor->adapter->eth_dev->data->dev_link; + + ethdev->data->dev_link.link_speed = link->link_speed; + ethdev->data->dev_link.link_duplex = link->link_duplex; + ethdev->data->dev_link.link_status = link->link_status; + ethdev->data->dev_link.link_autoneg = link->link_autoneg; + + return 0; +} + +int +i40e_vf_representor_uninit(struct rte_eth_dev *ethdev __rte_unused) +{ + return 0; +} diff --git a/drivers/net/i40e/meson.build b/drivers/net/i40e/meson.build index 8764b0e5..d783f362 100644 --- a/drivers/net/i40e/meson.build +++ b/drivers/net/i40e/meson.build @@ -1,10 +1,13 @@ # SPDX-License-Identifier: BSD-3-Clause # Copyright(c) 2017 Intel Corporation +version = 2 + cflags += ['-DPF_DRIVER', '-DVF_DRIVER', '-DINTEGRATED_VF', - '-DX722_A0_SUPPORT'] + '-DX722_A0_SUPPORT', + '-DALLOW_EXPERIMENTAL_API'] subdir('base') objs = [base_objs] @@ -17,10 +20,12 @@ sources = files( 'i40e_fdir.c', 'i40e_flow.c', 'i40e_tm.c', + 'i40e_vf_representor.c', 'rte_pmd_i40e.c' ) deps += ['hash'] +includes += include_directories('base') if arch_subdir == 'x86' dpdk_conf.set('RTE_LIBRTE_I40E_INC_VECTOR', 1) @@ -36,11 +41,10 @@ if arch_subdir == 'x86' 'i40e_rxtx_vec_avx2.c', dependencies: [static_rte_ethdev, static_rte_kvargs, static_rte_hash], - c_args: '-mavx2') + include_directories: includes, + c_args: [cflags, '-mavx2']) objs += i40e_avx2_lib.extract_objects('i40e_rxtx_vec_avx2.c') endif endif -includes += include_directories('base') - install_headers('rte_pmd_i40e.h') diff --git a/drivers/net/i40e/rte_pmd_i40e.c b/drivers/net/i40e/rte_pmd_i40e.c index dae59e6d..7aa1a751 100644 --- a/drivers/net/i40e/rte_pmd_i40e.c +++ b/drivers/net/i40e/rte_pmd_i40e.c @@ -570,6 +570,49 @@ rte_pmd_i40e_set_vf_mac_addr(uint16_t port, uint16_t vf_id, return 0; } +static const struct ether_addr null_mac_addr; + +int +rte_pmd_i40e_remove_vf_mac_addr(uint16_t port, uint16_t vf_id, + struct ether_addr *mac_addr) +{ + struct rte_eth_dev *dev; + struct i40e_pf_vf *vf; + struct i40e_vsi *vsi; + struct i40e_pf *pf; + + if (i40e_validate_mac_addr((u8 *)mac_addr) != I40E_SUCCESS) + return -EINVAL; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_i40e_supported(dev)) + return -ENOTSUP; + + pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + + if (vf_id >= pf->vf_num || !pf->vfs) + return -EINVAL; + + vf = &pf->vfs[vf_id]; + vsi = vf->vsi; + if (!vsi) { + PMD_DRV_LOG(ERR, "Invalid VSI."); + return -EINVAL; + } + + if (is_same_ether_addr(mac_addr, &vf->mac_addr)) + /* Reset the mac with NULL address */ + 
ether_addr_copy(&null_mac_addr, &vf->mac_addr); + + /* Remove the mac */ + i40e_vsi_delete_mac(vsi, mac_addr); + + return 0; +} + /* Set vlan strip on/off for specific VF from host */ int rte_pmd_i40e_set_vf_vlan_stripq(uint16_t port, uint16_t vf_id, uint8_t on) @@ -1530,6 +1573,11 @@ i40e_check_profile_info(uint16_t port, uint8_t *profile_info_sec) return 1; } } + /* profile with group id 0xff is compatible with any other profile */ + if ((pinfo->track_id & group_mask) == group_mask) { + rte_free(buff); + return 0; + } for (i = 0; i < p_list->p_count; i++) { p = &p_list->p_info[i]; if ((p->track_id & group_mask) == 0) { @@ -1540,6 +1588,8 @@ i40e_check_profile_info(uint16_t port, uint8_t *profile_info_sec) } for (i = 0; i < p_list->p_count; i++) { p = &p_list->p_info[i]; + if ((p->track_id & group_mask) == group_mask) + continue; if ((pinfo->track_id & group_mask) != (p->track_id & group_mask)) { PMD_DRV_LOG(INFO, "Profile of different group exists."); @@ -1603,8 +1653,6 @@ rte_pmd_i40e_process_ddp_package(uint16_t port, uint8_t *buff, return -EINVAL; } - i40e_update_customized_info(dev, buff, size); - /* Find metadata segment */ metadata_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_METADATA, pkg_hdr); @@ -1708,6 +1756,10 @@ rte_pmd_i40e_process_ddp_package(uint16_t port, uint8_t *buff, } } + if (op == RTE_PMD_I40E_PKG_OP_WR_ADD || + op == RTE_PMD_I40E_PKG_OP_WR_DEL) + i40e_update_customized_info(dev, buff, size, op); + rte_free(profile_info_sec); return status; } @@ -3071,6 +3123,7 @@ rte_pmd_i40e_inset_set(uint16_t port, uint8_t pctype, { struct rte_eth_dev *dev; struct i40e_hw *hw; + struct i40e_pf *pf; uint64_t inset_reg; uint32_t mask_reg[2]; int i; @@ -3086,10 +3139,12 @@ rte_pmd_i40e_inset_set(uint16_t port, uint8_t pctype, return -EINVAL; hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); - /* Clear mask first */ - for (i = 0; i < 2; i++) - i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype), 0); + if (pf->support_multi_driver) { + PMD_DRV_LOG(ERR, "Input set configuration is not supported."); + return -ENOTSUP; + } inset_reg = inset->inset; for (i = 0; i < 2; i++) @@ -3098,14 +3153,17 @@ rte_pmd_i40e_inset_set(uint16_t port, uint8_t pctype, switch (inset_type) { case INSET_HASH: - i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(0, pctype), - (uint32_t)(inset_reg & UINT32_MAX)); - i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(1, pctype), - (uint32_t)((inset_reg >> - I40E_32_BIT_WIDTH) & UINT32_MAX)); + i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(0, pctype), + (uint32_t)(inset_reg & UINT32_MAX)); + i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(1, pctype), + (uint32_t)((inset_reg >> + I40E_32_BIT_WIDTH) & UINT32_MAX)); for (i = 0; i < 2; i++) - i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype), - mask_reg[i]); + i40e_check_write_global_reg(hw, + I40E_GLQF_HASH_MSK(i, pctype), + mask_reg[i]); + i40e_global_cfg_warning(I40E_WARNING_HASH_INSET); + i40e_global_cfg_warning(I40E_WARNING_HASH_MSK); break; case INSET_FDIR: i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0), @@ -3114,8 +3172,10 @@ rte_pmd_i40e_inset_set(uint16_t port, uint8_t pctype, (uint32_t)((inset_reg >> I40E_32_BIT_WIDTH) & UINT32_MAX)); for (i = 0; i < 2; i++) - i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype), - mask_reg[i]); + i40e_check_write_global_reg(hw, + I40E_GLQF_FD_MSK(i, pctype), + mask_reg[i]); + i40e_global_cfg_warning(I40E_WARNING_FD_MSK); break; case INSET_FDIR_FLX: i40e_check_write_reg(hw, 
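The DDP hunks above refine the group-compatibility rules: a profile whose group id is 0xff is compatible with any other, a group-0 profile tolerates no profile from another group, and two ordinary profiles must share a group. A sketch of that rule as a predicate; the mask below assumes the group id occupies the top byte of track_id, mirroring the mask defined elsewhere in rte_pmd_i40e.c.

#include <stdbool.h>
#include <stdint.h>

#define GROUP_MASK 0xff000000u	/* assumed group-id field of track_id */

static bool
ddp_groups_compatible(uint32_t track_id_a, uint32_t track_id_b)
{
	uint32_t ga = track_id_a & GROUP_MASK;
	uint32_t gb = track_id_b & GROUP_MASK;

	if (ga == GROUP_MASK || gb == GROUP_MASK)
		return true;	/* 0xff: compatible with everything */
	return ga == gb;	/* group 0 and ordinary groups: must match */
}

int
main(void)
{
	return ddp_groups_compatible(0xff000000u, 0x01000000u) ? 0 : 1;
}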
I40E_PRTQF_FD_FLXINSET(pctype), diff --git a/drivers/net/i40e/rte_pmd_i40e.h b/drivers/net/i40e/rte_pmd_i40e.h index d248adb1..be4a6024 100644 --- a/drivers/net/i40e/rte_pmd_i40e.h +++ b/drivers/net/i40e/rte_pmd_i40e.h @@ -455,6 +455,24 @@ int rte_pmd_i40e_set_vf_multicast_promisc(uint16_t port, int rte_pmd_i40e_set_vf_mac_addr(uint16_t port, uint16_t vf_id, struct ether_addr *mac_addr); +/** + * Remove the VF MAC address. + * + * @param port + * The port identifier of the Ethernet device. + * @param vf_id + * VF id. + * @param mac_addr + * VF MAC address. + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-EINVAL) if *vf* or *mac_addr* is invalid. + */ +int +rte_pmd_i40e_remove_vf_mac_addr(uint16_t port, uint16_t vf_id, + struct ether_addr *mac_addr); + /** * Enable/Disable vf vlan strip for all queues in a pool * diff --git a/drivers/net/ifc/Makefile b/drivers/net/ifc/Makefile new file mode 100644 index 00000000..1011995b --- /dev/null +++ b/drivers/net/ifc/Makefile @@ -0,0 +1,35 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Intel Corporation + +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_ifcvf_vdpa.a + +LDLIBS += -lpthread +LDLIBS += -lrte_eal -lrte_pci -lrte_vhost -lrte_bus_pci + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) +CFLAGS += -DALLOW_EXPERIMENTAL_API + +# +# Add extra flags for base driver source files to disable warnings in them +# +BASE_DRIVER_OBJS=$(sort $(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c)))) + +VPATH += $(SRCDIR)/base + +EXPORT_MAP := rte_ifcvf_version.map + +LIBABIVER := 1 + +# +# all source are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_LIBRTE_IFCVF_VDPA_PMD) += ifcvf_vdpa.c +SRCS-$(CONFIG_RTE_LIBRTE_IFCVF_VDPA_PMD) += ifcvf.c + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/net/ifc/base/ifcvf.c b/drivers/net/ifc/base/ifcvf.c new file mode 100644 index 00000000..4b22d9ed --- /dev/null +++ b/drivers/net/ifc/base/ifcvf.c @@ -0,0 +1,298 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Intel Corporation + */ + +#include "ifcvf.h" +#include "ifcvf_osdep.h" + +STATIC void * +get_cap_addr(struct ifcvf_hw *hw, struct ifcvf_pci_cap *cap) +{ + u8 bar = cap->bar; + u32 length = cap->length; + u32 offset = cap->offset; + + if (bar > IFCVF_PCI_MAX_RESOURCE - 1) { + DEBUGOUT("invalid bar: %u\n", bar); + return NULL; + } + + if (offset + length < offset) { + DEBUGOUT("offset(%u) + length(%u) overflows\n", + offset, length); + return NULL; + } + + if (offset + length > hw->mem_resource[cap->bar].len) { + DEBUGOUT("offset(%u) + length(%u) overflows bar length(%u)", + offset, length, (u32)hw->mem_resource[cap->bar].len); + return NULL; + } + + return hw->mem_resource[bar].addr + offset; +} + +int +ifcvf_init_hw(struct ifcvf_hw *hw, PCI_DEV *dev) +{ + int ret; + u8 pos; + struct ifcvf_pci_cap cap; + + ret = PCI_READ_CONFIG_BYTE(dev, &pos, PCI_CAPABILITY_LIST); + if (ret < 0) { + DEBUGOUT("failed to read pci capability list\n"); + return -1; + } + + while (pos) { + ret = PCI_READ_CONFIG_RANGE(dev, (u32 *)&cap, + sizeof(cap), pos); + if (ret < 0) { + DEBUGOUT("failed to read cap at pos: %x", pos); + break; + } + + if (cap.cap_vndr != PCI_CAP_ID_VNDR) + goto next; + + DEBUGOUT("cfg type: %u, bar: %u, offset: %u, " + "len: %u\n", cap.cfg_type, cap.bar, + cap.offset, cap.length); + + switch (cap.cfg_type) { + case IFCVF_PCI_CAP_COMMON_CFG: + hw->common_cfg = get_cap_addr(hw, &cap); + break; + case IFCVF_PCI_CAP_NOTIFY_CFG: + PCI_READ_CONFIG_DWORD(dev, &hw->notify_off_multiplier, 
+ pos + sizeof(cap)); + hw->notify_base = get_cap_addr(hw, &cap); + hw->notify_region = cap.bar; + break; + case IFCVF_PCI_CAP_ISR_CFG: + hw->isr = get_cap_addr(hw, &cap); + break; + case IFCVF_PCI_CAP_DEVICE_CFG: + hw->dev_cfg = get_cap_addr(hw, &cap); + break; + } +next: + pos = cap.cap_next; + } + + hw->lm_cfg = hw->mem_resource[4].addr; + + if (hw->common_cfg == NULL || hw->notify_base == NULL || + hw->isr == NULL || hw->dev_cfg == NULL) { + DEBUGOUT("capability incomplete\n"); + return -1; + } + + DEBUGOUT("capability mapping:\ncommon cfg: %p\n" + "notify base: %p\nisr cfg: %p\ndevice cfg: %p\n" + "multiplier: %u\n", + hw->common_cfg, hw->dev_cfg, + hw->isr, hw->notify_base, + hw->notify_off_multiplier); + + return 0; +} + +STATIC u8 +ifcvf_get_status(struct ifcvf_hw *hw) +{ + return IFCVF_READ_REG8(&hw->common_cfg->device_status); +} + +STATIC void +ifcvf_set_status(struct ifcvf_hw *hw, u8 status) +{ + IFCVF_WRITE_REG8(status, &hw->common_cfg->device_status); +} + +STATIC void +ifcvf_reset(struct ifcvf_hw *hw) +{ + ifcvf_set_status(hw, 0); + + /* flush status write */ + while (ifcvf_get_status(hw)) + msec_delay(1); +} + +STATIC void +ifcvf_add_status(struct ifcvf_hw *hw, u8 status) +{ + if (status != 0) + status |= ifcvf_get_status(hw); + + ifcvf_set_status(hw, status); + ifcvf_get_status(hw); +} + +u64 +ifcvf_get_features(struct ifcvf_hw *hw) +{ + u32 features_lo, features_hi; + struct ifcvf_pci_common_cfg *cfg = hw->common_cfg; + + IFCVF_WRITE_REG32(0, &cfg->device_feature_select); + features_lo = IFCVF_READ_REG32(&cfg->device_feature); + + IFCVF_WRITE_REG32(1, &cfg->device_feature_select); + features_hi = IFCVF_READ_REG32(&cfg->device_feature); + + return ((u64)features_hi << 32) | features_lo; +} + +STATIC void +ifcvf_set_features(struct ifcvf_hw *hw, u64 features) +{ + struct ifcvf_pci_common_cfg *cfg = hw->common_cfg; + + IFCVF_WRITE_REG32(0, &cfg->guest_feature_select); + IFCVF_WRITE_REG32(features & ((1ULL << 32) - 1), &cfg->guest_feature); + + IFCVF_WRITE_REG32(1, &cfg->guest_feature_select); + IFCVF_WRITE_REG32(features >> 32, &cfg->guest_feature); +} + +STATIC int +ifcvf_config_features(struct ifcvf_hw *hw) +{ + u64 host_features; + + host_features = ifcvf_get_features(hw); + hw->req_features &= host_features; + + ifcvf_set_features(hw, hw->req_features); + ifcvf_add_status(hw, IFCVF_CONFIG_STATUS_FEATURES_OK); + + if (!(ifcvf_get_status(hw) & IFCVF_CONFIG_STATUS_FEATURES_OK)) { + DEBUGOUT("failed to set FEATURES_OK status\n"); + return -1; + } + + return 0; +} + +STATIC void +io_write64_twopart(u64 val, u32 *lo, u32 *hi) +{ + IFCVF_WRITE_REG32(val & ((1ULL << 32) - 1), lo); + IFCVF_WRITE_REG32(val >> 32, hi); +} + +STATIC int +ifcvf_hw_enable(struct ifcvf_hw *hw) +{ + struct ifcvf_pci_common_cfg *cfg; + u8 *lm_cfg; + u32 i; + u16 notify_off; + + cfg = hw->common_cfg; + lm_cfg = hw->lm_cfg; + + IFCVF_WRITE_REG16(0, &cfg->msix_config); + if (IFCVF_READ_REG16(&cfg->msix_config) == IFCVF_MSI_NO_VECTOR) { + DEBUGOUT("msix vec alloc failed for device config\n"); + return -1; + } + + for (i = 0; i < hw->nr_vring; i++) { + IFCVF_WRITE_REG16(i, &cfg->queue_select); + io_write64_twopart(hw->vring[i].desc, &cfg->queue_desc_lo, + &cfg->queue_desc_hi); + io_write64_twopart(hw->vring[i].avail, &cfg->queue_avail_lo, + &cfg->queue_avail_hi); + io_write64_twopart(hw->vring[i].used, &cfg->queue_used_lo, + &cfg->queue_used_hi); + IFCVF_WRITE_REG16(hw->vring[i].size, &cfg->queue_size); + + *(u32 *)(lm_cfg + IFCVF_LM_RING_STATE_OFFSET + + (i / 2) * IFCVF_LM_CFG_SIZE + (i % 2) * 4) = + 
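get_cap_addr() above refuses to map a vendor capability unless the BAR index exists, offset + length does not wrap the 32-bit range, and the span fits inside the BAR. The same validation as a standalone helper:

#include <stddef.h>
#include <stdint.h>

#define MAX_BARS 6	/* mirrors IFCVF_PCI_MAX_RESOURCE */

struct bar {
	uint8_t *addr;
	uint64_t len;
};

static void *
cap_addr(const struct bar *bars, uint8_t bar, uint32_t off, uint32_t len)
{
	if (bar >= MAX_BARS)
		return NULL;	/* no such BAR */
	if (off + len < off)
		return NULL;	/* 32-bit overflow in offset + length */
	if ((uint64_t)off + len > bars[bar].len)
		return NULL;	/* capability runs past the end of the BAR */
	return bars[bar].addr + off;
}

int
main(void)
{
	static uint8_t bar0[4096];
	struct bar bars[MAX_BARS] = { { bar0, sizeof(bar0) } };

	return cap_addr(bars, 0, 16, 64) ? 0 : 1;
}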
(u32)hw->vring[i].last_avail_idx | + ((u32)hw->vring[i].last_used_idx << 16); + + IFCVF_WRITE_REG16(i + 1, &cfg->queue_msix_vector); + if (IFCVF_READ_REG16(&cfg->queue_msix_vector) == + IFCVF_MSI_NO_VECTOR) { + DEBUGOUT("queue %u, msix vec alloc failed\n", + i); + return -1; + } + + notify_off = IFCVF_READ_REG16(&cfg->queue_notify_off); + hw->notify_addr[i] = (void *)((u8 *)hw->notify_base + + notify_off * hw->notify_off_multiplier); + IFCVF_WRITE_REG16(1, &cfg->queue_enable); + } + + return 0; +} + +STATIC void +ifcvf_hw_disable(struct ifcvf_hw *hw) +{ + u32 i; + struct ifcvf_pci_common_cfg *cfg; + u32 ring_state; + + cfg = hw->common_cfg; + + IFCVF_WRITE_REG16(IFCVF_MSI_NO_VECTOR, &cfg->msix_config); + for (i = 0; i < hw->nr_vring; i++) { + IFCVF_WRITE_REG16(i, &cfg->queue_select); + IFCVF_WRITE_REG16(0, &cfg->queue_enable); + IFCVF_WRITE_REG16(IFCVF_MSI_NO_VECTOR, &cfg->queue_msix_vector); + ring_state = *(u32 *)(hw->lm_cfg + IFCVF_LM_RING_STATE_OFFSET + + (i / 2) * IFCVF_LM_CFG_SIZE + (i % 2) * 4); + hw->vring[i].last_avail_idx = (u16)ring_state; + hw->vring[i].last_used_idx = (u16)(ring_state >> 16); + } +} + +int +ifcvf_start_hw(struct ifcvf_hw *hw) +{ + ifcvf_reset(hw); + ifcvf_add_status(hw, IFCVF_CONFIG_STATUS_ACK); + ifcvf_add_status(hw, IFCVF_CONFIG_STATUS_DRIVER); + + if (ifcvf_config_features(hw) < 0) + return -1; + + if (ifcvf_hw_enable(hw) < 0) + return -1; + + ifcvf_add_status(hw, IFCVF_CONFIG_STATUS_DRIVER_OK); + return 0; +} + +void +ifcvf_stop_hw(struct ifcvf_hw *hw) +{ + ifcvf_hw_disable(hw); + ifcvf_reset(hw); +} + +void +ifcvf_notify_queue(struct ifcvf_hw *hw, u16 qid) +{ + IFCVF_WRITE_REG16(qid, hw->notify_addr[qid]); +} + +u8 +ifcvf_get_notify_region(struct ifcvf_hw *hw) +{ + return hw->notify_region; +} + +u64 +ifcvf_get_queue_notify_off(struct ifcvf_hw *hw, int qid) +{ + return (u8 *)hw->notify_addr[qid] - + (u8 *)hw->mem_resource[hw->notify_region].addr; +} diff --git a/drivers/net/ifc/base/ifcvf.h b/drivers/net/ifc/base/ifcvf.h new file mode 100644 index 00000000..badacb61 --- /dev/null +++ b/drivers/net/ifc/base/ifcvf.h @@ -0,0 +1,154 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Intel Corporation + */ + +#ifndef _IFCVF_H_ +#define _IFCVF_H_ + +#include "ifcvf_osdep.h" + +#define IFCVF_VENDOR_ID 0x1AF4 +#define IFCVF_DEVICE_ID 0x1041 +#define IFCVF_SUBSYS_VENDOR_ID 0x8086 +#define IFCVF_SUBSYS_DEVICE_ID 0x001A + +#define IFCVF_MAX_QUEUES 1 +#define VIRTIO_F_IOMMU_PLATFORM 33 + +/* Common configuration */ +#define IFCVF_PCI_CAP_COMMON_CFG 1 +/* Notifications */ +#define IFCVF_PCI_CAP_NOTIFY_CFG 2 +/* ISR Status */ +#define IFCVF_PCI_CAP_ISR_CFG 3 +/* Device specific configuration */ +#define IFCVF_PCI_CAP_DEVICE_CFG 4 +/* PCI configuration access */ +#define IFCVF_PCI_CAP_PCI_CFG 5 + +#define IFCVF_CONFIG_STATUS_RESET 0x00 +#define IFCVF_CONFIG_STATUS_ACK 0x01 +#define IFCVF_CONFIG_STATUS_DRIVER 0x02 +#define IFCVF_CONFIG_STATUS_DRIVER_OK 0x04 +#define IFCVF_CONFIG_STATUS_FEATURES_OK 0x08 +#define IFCVF_CONFIG_STATUS_FAILED 0x80 + +#define IFCVF_MSI_NO_VECTOR 0xffff +#define IFCVF_PCI_MAX_RESOURCE 6 + +#define IFCVF_LM_CFG_SIZE 0x40 +#define IFCVF_LM_RING_STATE_OFFSET 0x20 + +#define IFCVF_LM_LOGGING_CTRL 0x0 + +#define IFCVF_LM_BASE_ADDR_LOW 0x10 +#define IFCVF_LM_BASE_ADDR_HIGH 0x14 +#define IFCVF_LM_END_ADDR_LOW 0x18 +#define IFCVF_LM_END_ADDR_HIGH 0x1c + +#define IFCVF_LM_DISABLE 0x0 +#define IFCVF_LM_ENABLE_VF 0x1 +#define IFCVF_LM_ENABLE_PF 0x3 + +#define IFCVF_32_BIT_MASK 0xffffffff + + +struct ifcvf_pci_cap { + u8 cap_vndr; /* 
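The live-migration block above packs each ring's last_avail_idx and last_used_idx into a single 32-bit word, avail in the low half and used in the high half; ifcvf_hw_disable() reverses the packing when it saves state. Standalone pack/unpack with a self-test:

#include <assert.h>
#include <stdint.h>

static uint32_t
ring_state_pack(uint16_t last_avail, uint16_t last_used)
{
	return (uint32_t)last_avail | ((uint32_t)last_used << 16);
}

static void
ring_state_unpack(uint32_t state, uint16_t *last_avail, uint16_t *last_used)
{
	*last_avail = (uint16_t)state;
	*last_used = (uint16_t)(state >> 16);
}

int
main(void)
{
	uint16_t avail, used;

	ring_state_unpack(ring_state_pack(0x1234, 0xbeef), &avail, &used);
	assert(avail == 0x1234 && used == 0xbeef);
	return 0;
}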
Generic PCI field: PCI_CAP_ID_VNDR */ + u8 cap_next; /* Generic PCI field: next ptr. */ + u8 cap_len; /* Generic PCI field: capability length */ + u8 cfg_type; /* Identifies the structure. */ + u8 bar; /* Where to find it. */ + u8 padding[3]; /* Pad to full dword. */ + u32 offset; /* Offset within bar. */ + u32 length; /* Length of the structure, in bytes. */ +}; + +struct ifcvf_pci_notify_cap { + struct ifcvf_pci_cap cap; + u32 notify_off_multiplier; /* Multiplier for queue_notify_off. */ +}; + +struct ifcvf_pci_common_cfg { + /* About the whole device. */ + u32 device_feature_select; + u32 device_feature; + u32 guest_feature_select; + u32 guest_feature; + u16 msix_config; + u16 num_queues; + u8 device_status; + u8 config_generation; + + /* About a specific virtqueue. */ + u16 queue_select; + u16 queue_size; + u16 queue_msix_vector; + u16 queue_enable; + u16 queue_notify_off; + u32 queue_desc_lo; + u32 queue_desc_hi; + u32 queue_avail_lo; + u32 queue_avail_hi; + u32 queue_used_lo; + u32 queue_used_hi; +}; + +struct ifcvf_net_config { + u8 mac[6]; + u16 status; + u16 max_virtqueue_pairs; +} __attribute__((packed)); + +struct ifcvf_pci_mem_resource { + u64 phys_addr; /**< Physical address, 0 if not resource. */ + u64 len; /**< Length of the resource. */ + u8 *addr; /**< Virtual address, NULL when not mapped. */ +}; + +struct vring_info { + u64 desc; + u64 avail; + u64 used; + u16 size; + u16 last_avail_idx; + u16 last_used_idx; +}; + +struct ifcvf_hw { + u64 req_features; + u8 notify_region; + u32 notify_off_multiplier; + struct ifcvf_pci_common_cfg *common_cfg; + struct ifcvf_net_device_config *dev_cfg; + u8 *isr; + u16 *notify_base; + u16 *notify_addr[IFCVF_MAX_QUEUES * 2]; + u8 *lm_cfg; + struct vring_info vring[IFCVF_MAX_QUEUES * 2]; + u8 nr_vring; + struct ifcvf_pci_mem_resource mem_resource[IFCVF_PCI_MAX_RESOURCE]; +}; + +int +ifcvf_init_hw(struct ifcvf_hw *hw, PCI_DEV *dev); + +u64 +ifcvf_get_features(struct ifcvf_hw *hw); + +int +ifcvf_start_hw(struct ifcvf_hw *hw); + +void +ifcvf_stop_hw(struct ifcvf_hw *hw); + +void +ifcvf_notify_queue(struct ifcvf_hw *hw, u16 qid); + +u8 +ifcvf_get_notify_region(struct ifcvf_hw *hw); + +u64 +ifcvf_get_queue_notify_off(struct ifcvf_hw *hw, int qid); + +#endif /* _IFCVF_H_ */ diff --git a/drivers/net/ifc/base/ifcvf_osdep.h b/drivers/net/ifc/base/ifcvf_osdep.h new file mode 100644 index 00000000..cf151ef5 --- /dev/null +++ b/drivers/net/ifc/base/ifcvf_osdep.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Intel Corporation + */ + +#ifndef _IFCVF_OSDEP_H_ +#define _IFCVF_OSDEP_H_ + +#include +#include + +#include +#include +#include +#include +#include + +#define DEBUGOUT(S, args...) 
RTE_LOG(DEBUG, PMD, S, ##args) +#define STATIC static + +#define msec_delay rte_delay_ms + +#define IFCVF_READ_REG8(reg) rte_read8(reg) +#define IFCVF_WRITE_REG8(val, reg) rte_write8((val), (reg)) +#define IFCVF_READ_REG16(reg) rte_read16(reg) +#define IFCVF_WRITE_REG16(val, reg) rte_write16((val), (reg)) +#define IFCVF_READ_REG32(reg) rte_read32(reg) +#define IFCVF_WRITE_REG32(val, reg) rte_write32((val), (reg)) + +typedef struct rte_pci_device PCI_DEV; + +#define PCI_READ_CONFIG_BYTE(dev, val, where) \ + rte_pci_read_config(dev, val, 1, where) + +#define PCI_READ_CONFIG_DWORD(dev, val, where) \ + rte_pci_read_config(dev, val, 4, where) + +typedef uint8_t u8; +typedef int8_t s8; +typedef uint16_t u16; +typedef int16_t s16; +typedef uint32_t u32; +typedef int32_t s32; +typedef int64_t s64; +typedef uint64_t u64; + +static inline int +PCI_READ_CONFIG_RANGE(PCI_DEV *dev, uint32_t *val, int size, int where) +{ + return rte_pci_read_config(dev, val, size, where); +} + +#endif /* _IFCVF_OSDEP_H_ */ diff --git a/drivers/net/ifc/ifcvf_vdpa.c b/drivers/net/ifc/ifcvf_vdpa.c new file mode 100644 index 00000000..c6627c23 --- /dev/null +++ b/drivers/net/ifc/ifcvf_vdpa.c @@ -0,0 +1,792 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Intel Corporation + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "base/ifcvf.h" + +#define DRV_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, ifcvf_vdpa_logtype, \ + "%s(): " fmt "\n", __func__, ##args) + +#ifndef PAGE_SIZE +#define PAGE_SIZE 4096 +#endif + +static int ifcvf_vdpa_logtype; + +struct ifcvf_internal { + struct rte_vdpa_dev_addr dev_addr; + struct rte_pci_device *pdev; + struct ifcvf_hw hw; + int vfio_container_fd; + int vfio_group_fd; + int vfio_dev_fd; + pthread_t tid; /* thread for notify relay */ + int epfd; + int vid; + int did; + uint16_t max_queues; + uint64_t features; + rte_atomic32_t started; + rte_atomic32_t dev_attached; + rte_atomic32_t running; + rte_spinlock_t lock; +}; + +struct internal_list { + TAILQ_ENTRY(internal_list) next; + struct ifcvf_internal *internal; +}; + +TAILQ_HEAD(internal_list_head, internal_list); +static struct internal_list_head internal_list = + TAILQ_HEAD_INITIALIZER(internal_list); + +static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER; + +static struct internal_list * +find_internal_resource_by_did(int did) +{ + int found = 0; + struct internal_list *list; + + pthread_mutex_lock(&internal_list_lock); + + TAILQ_FOREACH(list, &internal_list, next) { + if (did == list->internal->did) { + found = 1; + break; + } + } + + pthread_mutex_unlock(&internal_list_lock); + + if (!found) + return NULL; + + return list; +} + +static struct internal_list * +find_internal_resource_by_dev(struct rte_pci_device *pdev) +{ + int found = 0; + struct internal_list *list; + + pthread_mutex_lock(&internal_list_lock); + + TAILQ_FOREACH(list, &internal_list, next) { + if (pdev == list->internal->pdev) { + found = 1; + break; + } + } + + pthread_mutex_unlock(&internal_list_lock); + + if (!found) + return NULL; + + return list; +} + +static int +ifcvf_vfio_setup(struct ifcvf_internal *internal) +{ + struct rte_pci_device *dev = internal->pdev; + char devname[RTE_DEV_NAME_MAX_LEN] = {0}; + int iommu_group_num; + int ret = 0; + int i; + + internal->vfio_dev_fd = -1; + internal->vfio_group_fd = -1; + internal->vfio_container_fd = -1; + + rte_pci_device_name(&dev->addr, devname, RTE_DEV_NAME_MAX_LEN); + 
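find_internal_resource_by_did() and find_internal_resource_by_dev() share one pattern: walk a TAILQ of registered devices under a mutex and return the matching node. A generic cut-down version keyed by an integer id:

#include <pthread.h>
#include <stddef.h>
#include <sys/queue.h>

struct node {
	TAILQ_ENTRY(node) next;
	int id;
};

TAILQ_HEAD(node_head, node);
static struct node_head nodes = TAILQ_HEAD_INITIALIZER(nodes);
static pthread_mutex_t nodes_lock = PTHREAD_MUTEX_INITIALIZER;

static struct node *
find_by_id(int id)
{
	struct node *n, *found = NULL;

	pthread_mutex_lock(&nodes_lock);
	TAILQ_FOREACH(n, &nodes, next) {
		if (n->id == id) {
			found = n;
			break;
		}
	}
	pthread_mutex_unlock(&nodes_lock);
	return found;	/* NULL when no such id is registered */
}

int
main(void)
{
	struct node n = { .id = 7 };

	TAILQ_INSERT_TAIL(&nodes, &n, next);
	return find_by_id(7) == &n ? 0 : 1;
}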
rte_vfio_get_group_num(rte_pci_get_sysfs_path(), devname, + &iommu_group_num); + + internal->vfio_container_fd = rte_vfio_container_create(); + if (internal->vfio_container_fd < 0) + return -1; + + internal->vfio_group_fd = rte_vfio_container_group_bind( + internal->vfio_container_fd, iommu_group_num); + if (internal->vfio_group_fd < 0) + goto err; + + if (rte_pci_map_device(dev)) + goto err; + + internal->vfio_dev_fd = dev->intr_handle.vfio_dev_fd; + + for (i = 0; i < RTE_MIN(PCI_MAX_RESOURCE, IFCVF_PCI_MAX_RESOURCE); + i++) { + internal->hw.mem_resource[i].addr = + internal->pdev->mem_resource[i].addr; + internal->hw.mem_resource[i].phys_addr = + internal->pdev->mem_resource[i].phys_addr; + internal->hw.mem_resource[i].len = + internal->pdev->mem_resource[i].len; + } + ret = ifcvf_init_hw(&internal->hw, internal->pdev); + + return ret; + +err: + rte_vfio_container_destroy(internal->vfio_container_fd); + return -1; +} + +static int +ifcvf_dma_map(struct ifcvf_internal *internal, int do_map) +{ + uint32_t i; + int ret; + struct rte_vhost_memory *mem = NULL; + int vfio_container_fd; + + ret = rte_vhost_get_mem_table(internal->vid, &mem); + if (ret < 0) { + DRV_LOG(ERR, "failed to get VM memory layout."); + goto exit; + } + + vfio_container_fd = internal->vfio_container_fd; + + for (i = 0; i < mem->nregions; i++) { + struct rte_vhost_mem_region *reg; + + reg = &mem->regions[i]; + DRV_LOG(INFO, "%s, region %u: HVA 0x%" PRIx64 ", " + "GPA 0x%" PRIx64 ", size 0x%" PRIx64 ".", + do_map ? "DMA map" : "DMA unmap", i, + reg->host_user_addr, reg->guest_phys_addr, reg->size); + + if (do_map) { + ret = rte_vfio_container_dma_map(vfio_container_fd, + reg->host_user_addr, reg->guest_phys_addr, + reg->size); + if (ret < 0) { + DRV_LOG(ERR, "DMA map failed."); + goto exit; + } + } else { + ret = rte_vfio_container_dma_unmap(vfio_container_fd, + reg->host_user_addr, reg->guest_phys_addr, + reg->size); + if (ret < 0) { + DRV_LOG(ERR, "DMA unmap failed."); + goto exit; + } + } + } + +exit: + if (mem) + free(mem); + return ret; +} + +static uint64_t +qva_to_gpa(int vid, uint64_t qva) +{ + struct rte_vhost_memory *mem = NULL; + struct rte_vhost_mem_region *reg; + uint32_t i; + uint64_t gpa = 0; + + if (rte_vhost_get_mem_table(vid, &mem) < 0) + goto exit; + + for (i = 0; i < mem->nregions; i++) { + reg = &mem->regions[i]; + + if (qva >= reg->host_user_addr && + qva < reg->host_user_addr + reg->size) { + gpa = qva - reg->host_user_addr + reg->guest_phys_addr; + break; + } + } + +exit: + if (mem) + free(mem); + return gpa; +} + +static int +vdpa_ifcvf_start(struct ifcvf_internal *internal) +{ + struct ifcvf_hw *hw = &internal->hw; + int i, nr_vring; + int vid; + struct rte_vhost_vring vq; + uint64_t gpa; + + vid = internal->vid; + nr_vring = rte_vhost_get_vring_num(vid); + rte_vhost_get_negotiated_features(vid, &hw->req_features); + + for (i = 0; i < nr_vring; i++) { + rte_vhost_get_vhost_vring(vid, i, &vq); + gpa = qva_to_gpa(vid, (uint64_t)(uintptr_t)vq.desc); + if (gpa == 0) { + DRV_LOG(ERR, "Fail to get GPA for descriptor ring."); + return -1; + } + hw->vring[i].desc = gpa; + + gpa = qva_to_gpa(vid, (uint64_t)(uintptr_t)vq.avail); + if (gpa == 0) { + DRV_LOG(ERR, "Fail to get GPA for available ring."); + return -1; + } + hw->vring[i].avail = gpa; + + gpa = qva_to_gpa(vid, (uint64_t)(uintptr_t)vq.used); + if (gpa == 0) { + DRV_LOG(ERR, "Fail to get GPA for used ring."); + return -1; + } + hw->vring[i].used = gpa; + + hw->vring[i].size = vq.size; + rte_vhost_get_vring_base(vid, i, &hw->vring[i].last_avail_idx, 
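qva_to_gpa() above resolves a vhost queue's virtual address to a guest-physical one by scanning the registered memory regions for the containing range. The core arithmetic standalone; returning 0 doubles as "not found", exactly as the driver treats it:

#include <stdint.h>

struct mem_region {
	uint64_t host_user_addr;	/* HVA where the region is mapped */
	uint64_t guest_phys_addr;	/* GPA the region starts at */
	uint64_t size;
};

static uint64_t
qva_to_gpa(const struct mem_region *regs, uint32_t nregions, uint64_t qva)
{
	uint32_t i;

	for (i = 0; i < nregions; i++) {
		const struct mem_region *r = &regs[i];

		if (qva >= r->host_user_addr &&
		    qva < r->host_user_addr + r->size)
			return qva - r->host_user_addr + r->guest_phys_addr;
	}
	return 0;
}

int
main(void)
{
	struct mem_region r = { 0x7f0000000000ULL, 0x40000000ULL, 0x1000 };

	return qva_to_gpa(&r, 1, 0x7f0000000010ULL) == 0x40000010ULL ? 0 : 1;
}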
+ &hw->vring[i].last_used_idx); + } + hw->nr_vring = i; + + return ifcvf_start_hw(&internal->hw); +} + +static void +vdpa_ifcvf_stop(struct ifcvf_internal *internal) +{ + struct ifcvf_hw *hw = &internal->hw; + uint32_t i; + int vid; + + vid = internal->vid; + ifcvf_stop_hw(hw); + + for (i = 0; i < hw->nr_vring; i++) + rte_vhost_set_vring_base(vid, i, hw->vring[i].last_avail_idx, + hw->vring[i].last_used_idx); +} + +#define MSIX_IRQ_SET_BUF_LEN (sizeof(struct vfio_irq_set) + \ + sizeof(int) * (IFCVF_MAX_QUEUES * 2 + 1)) +static int +vdpa_enable_vfio_intr(struct ifcvf_internal *internal) +{ + int ret; + uint32_t i, nr_vring; + char irq_set_buf[MSIX_IRQ_SET_BUF_LEN]; + struct vfio_irq_set *irq_set; + int *fd_ptr; + struct rte_vhost_vring vring; + + nr_vring = rte_vhost_get_vring_num(internal->vid); + + irq_set = (struct vfio_irq_set *)irq_set_buf; + irq_set->argsz = sizeof(irq_set_buf); + irq_set->count = nr_vring + 1; + irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | + VFIO_IRQ_SET_ACTION_TRIGGER; + irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX; + irq_set->start = 0; + fd_ptr = (int *)&irq_set->data; + fd_ptr[RTE_INTR_VEC_ZERO_OFFSET] = internal->pdev->intr_handle.fd; + + for (i = 0; i < nr_vring; i++) { + rte_vhost_get_vhost_vring(internal->vid, i, &vring); + fd_ptr[RTE_INTR_VEC_RXTX_OFFSET + i] = vring.callfd; + } + + ret = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set); + if (ret) { + DRV_LOG(ERR, "Error enabling MSI-X interrupts: %s", + strerror(errno)); + return -1; + } + + return 0; +} + +static int +vdpa_disable_vfio_intr(struct ifcvf_internal *internal) +{ + int ret; + char irq_set_buf[MSIX_IRQ_SET_BUF_LEN]; + struct vfio_irq_set *irq_set; + + irq_set = (struct vfio_irq_set *)irq_set_buf; + irq_set->argsz = sizeof(irq_set_buf); + irq_set->count = 0; + irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER; + irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX; + irq_set->start = 0; + + ret = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set); + if (ret) { + DRV_LOG(ERR, "Error disabling MSI-X interrupts: %s", + strerror(errno)); + return -1; + } + + return 0; +} + +static void * +notify_relay(void *arg) +{ + int i, kickfd, epfd, nfds = 0; + uint32_t qid, q_num; + struct epoll_event events[IFCVF_MAX_QUEUES * 2]; + struct epoll_event ev; + uint64_t buf; + int nbytes; + struct rte_vhost_vring vring; + struct ifcvf_internal *internal = (struct ifcvf_internal *)arg; + struct ifcvf_hw *hw = &internal->hw; + + q_num = rte_vhost_get_vring_num(internal->vid); + + epfd = epoll_create(IFCVF_MAX_QUEUES * 2); + if (epfd < 0) { + DRV_LOG(ERR, "failed to create epoll instance."); + return NULL; + } + internal->epfd = epfd; + + for (qid = 0; qid < q_num; qid++) { + ev.events = EPOLLIN | EPOLLPRI; + rte_vhost_get_vhost_vring(internal->vid, qid, &vring); + ev.data.u64 = qid | (uint64_t)vring.kickfd << 32; + if (epoll_ctl(epfd, EPOLL_CTL_ADD, vring.kickfd, &ev) < 0) { + DRV_LOG(ERR, "epoll add error: %s", strerror(errno)); + return NULL; + } + } + + for (;;) { + nfds = epoll_wait(epfd, events, q_num, -1); + if (nfds < 0) { + if (errno == EINTR) + continue; + DRV_LOG(ERR, "epoll_wait return fail\n"); + return NULL; + } + + for (i = 0; i < nfds; i++) { + qid = events[i].data.u32; + kickfd = (uint32_t)(events[i].data.u64 >> 32); + do { + nbytes = read(kickfd, &buf, 8); + if (nbytes < 0) { + if (errno == EINTR || + errno == EWOULDBLOCK || + errno == EAGAIN) + continue; + DRV_LOG(INFO, "Error reading " + "kickfd: %s", + strerror(errno)); + } + break; + } while (1); + + 
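vdpa_enable_vfio_intr() above builds a variable-length vfio_irq_set whose trailing data[] carries one eventfd per MSI-X vector: slot 0 for device config, one slot per vring after it. A reduced sketch against the stock linux/vfio.h ioctl interface; the vhost callfd lookup and error logging are elided:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

int
enable_msix(int vfio_dev_fd, const int *eventfds, int nvec)
{
	/* header plus one eventfd per vector, as a flexible array */
	char buf[sizeof(struct vfio_irq_set) + sizeof(int) * nvec];
	struct vfio_irq_set *irq_set = (struct vfio_irq_set *)buf;

	irq_set->argsz = sizeof(buf);
	irq_set->count = nvec;
	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
			VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
	irq_set->start = 0;
	memcpy(irq_set->data, eventfds, sizeof(int) * nvec);

	return ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
}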
ifcvf_notify_queue(hw, qid); + } + } + + return NULL; +} + +static int +setup_notify_relay(struct ifcvf_internal *internal) +{ + int ret; + + ret = pthread_create(&internal->tid, NULL, notify_relay, + (void *)internal); + if (ret) { + DRV_LOG(ERR, "failed to create notify relay pthread."); + return -1; + } + return 0; +} + +static int +unset_notify_relay(struct ifcvf_internal *internal) +{ + void *status; + + if (internal->tid) { + pthread_cancel(internal->tid); + pthread_join(internal->tid, &status); + } + internal->tid = 0; + + if (internal->epfd >= 0) + close(internal->epfd); + internal->epfd = -1; + + return 0; +} + +static int +update_datapath(struct ifcvf_internal *internal) +{ + int ret; + + rte_spinlock_lock(&internal->lock); + + if (!rte_atomic32_read(&internal->running) && + (rte_atomic32_read(&internal->started) && + rte_atomic32_read(&internal->dev_attached))) { + ret = ifcvf_dma_map(internal, 1); + if (ret) + goto err; + + ret = vdpa_enable_vfio_intr(internal); + if (ret) + goto err; + + ret = setup_notify_relay(internal); + if (ret) + goto err; + + ret = vdpa_ifcvf_start(internal); + if (ret) + goto err; + + rte_atomic32_set(&internal->running, 1); + } else if (rte_atomic32_read(&internal->running) && + (!rte_atomic32_read(&internal->started) || + !rte_atomic32_read(&internal->dev_attached))) { + vdpa_ifcvf_stop(internal); + + ret = unset_notify_relay(internal); + if (ret) + goto err; + + ret = vdpa_disable_vfio_intr(internal); + if (ret) + goto err; + + ret = ifcvf_dma_map(internal, 0); + if (ret) + goto err; + + rte_atomic32_set(&internal->running, 0); + } + + rte_spinlock_unlock(&internal->lock); + return 0; +err: + rte_spinlock_unlock(&internal->lock); + return ret; +} + +static int +ifcvf_dev_config(int vid) +{ + int did; + struct internal_list *list; + struct ifcvf_internal *internal; + + did = rte_vhost_get_vdpa_device_id(vid); + list = find_internal_resource_by_did(did); + if (list == NULL) { + DRV_LOG(ERR, "Invalid device id: %d", did); + return -1; + } + + internal = list->internal; + internal->vid = vid; + rte_atomic32_set(&internal->dev_attached, 1); + update_datapath(internal); + + return 0; +} + +static int +ifcvf_dev_close(int vid) +{ + int did; + struct internal_list *list; + struct ifcvf_internal *internal; + + did = rte_vhost_get_vdpa_device_id(vid); + list = find_internal_resource_by_did(did); + if (list == NULL) { + DRV_LOG(ERR, "Invalid device id: %d", did); + return -1; + } + + internal = list->internal; + rte_atomic32_set(&internal->dev_attached, 0); + update_datapath(internal); + + return 0; +} + +static int +ifcvf_get_vfio_group_fd(int vid) +{ + int did; + struct internal_list *list; + + did = rte_vhost_get_vdpa_device_id(vid); + list = find_internal_resource_by_did(did); + if (list == NULL) { + DRV_LOG(ERR, "Invalid device id: %d", did); + return -1; + } + + return list->internal->vfio_group_fd; +} + +static int +ifcvf_get_vfio_device_fd(int vid) +{ + int did; + struct internal_list *list; + + did = rte_vhost_get_vdpa_device_id(vid); + list = find_internal_resource_by_did(did); + if (list == NULL) { + DRV_LOG(ERR, "Invalid device id: %d", did); + return -1; + } + + return list->internal->vfio_dev_fd; +} + +static int +ifcvf_get_notify_area(int vid, int qid, uint64_t *offset, uint64_t *size) +{ + int did; + struct internal_list *list; + struct ifcvf_internal *internal; + struct vfio_region_info reg = { .argsz = sizeof(reg) }; + int ret; + + did = rte_vhost_get_vdpa_device_id(vid); + list = find_internal_resource_by_did(did); + if (list == NULL) { + 
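The relay thread above multiplexes every guest kick eventfd on one epoll instance, packing queue id and kickfd into the 64-bit epoll user data; on each event it drains the eventfd and rings the hardware doorbell. The essential loop, with the doorbell reduced to a callback and the event array assumed large enough for q_num queues:

#include <errno.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/epoll.h>

typedef void (*doorbell_fn)(uint32_t qid);

void
relay_loop(int epfd, uint32_t q_num, doorbell_fn kick_hw)
{
	struct epoll_event events[64];	/* assumes q_num <= 64 */
	uint64_t buf;
	int i, nfds;

	for (;;) {
		nfds = epoll_wait(epfd, events, (int)q_num, -1);
		if (nfds < 0) {
			if (errno == EINTR)
				continue;
			return;
		}
		for (i = 0; i < nfds; i++) {
			uint32_t qid = (uint32_t)events[i].data.u64;
			int kickfd = (int)(events[i].data.u64 >> 32);

			/* drain the kick, then ring the doorbell */
			while (read(kickfd, &buf, sizeof(buf)) < 0 &&
					errno == EINTR)
				;
			kick_hw(qid);
		}
	}
}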
DRV_LOG(ERR, "Invalid device id: %d", did); + return -1; + } + + internal = list->internal; + + reg.index = ifcvf_get_notify_region(&internal->hw); + ret = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_GET_REGION_INFO, &reg); + if (ret) { + DRV_LOG(ERR, "Failed to get device region info: %s", + strerror(errno)); + return -1; + } + + *offset = ifcvf_get_queue_notify_off(&internal->hw, qid) + reg.offset; + *size = 0x1000; + + return 0; +} + +static int +ifcvf_get_queue_num(int did, uint32_t *queue_num) +{ + struct internal_list *list; + + list = find_internal_resource_by_did(did); + if (list == NULL) { + DRV_LOG(ERR, "Invalid device id: %d", did); + return -1; + } + + *queue_num = list->internal->max_queues; + + return 0; +} + +static int +ifcvf_get_vdpa_features(int did, uint64_t *features) +{ + struct internal_list *list; + + list = find_internal_resource_by_did(did); + if (list == NULL) { + DRV_LOG(ERR, "Invalid device id: %d", did); + return -1; + } + + *features = list->internal->features; + + return 0; +} + +#define VDPA_SUPPORTED_PROTOCOL_FEATURES \ + (1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK | \ + 1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD) +static int +ifcvf_get_protocol_features(int did __rte_unused, uint64_t *features) +{ + *features = VDPA_SUPPORTED_PROTOCOL_FEATURES; + return 0; +} + +struct rte_vdpa_dev_ops ifcvf_ops = { + .get_queue_num = ifcvf_get_queue_num, + .get_features = ifcvf_get_vdpa_features, + .get_protocol_features = ifcvf_get_protocol_features, + .dev_conf = ifcvf_dev_config, + .dev_close = ifcvf_dev_close, + .set_vring_state = NULL, + .set_features = NULL, + .migration_done = NULL, + .get_vfio_group_fd = ifcvf_get_vfio_group_fd, + .get_vfio_device_fd = ifcvf_get_vfio_device_fd, + .get_notify_area = ifcvf_get_notify_area, +}; + +static int +ifcvf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + struct rte_pci_device *pci_dev) +{ + uint64_t features; + struct ifcvf_internal *internal = NULL; + struct internal_list *list = NULL; + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + list = rte_zmalloc("ifcvf", sizeof(*list), 0); + if (list == NULL) + goto error; + + internal = rte_zmalloc("ifcvf", sizeof(*internal), 0); + if (internal == NULL) + goto error; + + internal->pdev = pci_dev; + rte_spinlock_init(&internal->lock); + if (ifcvf_vfio_setup(internal) < 0) + return -1; + + internal->max_queues = IFCVF_MAX_QUEUES; + features = ifcvf_get_features(&internal->hw); + internal->features = (features & + ~(1ULL << VIRTIO_F_IOMMU_PLATFORM)) | + (1ULL << VHOST_USER_F_PROTOCOL_FEATURES); + + internal->dev_addr.pci_addr = pci_dev->addr; + internal->dev_addr.type = PCI_ADDR; + list->internal = internal; + + pthread_mutex_lock(&internal_list_lock); + TAILQ_INSERT_TAIL(&internal_list, list, next); + pthread_mutex_unlock(&internal_list_lock); + + internal->did = rte_vdpa_register_device(&internal->dev_addr, + &ifcvf_ops); + if (internal->did < 0) + goto error; + + rte_atomic32_set(&internal->started, 1); + update_datapath(internal); + + return 0; + +error: + rte_free(list); + rte_free(internal); + return -1; +} + +static int +ifcvf_pci_remove(struct rte_pci_device *pci_dev) +{ + struct ifcvf_internal *internal; + struct internal_list *list; + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + list = find_internal_resource_by_dev(pci_dev); + if (list == NULL) { + DRV_LOG(ERR, "Invalid device: %s", pci_dev->name); + return -1; + } + + internal = list->internal; + rte_atomic32_set(&internal->started, 0); + update_datapath(internal); + +
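On probe, the advertised feature set is derived from the device: VIRTIO_F_IOMMU_PLATFORM (bit 33, per base/ifcvf.h above) is masked out and VHOST_USER_F_PROTOCOL_FEATURES is forced on. The computation in isolation; the protocol-features bit value is an assumption made here for illustration:

#include <stdint.h>

#define VIRTIO_F_IOMMU_PLATFORM 33		/* from base/ifcvf.h */
#define VHOST_USER_F_PROTOCOL_FEATURES 30	/* assumed value */

uint64_t
advertised_features(uint64_t hw_features)
{
	return (hw_features & ~(1ULL << VIRTIO_F_IOMMU_PLATFORM)) |
		(1ULL << VHOST_USER_F_PROTOCOL_FEATURES);
}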
rte_pci_unmap_device(internal->pdev); + rte_vfio_container_destroy(internal->vfio_container_fd); + rte_vdpa_unregister_device(internal->did); + + pthread_mutex_lock(&internal_list_lock); + TAILQ_REMOVE(&internal_list, list, next); + pthread_mutex_unlock(&internal_list_lock); + + rte_free(list); + rte_free(internal); + + return 0; +} + +/* + * IFCVF has the same vendor ID and device ID as virtio net PCI + * device, with its specific subsystem vendor ID and device ID. + */ +static const struct rte_pci_id pci_id_ifcvf_map[] = { + { .class_id = RTE_CLASS_ANY_ID, + .vendor_id = IFCVF_VENDOR_ID, + .device_id = IFCVF_DEVICE_ID, + .subsystem_vendor_id = IFCVF_SUBSYS_VENDOR_ID, + .subsystem_device_id = IFCVF_SUBSYS_DEVICE_ID, + }, + + { .vendor_id = 0, /* sentinel */ + }, +}; + +static struct rte_pci_driver rte_ifcvf_vdpa = { + .id_table = pci_id_ifcvf_map, + .drv_flags = 0, + .probe = ifcvf_pci_probe, + .remove = ifcvf_pci_remove, +}; + +RTE_PMD_REGISTER_PCI(net_ifcvf, rte_ifcvf_vdpa); +RTE_PMD_REGISTER_PCI_TABLE(net_ifcvf, pci_id_ifcvf_map); +RTE_PMD_REGISTER_KMOD_DEP(net_ifcvf, "* vfio-pci"); + +RTE_INIT(ifcvf_vdpa_init_log); +static void +ifcvf_vdpa_init_log(void) +{ + ifcvf_vdpa_logtype = rte_log_register("pmd.net.ifcvf_vdpa"); + if (ifcvf_vdpa_logtype >= 0) + rte_log_set_level(ifcvf_vdpa_logtype, RTE_LOG_NOTICE); +} diff --git a/drivers/net/ifc/rte_ifcvf_version.map b/drivers/net/ifc/rte_ifcvf_version.map new file mode 100644 index 00000000..9b9ab1a4 --- /dev/null +++ b/drivers/net/ifc/rte_ifcvf_version.map @@ -0,0 +1,4 @@ +DPDK_18.05 { + + local: *; +}; diff --git a/drivers/net/ixgbe/Makefile b/drivers/net/ixgbe/Makefile index d0804fc5..7b6af353 100644 --- a/drivers/net/ixgbe/Makefile +++ b/drivers/net/ixgbe/Makefile @@ -20,9 +20,10 @@ ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y) # # CFLAGS for icc # -CFLAGS_BASE_DRIVER = -wd174 -wd593 -wd869 -wd981 -wd2259 +CFLAGS_BASE_DRIVER = -diag-disable 174 -diag-disable 593 -diag-disable 869 +CFLAGS_BASE_DRIVER += -diag-disable 981 -diag-disable 2259 -CFLAGS_ixgbe_rxtx.o += -wd3656 +CFLAGS_ixgbe_rxtx.o += -diag-disable 3656 else ifeq ($(CONFIG_RTE_TOOLCHAIN_CLANG),y) # @@ -103,6 +104,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_ipsec.c endif SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += rte_pmd_ixgbe.c SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_tm.c +SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_vf_representor.c # install this header file SYMLINK-$(CONFIG_RTE_LIBRTE_IXGBE_PMD)-include := rte_pmd_ixgbe.h diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c index 44832585..87d2ad09 100644 --- a/drivers/net/ixgbe/ixgbe_ethdev.c +++ b/drivers/net/ixgbe/ixgbe_ethdev.c @@ -20,7 +20,6 @@ #include #include #include -#include #include #include #include @@ -60,9 +59,6 @@ */ #define IXGBE_FC_LO 0x40 -/* Default minimum inter-interrupt interval for EITR configuration */ -#define IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT 0x79E - /* Timer value included in XOFF frames. */ #define IXGBE_FC_PAUSE 0x680 @@ -101,8 +97,6 @@ #define IXGBE_QUEUE_STAT_COUNTERS (sizeof(hw_stats->qprc) / sizeof(hw_stats->qprc[0])) -#define IXGBE_HKEY_MAX_INDEX 10 - /* Additional timesync values. 
*/ #define NSEC_PER_SEC 1000000000L #define IXGBE_INCVAL_10GB 0x66666666 @@ -118,7 +112,6 @@ #define IXGBE_VT_CTL_POOLING_MODE_MASK 0x00030000 #define IXGBE_VT_CTL_POOLING_MODE_ETAG 0x00010000 -#define DEFAULT_ETAG_ETYPE 0x893f #define IXGBE_ETAG_ETYPE 0x00005084 #define IXGBE_ETAG_ETYPE_MASK 0x0000ffff #define IXGBE_ETAG_ETYPE_VALID 0x80000000 @@ -133,7 +126,7 @@ #define IXGBE_EXVET_VET_EXT_SHIFT 16 #define IXGBE_DMATXCTL_VT_MASK 0xFFFF0000 -static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev); +static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params); static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev); static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev); static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev); @@ -196,6 +189,9 @@ static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on); static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on); +static void ixgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, + int mask); +static int ixgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask); static int ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask); static void ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue); static void ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue); @@ -228,7 +224,7 @@ static void ixgbe_dev_interrupt_delayed_handler(void *param); static int ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr, uint32_t index, uint32_t pool); static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index); -static void ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, +static int ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr); static void ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config); static bool is_device_supported(struct rte_eth_dev *dev, @@ -244,8 +240,8 @@ static int ixgbevf_dev_link_update(struct rte_eth_dev *dev, static void ixgbevf_dev_stop(struct rte_eth_dev *dev); static void ixgbevf_dev_close(struct rte_eth_dev *dev); static int ixgbevf_dev_reset(struct rte_eth_dev *dev); -static void ixgbevf_intr_disable(struct ixgbe_hw *hw); -static void ixgbevf_intr_enable(struct ixgbe_hw *hw); +static void ixgbevf_intr_disable(struct rte_eth_dev *dev); +static void ixgbevf_intr_enable(struct rte_eth_dev *dev); static int ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats); static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev); @@ -253,6 +249,7 @@ static int ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on); static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on); +static int ixgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask); static int ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask); static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on); static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, @@ -286,7 +283,7 @@ static int ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr, uint32_t index, uint32_t pool); static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index); -static void ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, +static int ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr); static int ixgbe_syn_filter_get(struct rte_eth_dev *dev, struct rte_eth_syn_filter *filter); @@ -328,6 +325,11 @@ static 
int ixgbe_get_eeprom(struct rte_eth_dev *dev, static int ixgbe_set_eeprom(struct rte_eth_dev *dev, struct rte_dev_eeprom_info *eeprom); +static int ixgbe_get_module_info(struct rte_eth_dev *dev, + struct rte_eth_dev_module_info *modinfo); +static int ixgbe_get_module_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *info); + static int ixgbevf_get_reg_length(struct rte_eth_dev *dev); static int ixgbevf_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs); @@ -565,6 +567,8 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = { .get_eeprom_length = ixgbe_get_eeprom_length, .get_eeprom = ixgbe_get_eeprom, .set_eeprom = ixgbe_set_eeprom, + .get_module_info = ixgbe_get_module_info, + .get_module_eeprom = ixgbe_get_module_eeprom, .get_dcb_info = ixgbe_dev_get_dcb_info, .timesync_adjust_time = ixgbe_timesync_adjust_time, .timesync_read_time = ixgbe_timesync_read_time, @@ -787,58 +791,6 @@ static const struct rte_ixgbe_xstats_name_off rte_ixgbevf_stats_strings[] = { #define IXGBEVF_NB_XSTATS (sizeof(rte_ixgbevf_stats_strings) / \ sizeof(rte_ixgbevf_stats_strings[0])) -/** - * Atomically reads the link status information from global - * structure rte_eth_dev. - * - * @param dev - * - Pointer to the structure rte_eth_dev to read from. - * - Pointer to the buffer to be saved with the link status. - * - * @return - * - On success, zero. - * - On failure, negative value. - */ -static inline int -rte_ixgbe_dev_atomic_read_link_status(struct rte_eth_dev *dev, - struct rte_eth_link *link) -{ - struct rte_eth_link *dst = link; - struct rte_eth_link *src = &(dev->data->dev_link); - - if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, - *(uint64_t *)src) == 0) - return -1; - - return 0; -} - -/** - * Atomically writes the link status information into global - * structure rte_eth_dev. - * - * @param dev - * - Pointer to the structure rte_eth_dev to read from. - * - Pointer to the buffer to be saved with the link status. - * - * @return - * - On success, zero. - * - On failure, negative value. - */ -static inline int -rte_ixgbe_dev_atomic_write_link_status(struct rte_eth_dev *dev, - struct rte_eth_link *link) -{ - struct rte_eth_link *dst = &(dev->data->dev_link); - struct rte_eth_link *src = link; - - if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, - *(uint64_t *)src) == 0) - return -1; - - return 0; -} - /* * This function is the same as ixgbe_is_sfp() in base/ixgbe.h. */ @@ -1096,7 +1048,7 @@ ixgbe_swfw_lock_reset(struct ixgbe_hw *hw) * It returns 0 on success. 
*/ static int -eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev) +eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) { struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; @@ -1339,6 +1291,8 @@ eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev) struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; struct ixgbe_hw *hw; + int retries = 0; + int ret; PMD_INIT_FUNC_TRACE(); @@ -1359,8 +1313,20 @@ eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev) /* disable uio intr before callback unregister */ rte_intr_disable(intr_handle); - rte_intr_callback_unregister(intr_handle, - ixgbe_dev_interrupt_handler, eth_dev); + + do { + ret = rte_intr_callback_unregister(intr_handle, + ixgbe_dev_interrupt_handler, eth_dev); + if (ret >= 0) { + break; + } else if (ret != -EAGAIN) { + PMD_INIT_LOG(ERR, + "intr callback unregister failed: %d", + ret); + return ret; + } + rte_delay_ms(100); + } while (retries++ < (10 + IXGBE_LINK_UP_TIME)); /* uninitialize PF if max_vfs not zero */ ixgbe_pf_host_uninit(eth_dev); @@ -1522,7 +1488,7 @@ static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev) } l2_tn_info->e_tag_en = FALSE; l2_tn_info->e_tag_fwd_en = FALSE; - l2_tn_info->e_tag_ether_type = DEFAULT_ETAG_ETYPE; + l2_tn_info->e_tag_ether_type = ETHER_TYPE_ETAG; return 0; } @@ -1641,7 +1607,7 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev) ixgbevf_dev_stats_reset(eth_dev); /* Disable the interrupts for VF */ - ixgbevf_intr_disable(hw); + ixgbevf_intr_disable(eth_dev); hw->mac.num_rar_entries = 128; /* The MAX of the underlying PF */ diag = hw->mac.ops.reset_hw(hw); @@ -1710,7 +1676,7 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev) rte_intr_callback_register(intr_handle, ixgbevf_dev_interrupt_handler, eth_dev); rte_intr_enable(intr_handle); - ixgbevf_intr_enable(hw); + ixgbevf_intr_enable(eth_dev); PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s", eth_dev->data->port_id, pci_dev->id.vendor_id, @@ -1743,7 +1709,7 @@ eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev) eth_dev->tx_pkt_burst = NULL; /* Disable the interrupts for VF */ - ixgbevf_intr_disable(hw); + ixgbevf_intr_disable(eth_dev); rte_free(eth_dev->data->mac_addrs); eth_dev->data->mac_addrs = NULL; @@ -1755,16 +1721,81 @@ eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev) return 0; } -static int eth_ixgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, - struct rte_pci_device *pci_dev) -{ - return rte_eth_dev_pci_generic_probe(pci_dev, - sizeof(struct ixgbe_adapter), eth_ixgbe_dev_init); +static int +eth_ixgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + struct rte_pci_device *pci_dev) +{ + char name[RTE_ETH_NAME_MAX_LEN]; + struct rte_eth_dev *pf_ethdev; + struct rte_eth_devargs eth_da; + int i, retval; + + if (pci_dev->device.devargs) { + retval = rte_eth_devargs_parse(pci_dev->device.devargs->args, + &eth_da); + if (retval) + return retval; + } else + memset(&eth_da, 0, sizeof(eth_da)); + + retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name, + sizeof(struct ixgbe_adapter), + eth_dev_pci_specific_init, pci_dev, + eth_ixgbe_dev_init, NULL); + + if (retval || eth_da.nb_representor_ports < 1) + return retval; + + pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name); + if (pf_ethdev == NULL) + return -ENODEV; + + /* probe VF representor ports */ + for (i = 0; i < eth_da.nb_representor_ports; i++) { + struct ixgbe_vf_info *vfinfo; +
struct ixgbe_vf_representor representor; + + vfinfo = *IXGBE_DEV_PRIVATE_TO_P_VFDATA( + pf_ethdev->data->dev_private); + if (vfinfo == NULL) { + PMD_DRV_LOG(ERR, + "no virtual functions supported by PF"); + break; + } + + representor.vf_id = eth_da.representor_ports[i]; + representor.switch_domain_id = vfinfo->switch_domain_id; + representor.pf_ethdev = pf_ethdev; + + /* representor port net_bdf_port */ + snprintf(name, sizeof(name), "net_%s_representor_%d", + pci_dev->device.name, + eth_da.representor_ports[i]); + + retval = rte_eth_dev_create(&pci_dev->device, name, + sizeof(struct ixgbe_vf_representor), NULL, NULL, + ixgbe_vf_representor_init, &representor); + + if (retval) + PMD_DRV_LOG(ERR, "failed to create ixgbe vf " + "representor %s.", name); + } + + return 0; } static int eth_ixgbe_pci_remove(struct rte_pci_device *pci_dev) { - return rte_eth_dev_pci_generic_remove(pci_dev, eth_ixgbe_dev_uninit); + struct rte_eth_dev *ethdev; + + ethdev = rte_eth_dev_allocated(pci_dev->device.name); + if (!ethdev) + return -ENODEV; + + if (ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR) + return rte_eth_dev_destroy(ethdev, ixgbe_vf_representor_uninit); + else + return rte_eth_dev_destroy(ethdev, eth_ixgbe_dev_uninit); } static struct rte_pci_driver rte_ixgbe_pmd = { @@ -1947,10 +1978,13 @@ ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on) rxq = dev->data->rx_queues[queue]; - if (on) + if (on) { rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED; - else + rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; + } else { rxq->vlan_flags = PKT_RX_VLAN; + rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; + } } static void @@ -2001,64 +2035,6 @@ ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue) ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 1); } -void -ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev) -{ - struct ixgbe_hw *hw = - IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - uint32_t ctrl; - uint16_t i; - struct ixgbe_rx_queue *rxq; - - PMD_INIT_FUNC_TRACE(); - - if (hw->mac.type == ixgbe_mac_82598EB) { - ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); - ctrl &= ~IXGBE_VLNCTRL_VME; - IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl); - } else { - /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */ - for (i = 0; i < dev->data->nb_rx_queues; i++) { - rxq = dev->data->rx_queues[i]; - ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx)); - ctrl &= ~IXGBE_RXDCTL_VME; - IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl); - - /* record those setting for HW strip per queue */ - ixgbe_vlan_hw_strip_bitmap_set(dev, i, 0); - } - } -} - -void -ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev) -{ - struct ixgbe_hw *hw = - IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - uint32_t ctrl; - uint16_t i; - struct ixgbe_rx_queue *rxq; - - PMD_INIT_FUNC_TRACE(); - - if (hw->mac.type == ixgbe_mac_82598EB) { - ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); - ctrl |= IXGBE_VLNCTRL_VME; - IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl); - } else { - /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */ - for (i = 0; i < dev->data->nb_rx_queues; i++) { - rxq = dev->data->rx_queues[i]; - ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx)); - ctrl |= IXGBE_RXDCTL_VME; - IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl); - - /* record those setting for HW strip per queue */ - ixgbe_vlan_hw_strip_bitmap_set(dev, i, 1); - } - } -} - static void ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev) { @@ -2114,25 +2090,93 @@ ixgbe_vlan_hw_extend_enable(struct 
rte_eth_dev *dev) */ } -static int -ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask) +void +ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode; + uint32_t ctrl; + uint16_t i; + struct ixgbe_rx_queue *rxq; + bool on; + + PMD_INIT_FUNC_TRACE(); + + if (hw->mac.type == ixgbe_mac_82598EB) { + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) { + ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + ctrl |= IXGBE_VLNCTRL_VME; + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl); + } else { + ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + ctrl &= ~IXGBE_VLNCTRL_VME; + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl); + } + } else { + /* + * Other 10G NIC, the VLAN strip can be setup + * per queue in RXDCTL + */ + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx)); + if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) { + ctrl |= IXGBE_RXDCTL_VME; + on = TRUE; + } else { + ctrl &= ~IXGBE_RXDCTL_VME; + on = FALSE; + } + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl); + + /* record those settings for HW strip per queue */ + ixgbe_vlan_hw_strip_bitmap_set(dev, i, on); + } + } +} + +static void +ixgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask) { + uint16_t i; + struct rte_eth_rxmode *rxmode; + struct ixgbe_rx_queue *rxq; + if (mask & ETH_VLAN_STRIP_MASK) { - if (dev->data->dev_conf.rxmode.hw_vlan_strip) - ixgbe_vlan_hw_strip_enable_all(dev); + rxmode = &dev->data->dev_conf.rxmode; + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; + } + else - ixgbe_vlan_hw_strip_disable_all(dev); + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; + } + } +} + +static int +ixgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask) +{ + struct rte_eth_rxmode *rxmode; + rxmode = &dev->data->dev_conf.rxmode; + + if (mask & ETH_VLAN_STRIP_MASK) { + ixgbe_vlan_hw_strip_config(dev); } if (mask & ETH_VLAN_FILTER_MASK) { - if (dev->data->dev_conf.rxmode.hw_vlan_filter) + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) ixgbe_vlan_hw_filter_enable(dev); else ixgbe_vlan_hw_filter_disable(dev); } if (mask & ETH_VLAN_EXTEND_MASK) { - if (dev->data->dev_conf.rxmode.hw_vlan_extend) + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) ixgbe_vlan_hw_extend_enable(dev); else ixgbe_vlan_hw_extend_disable(dev); @@ -2141,6 +2185,16 @@ ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask) return 0; } +static int +ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask) +{ + ixgbe_config_vlan_strip_on_all_queues(dev, mask); + + ixgbe_vlan_offload_config(dev, mask); + + return 0; +} + static void ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev) { @@ -2289,11 +2343,6 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev) if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) { const struct rte_eth_dcb_rx_conf *conf; - if (nb_rx_q != IXGBE_DCB_NB_QUEUES) { - PMD_INIT_LOG(ERR, "DCB selected, nb_rx_q != %d.", - IXGBE_DCB_NB_QUEUES); - return -EINVAL; - } conf = &dev_conf->rx_adv_conf.dcb_rx_conf; if (!(conf->nb_tcs == ETH_4_TCS || conf->nb_tcs == ETH_8_TCS)) { @@ -2307,11 +2356,6 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev) if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) { const struct rte_eth_dcb_tx_conf 
*conf; - if (nb_tx_q != IXGBE_DCB_NB_QUEUES) { - PMD_INIT_LOG(ERR, "DCB, nb_tx_q != %d.", - IXGBE_DCB_NB_QUEUES); - return -EINVAL; - } conf = &dev_conf->tx_adv_conf.dcb_tx_conf; if (!(conf->nb_tcs == ETH_4_TCS || conf->nb_tcs == ETH_8_TCS)) { @@ -2480,6 +2524,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev) uint32_t intr_vector = 0; int err, link_up = 0, negotiate = 0; uint32_t speed = 0; + uint32_t allowed_speeds = 0; int mask = 0; int status; uint16_t vf, idx; @@ -2561,7 +2606,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev) mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | ETH_VLAN_EXTEND_MASK; - err = ixgbe_vlan_offload_set(dev, mask); + err = ixgbe_vlan_offload_config(dev, mask); if (err) { PMD_INIT_LOG(ERR, "Unable to set VLAN offload"); goto error; @@ -2628,9 +2673,21 @@ ixgbe_dev_start(struct rte_eth_dev *dev) if (err) goto error; + switch (hw->mac.type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G | + ETH_LINK_SPEED_2_5G | ETH_LINK_SPEED_5G | + ETH_LINK_SPEED_10G; + break; + default: + allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G | + ETH_LINK_SPEED_10G; + } + link_speeds = &dev->data->dev_conf.link_speeds; - if (*link_speeds & ~(ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G | - ETH_LINK_SPEED_10G)) { + if (*link_speeds & ~allowed_speeds) { PMD_INIT_LOG(ERR, "Invalid link setting"); goto error; } @@ -2656,6 +2713,10 @@ ixgbe_dev_start(struct rte_eth_dev *dev) } else { if (*link_speeds & ETH_LINK_SPEED_10G) speed |= IXGBE_LINK_SPEED_10GB_FULL; + if (*link_speeds & ETH_LINK_SPEED_5G) + speed |= IXGBE_LINK_SPEED_5GB_FULL; + if (*link_speeds & ETH_LINK_SPEED_2_5G) + speed |= IXGBE_LINK_SPEED_2_5GB_FULL; if (*link_speeds & ETH_LINK_SPEED_1G) speed |= IXGBE_LINK_SPEED_1GB_FULL; if (*link_speeds & ETH_LINK_SPEED_100M) @@ -2666,6 +2727,8 @@ ixgbe_dev_start(struct rte_eth_dev *dev) if (err) goto error; + ixgbe_dev_link_update(dev, 0); + skip_link_setup: if (rte_intr_allow_others(intr_handle)) { @@ -2757,7 +2820,7 @@ ixgbe_dev_stop(struct rte_eth_dev *dev) /* Clear recorded link status */ memset(&link, 0, sizeof(link)); - rte_ixgbe_dev_atomic_write_link_status(dev, &link); + rte_eth_linkstatus_set(dev, &link); if (!rte_intr_allow_others(intr_handle)) /* resume to the default handler */ @@ -2881,7 +2944,7 @@ ixgbe_dev_reset(struct rte_eth_dev *dev) if (ret) return ret; - ret = eth_ixgbe_dev_init(dev); + ret = eth_ixgbe_dev_init(dev, NULL); return ret; } @@ -3625,7 +3688,6 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct rte_eth_conf *dev_conf = &dev->data->dev_conf; - dev_info->pci_dev = pci_dev; dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues; dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues; if (RTE_ETH_DEV_SRIOV(dev).active == 0) { @@ -3647,54 +3709,11 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) else dev_info->max_vmdq_pools = ETH_64_POOLS; dev_info->vmdq_queue_num = dev_info->max_rx_queues; - dev_info->rx_offload_capa = - DEV_RX_OFFLOAD_VLAN_STRIP | - DEV_RX_OFFLOAD_IPV4_CKSUM | - DEV_RX_OFFLOAD_UDP_CKSUM | - DEV_RX_OFFLOAD_TCP_CKSUM | - DEV_RX_OFFLOAD_CRC_STRIP; - - /* - * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV - * mode. 
- */ - if ((hw->mac.type == ixgbe_mac_82599EB || - hw->mac.type == ixgbe_mac_X540) && - !RTE_ETH_DEV_SRIOV(dev).active) - dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO; - - if (hw->mac.type == ixgbe_mac_82599EB || - hw->mac.type == ixgbe_mac_X540) - dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_MACSEC_STRIP; - - if (hw->mac.type == ixgbe_mac_X550 || - hw->mac.type == ixgbe_mac_X550EM_x || - hw->mac.type == ixgbe_mac_X550EM_a) - dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM; - - dev_info->tx_offload_capa = - DEV_TX_OFFLOAD_VLAN_INSERT | - DEV_TX_OFFLOAD_IPV4_CKSUM | - DEV_TX_OFFLOAD_UDP_CKSUM | - DEV_TX_OFFLOAD_TCP_CKSUM | - DEV_TX_OFFLOAD_SCTP_CKSUM | - DEV_TX_OFFLOAD_TCP_TSO; - - if (hw->mac.type == ixgbe_mac_82599EB || - hw->mac.type == ixgbe_mac_X540) - dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT; - - if (hw->mac.type == ixgbe_mac_X550 || - hw->mac.type == ixgbe_mac_X550EM_x || - hw->mac.type == ixgbe_mac_X550EM_a) - dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM; - -#ifdef RTE_LIBRTE_SECURITY - if (dev->security_ctx) { - dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY; - dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY; - } -#endif + dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev); + dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) | + dev_info->rx_queue_offload_capa); + dev_info->tx_queue_offload_capa = ixgbe_get_tx_queue_offloads(dev); + dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev); dev_info->default_rxconf = (struct rte_eth_rxconf) { .rx_thresh = { @@ -3704,6 +3723,7 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) }, .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH, .rx_drop_en = 0, + .offloads = 0, }; dev_info->default_txconf = (struct rte_eth_txconf) { @@ -3714,8 +3734,7 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) }, .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH, .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH, - .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | - ETH_TXQ_FLAGS_NOOFFLOADS, + .offloads = 0, }; dev_info->rx_desc_lim = rx_desc_lim; @@ -3784,7 +3803,6 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev, struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - dev_info->pci_dev = pci_dev; dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues; dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues; dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL reg */ @@ -3796,17 +3814,11 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev, dev_info->max_vmdq_pools = ETH_16_POOLS; else dev_info->max_vmdq_pools = ETH_64_POOLS; - dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP | - DEV_RX_OFFLOAD_IPV4_CKSUM | - DEV_RX_OFFLOAD_UDP_CKSUM | - DEV_RX_OFFLOAD_TCP_CKSUM | - DEV_RX_OFFLOAD_CRC_STRIP; - dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT | - DEV_TX_OFFLOAD_IPV4_CKSUM | - DEV_TX_OFFLOAD_UDP_CKSUM | - DEV_TX_OFFLOAD_TCP_CKSUM | - DEV_TX_OFFLOAD_SCTP_CKSUM | - DEV_TX_OFFLOAD_TCP_TSO; + dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev); + dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) | + dev_info->rx_queue_offload_capa); + dev_info->tx_queue_offload_capa = ixgbe_get_tx_queue_offloads(dev); + dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev); dev_info->default_rxconf = (struct rte_eth_rxconf) { .rx_thresh = { @@ -3816,6 +3828,7 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev, }, 
.rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH, .rx_drop_en = 0, + .offloads = 0, }; dev_info->default_txconf = (struct rte_eth_txconf) { @@ -3826,8 +3839,7 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev, }, .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH, .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH, - .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | - ETH_TXQ_FLAGS_NOOFFLOADS, + .offloads = 0, }; dev_info->rx_desc_lim = rx_desc_lim; @@ -3863,7 +3875,7 @@ ixgbevf_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed, /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs * before the link status is correct */ - if (mac->type == ixgbe_mac_82599_vf) { + if (mac->type == ixgbe_mac_82599_vf && wait_to_complete) { int i; for (i = 0; i < 5; i++) { @@ -3941,12 +3953,12 @@ out: } /* return 0 means link status changed, -1 means not changed */ -static int +int ixgbe_dev_link_update_share(struct rte_eth_dev *dev, int wait_to_complete, int vf) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct rte_eth_link link, old; + struct rte_eth_link link; ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; struct ixgbe_interrupt *intr = IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); @@ -3956,12 +3968,11 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev, int wait = 1; bool autoneg = false; + memset(&link, 0, sizeof(link)); link.link_status = ETH_LINK_DOWN; - link.link_speed = 0; + link.link_speed = ETH_SPEED_NUM_NONE; link.link_duplex = ETH_LINK_HALF_DUPLEX; link.link_autoneg = ETH_LINK_AUTONEG; - memset(&old, 0, sizeof(old)); - rte_ixgbe_dev_atomic_read_link_status(dev, &old); hw->mac.get_link_status = true; @@ -3985,19 +3996,14 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev, if (diag != 0) { link.link_speed = ETH_SPEED_NUM_100M; link.link_duplex = ETH_LINK_FULL_DUPLEX; - rte_ixgbe_dev_atomic_write_link_status(dev, &link); - if (link.link_status == old.link_status) - return -1; - return 0; + return rte_eth_linkstatus_set(dev, &link); } if (link_up == 0) { - rte_ixgbe_dev_atomic_write_link_status(dev, &link); intr->flags |= IXGBE_FLAG_NEED_LINK_CONFIG; - if (link.link_status == old.link_status) - return -1; - return 0; + return rte_eth_linkstatus_set(dev, &link); } + intr->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG; link.link_status = ETH_LINK_UP; link.link_duplex = ETH_LINK_FULL_DUPLEX; @@ -4029,12 +4035,8 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev, link.link_speed = ETH_SPEED_NUM_10G; break; } - rte_ixgbe_dev_atomic_write_link_status(dev, &link); - if (link.link_status == old.link_status) - return -1; - - return 0; + return rte_eth_linkstatus_set(dev, &link); } static int @@ -4233,8 +4235,8 @@ ixgbe_dev_link_status_print(struct rte_eth_dev *dev) struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_eth_link link; - memset(&link, 0, sizeof(link)); - rte_ixgbe_dev_atomic_read_link_status(dev, &link); + rte_eth_linkstatus_get(dev, &link); + if (link.link_status) { PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s", (int)(dev->data->port_id), @@ -4269,7 +4271,6 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev, struct ixgbe_interrupt *intr = IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); int64_t timeout; - struct rte_eth_link link; struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -4286,9 +4287,10 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev, } if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) { + struct rte_eth_link link; + /* get the link status before link update, for predicting later 
*/ - memset(&link, 0, sizeof(link)); - rte_ixgbe_dev_atomic_read_link_status(dev, &link); + rte_eth_linkstatus_get(dev, &link); ixgbe_dev_link_update(dev, 0); @@ -4853,14 +4855,15 @@ ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index) ixgbe_clear_rar(hw, index); } -static void +static int ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr) { struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); ixgbe_remove_rar(dev, 0); - ixgbe_add_rar(dev, addr, 0, pci_dev->max_vfs); + + return 0; } static bool @@ -4909,10 +4912,12 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) /* switch to jumbo mode if needed */ if (frame_size > ETHER_MAX_LEN) { - dev->data->dev_conf.rxmode.jumbo_frame = 1; + dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; hlreg0 |= IXGBE_HLREG0_JUMBOEN; } else { - dev->data->dev_conf.rxmode.jumbo_frame = 0; + dev->data->dev_conf.rxmode.offloads &= + ~DEV_RX_OFFLOAD_JUMBO_FRAME; hlreg0 &= ~IXGBE_HLREG0_JUMBOEN; } IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); @@ -4932,19 +4937,32 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) * Virtual Function operations */ static void -ixgbevf_intr_disable(struct ixgbe_hw *hw) +ixgbevf_intr_disable(struct rte_eth_dev *dev) { + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + PMD_INIT_FUNC_TRACE(); /* Clear interrupt mask to stop from interrupts being generated */ IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK); IXGBE_WRITE_FLUSH(hw); + + /* Clear mask value. */ + intr->mask = 0; } static void -ixgbevf_intr_enable(struct ixgbe_hw *hw) +ixgbevf_intr_enable(struct rte_eth_dev *dev) { + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + PMD_INIT_FUNC_TRACE(); /* VF enable interrupt autoclean */ @@ -4953,6 +4971,9 @@ ixgbevf_intr_enable(struct ixgbe_hw *hw) IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_VF_IRQ_ENABLE_MASK); IXGBE_WRITE_FLUSH(hw); + + /* Save IXGBE_VTEIMS value to mask. 
*/ + intr->mask = IXGBE_VF_IRQ_ENABLE_MASK; } static int @@ -4970,14 +4991,14 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev) * Keep the persistent behavior the same as Host PF */ #ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC - if (!conf->rxmode.hw_strip_crc) { + if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP)) { PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip"); - conf->rxmode.hw_strip_crc = 1; + conf->rxmode.offloads |= DEV_RX_OFFLOAD_CRC_STRIP; } #else - if (conf->rxmode.hw_strip_crc) { + if (conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP) { PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip"); - conf->rxmode.hw_strip_crc = 0; + conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_CRC_STRIP; } #endif @@ -5030,7 +5051,7 @@ ixgbevf_dev_start(struct rte_eth_dev *dev) /* Set HW strip */ mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | ETH_VLAN_EXTEND_MASK; - err = ixgbevf_vlan_offload_set(dev, mask); + err = ixgbevf_vlan_offload_config(dev, mask); if (err) { PMD_INIT_LOG(ERR, "Unable to set VLAN offload (%d)", err); ixgbe_dev_clear_queues(dev); @@ -5039,6 +5060,8 @@ ixgbevf_dev_start(struct rte_eth_dev *dev) ixgbevf_dev_rxtx_start(dev); + ixgbevf_dev_link_update(dev, 0); + /* check and configure queue intr-vector mapping */ if (rte_intr_cap_multiple(intr_handle) && dev->data->dev_conf.intr_conf.rxq) { @@ -5074,7 +5097,7 @@ ixgbevf_dev_start(struct rte_eth_dev *dev) rte_intr_enable(intr_handle); /* Re-enable interrupt for VF */ - ixgbevf_intr_enable(hw); + ixgbevf_intr_enable(dev); return 0; } @@ -5088,7 +5111,7 @@ ixgbevf_dev_stop(struct rte_eth_dev *dev) PMD_INIT_FUNC_TRACE(); - ixgbevf_intr_disable(hw); + ixgbevf_intr_disable(dev); hw->adapter_stopped = 1; ixgbe_stop_adapter(hw); @@ -5226,24 +5249,34 @@ ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on) } static int -ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask) +ixgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask) { - struct ixgbe_hw *hw = - IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_rx_queue *rxq; uint16_t i; int on = 0; /* VF function only support hw strip feature, others are not support */ if (mask & ETH_VLAN_STRIP_MASK) { - on = !!(dev->data->dev_conf.rxmode.hw_vlan_strip); - - for (i = 0; i < hw->mac.max_rx_queues; i++) + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + on = !!(rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP); ixgbevf_vlan_strip_queue_set(dev, i, on); + } } return 0; } +static int +ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask) +{ + ixgbe_config_vlan_strip_on_all_queues(dev, mask); + + ixgbevf_vlan_offload_config(dev, mask); + + return 0; +} + int ixgbe_vt_check(struct ixgbe_hw *hw) { @@ -5580,17 +5613,17 @@ ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) { struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; - uint32_t mask; + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); uint32_t vec = IXGBE_MISC_VEC_ID; - mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS); if (rte_intr_allow_others(intr_handle)) vec = IXGBE_RX_VEC_START; - mask |= (1 << vec); + intr->mask |= (1 << vec); RTE_SET_USED(queue_id); - IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask); + IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, intr->mask); rte_intr_enable(intr_handle); @@ -5600,19 +5633,19 @@ ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t 
queue_id) static int ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) { - uint32_t mask; + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; uint32_t vec = IXGBE_MISC_VEC_ID; - mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS); if (rte_intr_allow_others(intr_handle)) vec = IXGBE_RX_VEC_START; - mask &= ~(1 << vec); + intr->mask &= ~(1 << vec); RTE_SET_USED(queue_id); - IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask); + IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, intr->mask); return 0; } @@ -5778,6 +5811,13 @@ ixgbevf_configure_msix(struct rte_eth_dev *dev) if (vector_idx < base + intr_handle->nb_efd - 1) vector_idx++; } + + /* As the RX queue setting above shows, all queues use vector 0. + * Set only the ITR value of IXGBE_MISC_VEC_ID. + */ + IXGBE_WRITE_REG(hw, IXGBE_VTEITR(IXGBE_MISC_VEC_ID), + IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT) + | IXGBE_EITR_CNT_WDIS); } /** @@ -5799,8 +5839,12 @@ ixgbe_configure_msix(struct rte_eth_dev *dev) /* won't configure msix register if no mapping is done * between intr vector and event fd + * but if msix has been enabled already, need to configure + * auto clean, auto mask and throttling. */ - if (!rte_intr_dp_is_en(intr_handle)) + gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); + if (!rte_intr_dp_is_en(intr_handle) && + !(gpie & (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT))) return; if (rte_intr_allow_others(intr_handle)) @@ -5824,30 +5868,34 @@ ixgbe_configure_msix(struct rte_eth_dev *dev) /* Populate the IVAR table and set the ITR values to the * corresponding register. */ - for (queue_id = 0; queue_id < dev->data->nb_rx_queues; - queue_id++) { - /* by default, 1:1 mapping */ - ixgbe_set_ivar_map(hw, 0, queue_id, vec); - intr_handle->intr_vec[queue_id] = vec; - if (vec < base + intr_handle->nb_efd - 1) - vec++; - } + if (rte_intr_dp_is_en(intr_handle)) { + for (queue_id = 0; queue_id < dev->data->nb_rx_queues; + queue_id++) { + /* by default, 1:1 mapping */ + ixgbe_set_ivar_map(hw, 0, queue_id, vec); + intr_handle->intr_vec[queue_id] = vec; + if (vec < base + intr_handle->nb_efd - 1) + vec++; + } - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - ixgbe_set_ivar_map(hw, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX, - IXGBE_MISC_VEC_ID); - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - case ixgbe_mac_X550: - ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID); - break; - default: - break; + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + ixgbe_set_ivar_map(hw, -1, + IXGBE_IVAR_OTHER_CAUSES_INDEX, + IXGBE_MISC_VEC_ID); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID); + break; + default: + break; + } } IXGBE_WRITE_REG(hw, IXGBE_EITR(IXGBE_MISC_VEC_ID), - IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT & 0xFFF); + IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT) + | IXGBE_EITR_CNT_WDIS); /* set up to autoclear timer, and the vectors */ mask = IXGBE_EIMS_ENABLE_MASK; @@ -5863,6 +5911,7 @@ ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev, uint16_t queue_idx, uint16_t tx_rate) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_eth_rxmode *rxmode; uint32_t rf_dec, rf_int; uint32_t bcnrc_val; uint16_t link_speed = dev->data->dev_link.link_speed; @@ -5884,14 +5933,14 @@ ixgbe_set_queue_rate_limit(struct 
rte_eth_dev *dev, bcnrc_val = 0; } + rxmode = &dev->data->dev_conf.rxmode; /* * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise * set as 0x4. */ - if ((dev->data->dev_conf.rxmode.jumbo_frame == 1) && - (dev->data->dev_conf.rxmode.max_rx_pkt_len >= - IXGBE_MAX_JUMBO_FRAME_SIZE)) + if ((rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) && + (rxmode->max_rx_pkt_len >= IXGBE_MAX_JUMBO_FRAME_SIZE)) IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, IXGBE_MMW_SIZE_JUMBO_FRAME); else @@ -5983,12 +6032,14 @@ ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index) } } -static void +static int ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); hw->mac.ops.set_rar(hw, 0, (void *)addr, 0, 0); + + return 0; } int @@ -6238,7 +6289,7 @@ ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) /* refuse mtu that requires the support of scattered packets when this * feature has not been enabled before. */ - if (!rx_conf->enable_scatter && + if (!(rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) && (max_frame + 2 * IXGBE_VLAN_TAG_SIZE > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) return -EINVAL; @@ -6812,9 +6863,8 @@ ixgbe_start_timecounters(struct rte_eth_dev *dev) uint32_t shift = 0; /* Get current link speed. */ - memset(&link, 0, sizeof(link)); ixgbe_dev_link_update(dev, 1); - rte_ixgbe_dev_atomic_read_link_status(dev, &link); + rte_eth_linkstatus_get(dev, &link); switch (link.link_speed) { case ETH_SPEED_NUM_100M: @@ -7166,6 +7216,78 @@ ixgbe_set_eeprom(struct rte_eth_dev *dev, return eeprom->ops.write_buffer(hw, first, length, data); } +static int +ixgbe_get_module_info(struct rte_eth_dev *dev, + struct rte_eth_dev_module_info *modinfo) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t status; + uint8_t sff8472_rev, addr_mode; + bool page_swap = false; + + /* Check whether we support SFF-8472 or not */ + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_SFF_8472_COMP, + &sff8472_rev); + if (status != 0) + return -EIO; + + /* addressing mode is not supported */ + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_SFF_8472_SWAP, + &addr_mode); + if (status != 0) + return -EIO; + + if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) { + PMD_DRV_LOG(ERR, + "Address change required to access page 0xA2, " + "but not supported. Please report the module " + "type to the driver maintainers."); + page_swap = true; + } + + if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap) { + /* We have a SFP, but it does not support SFF-8472 */ + modinfo->type = RTE_ETH_MODULE_SFF_8079; + modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN; + } else { + /* We have a SFP which supports a revision of SFF-8472. 
*/ + modinfo->type = RTE_ETH_MODULE_SFF_8472; + modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN; + } + + return 0; +} + +static int +ixgbe_get_module_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *info) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t status = IXGBE_ERR_PHY_ADDR_INVALID; + uint8_t databyte = 0xFF; + uint8_t *data = info->data; + uint32_t i = 0; + + if (info->length == 0) + return -EINVAL; + + for (i = info->offset; i < info->offset + info->length; i++) { + if (i < RTE_ETH_MODULE_SFF_8079_LEN) + status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte); + else + status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte); + + if (status != 0) + return -EIO; + + data[i - info->offset] = databyte; + } + + return 0; +} + uint16_t ixgbe_reta_size_get(enum ixgbe_mac_type mac_type) { switch (mac_type) { @@ -8169,7 +8291,7 @@ ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev) struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct ixgbe_interrupt *intr = IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); - ixgbevf_intr_disable(hw); + ixgbevf_intr_disable(dev); /* read-on-clear nic registers here */ eicr = IXGBE_READ_REG(hw, IXGBE_VTEICR); @@ -8186,7 +8308,6 @@ ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev) static int ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev) { - struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct ixgbe_interrupt *intr = IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); @@ -8195,7 +8316,7 @@ ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev) intr->flags &= ~IXGBE_FLAG_MAILBOX; } - ixgbevf_intr_enable(hw); + ixgbevf_intr_enable(dev); return 0; } @@ -8334,7 +8455,7 @@ ixgbe_rss_filter_restore(struct rte_eth_dev *dev) struct ixgbe_filter_info *filter_info = IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); - if (filter_info->rss_info.num) + if (filter_info->rss_info.conf.queue_num) ixgbe_config_rss_filter(dev, &filter_info->rss_info, TRUE); } diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h index c56d6524..e42ec30d 100644 --- a/drivers/net/ixgbe/ixgbe_ethdev.h +++ b/drivers/net/ixgbe/ixgbe_ethdev.h @@ -4,6 +4,9 @@ #ifndef _IXGBE_ETHDEV_H_ #define _IXGBE_ETHDEV_H_ + +#include + #include "base/ixgbe_type.h" #include "base/ixgbe_dcb.h" #include "base/ixgbe_dcb_82599.h" @@ -12,6 +15,7 @@ #ifdef RTE_LIBRTE_SECURITY #include "ixgbe_ipsec.h" #endif +#include #include #include #include @@ -39,6 +43,7 @@ #define IXGBE_EXTENDED_VLAN (uint32_t)(1 << 26) /* EXTENDED VLAN ENABLE */ #define IXGBE_VFTA_SIZE 128 #define IXGBE_VLAN_TAG_SIZE 4 +#define IXGBE_HKEY_MAX_INDEX 10 #define IXGBE_MAX_RX_QUEUE_NUM 128 #define IXGBE_MAX_INTR_QUEUE_NUM 15 #define IXGBE_VMDQ_DCB_NB_QUEUES IXGBE_MAX_RX_QUEUE_NUM @@ -57,6 +62,7 @@ (((us) * 1000 / IXGBE_EITR_INTERVAL_UNIT_NS << IXGBE_EITR_ITR_INT_SHIFT) & \ IXGBE_EITR_ITR_INT_MASK) +#define IXGBE_QUEUE_ITR_INTERVAL_DEFAULT 500 /* 500us */ /* Loopback operation modes */ /* 82599 specific loopback operation types */ @@ -196,8 +202,8 @@ struct ixgbe_hw_fdir_info { }; struct ixgbe_rte_flow_rss_conf { - struct rte_eth_rss_conf rss_conf; /**< RSS parameters. */ - uint16_t num; /**< Number of entries in queue[]. */ + struct rte_flow_action_rss conf; /**< RSS parameters. */ + uint8_t key[IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t)]; /* Hash key. */ uint16_t queue[IXGBE_MAX_RX_QUEUE_NUM]; /**< Queues indices to use. 
*/ }; @@ -253,6 +259,7 @@ struct ixgbe_vf_info { uint16_t vlan_count; uint8_t spoofchk_enabled; uint8_t api_version; + uint16_t switch_domain_id; }; /* @@ -480,6 +487,15 @@ struct ixgbe_adapter { struct ixgbe_tm_conf tm_conf; }; +struct ixgbe_vf_representor { + uint16_t vf_id; + uint16_t switch_domain_id; + struct rte_eth_dev *pf_ethdev; +}; + +int ixgbe_vf_representor_init(struct rte_eth_dev *ethdev, void *init_params); +int ixgbe_vf_representor_uninit(struct rte_eth_dev *ethdev); + #define IXGBE_DEV_PRIVATE_TO_HW(adapter)\ (&((struct ixgbe_adapter *)adapter)->hw) @@ -652,6 +668,10 @@ int ixgbe_fdir_filter_program(struct rte_eth_dev *dev, void ixgbe_configure_dcb(struct rte_eth_dev *dev); +int +ixgbe_dev_link_update_share(struct rte_eth_dev *dev, + int wait_to_complete, int vf); + /* * misc function prototypes */ @@ -659,9 +679,7 @@ void ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev); void ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev); -void ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev); - -void ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev); +void ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev); void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev); @@ -698,6 +716,10 @@ void ixgbe_tm_conf_init(struct rte_eth_dev *dev); void ixgbe_tm_conf_uninit(struct rte_eth_dev *dev); int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev, uint16_t queue_idx, uint16_t tx_rate); +int ixgbe_rss_conf_init(struct ixgbe_rte_flow_rss_conf *out, + const struct rte_flow_action_rss *in); +int ixgbe_action_rss_same(const struct rte_flow_action_rss *comp, + const struct rte_flow_action_rss *with); int ixgbe_config_rss_filter(struct rte_eth_dev *dev, struct ixgbe_rte_flow_rss_conf *conf, bool add); diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c index dcbfb38b..eb0644c8 100644 --- a/drivers/net/ixgbe/ixgbe_flow.c +++ b/drivers/net/ixgbe/ixgbe_flow.c @@ -264,8 +264,8 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, } /* Skip Ethernet */ if (item->type == RTE_FLOW_ITEM_TYPE_ETH) { - eth_spec = (const struct rte_flow_item_eth *)item->spec; - eth_mask = (const struct rte_flow_item_eth *)item->mask; + eth_spec = item->spec; + eth_mask = item->mask; /*Not supported last point for range*/ if (item->last) { rte_flow_error_set(error, @@ -298,8 +298,8 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, } if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) { - vlan_spec = (const struct rte_flow_item_vlan *)item->spec; - vlan_mask = (const struct rte_flow_item_vlan *)item->mask; + vlan_spec = item->spec; + vlan_mask = item->mask; /*Not supported last point for range*/ if (item->last) { rte_flow_error_set(error, @@ -346,7 +346,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, return -rte_errno; } - ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask; + ipv4_mask = item->mask; /** * Only support src & dst addresses, protocol, * others should be masked. 
@@ -368,7 +368,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, filter->src_ip_mask = ipv4_mask->hdr.src_addr; filter->proto_mask = ipv4_mask->hdr.next_proto_id; - ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec; + ipv4_spec = item->spec; filter->dst_ip = ipv4_spec->hdr.dst_addr; filter->src_ip = ipv4_spec->hdr.src_addr; filter->proto = ipv4_spec->hdr.next_proto_id; @@ -413,7 +413,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, } if (item->type == RTE_FLOW_ITEM_TYPE_TCP) { - tcp_mask = (const struct rte_flow_item_tcp *)item->mask; + tcp_mask = item->mask; /** * Only support src & dst ports, tcp flags, @@ -447,12 +447,12 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, return -rte_errno; } - tcp_spec = (const struct rte_flow_item_tcp *)item->spec; + tcp_spec = item->spec; filter->dst_port = tcp_spec->hdr.dst_port; filter->src_port = tcp_spec->hdr.src_port; filter->tcp_flags = tcp_spec->hdr.tcp_flags; } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) { - udp_mask = (const struct rte_flow_item_udp *)item->mask; + udp_mask = item->mask; /** * Only support src & dst ports, @@ -471,11 +471,11 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, filter->dst_port_mask = udp_mask->hdr.dst_port; filter->src_port_mask = udp_mask->hdr.src_port; - udp_spec = (const struct rte_flow_item_udp *)item->spec; + udp_spec = item->spec; filter->dst_port = udp_spec->hdr.dst_port; filter->src_port = udp_spec->hdr.src_port; } else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) { - sctp_mask = (const struct rte_flow_item_sctp *)item->mask; + sctp_mask = item->mask; /** * Only support src & dst ports, @@ -494,7 +494,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr, filter->dst_port_mask = sctp_mask->hdr.dst_port; filter->src_port_mask = sctp_mask->hdr.src_port; - sctp_spec = (const struct rte_flow_item_sctp *)item->spec; + sctp_spec = item->spec; filter->dst_port = sctp_spec->hdr.dst_port; filter->src_port = sctp_spec->hdr.src_port; } else { @@ -557,6 +557,15 @@ action: return -rte_errno; } + /* not supported */ + if (attr->transfer) { + memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + attr, "No support for transfer."); + return -rte_errno; + } + if (attr->priority > 0xFFFF) { memset(filter, 0, sizeof(struct rte_eth_ntuple_filter)); rte_flow_error_set(error, EINVAL, @@ -699,8 +708,8 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr, return -rte_errno; } - eth_spec = (const struct rte_flow_item_eth *)item->spec; - eth_mask = (const struct rte_flow_item_eth *)item->mask; + eth_spec = item->spec; + eth_mask = item->mask; /* Mask bits of source MAC address must be full of 0. 
* Mask bits of destination MAC address must be full @@ -786,6 +795,14 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr, return -rte_errno; } + /* Not supported */ + if (attr->transfer) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + attr, "No support for transfer."); + return -rte_errno; + } + /* Not supported */ if (attr->priority) { rte_flow_error_set(error, EINVAL, @@ -1000,8 +1017,8 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr, return -rte_errno; } - tcp_spec = (const struct rte_flow_item_tcp *)item->spec; - tcp_mask = (const struct rte_flow_item_tcp *)item->mask; + tcp_spec = item->spec; + tcp_mask = item->mask; if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) || tcp_mask->hdr.src_port || tcp_mask->hdr.dst_port || @@ -1078,6 +1095,15 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr, return -rte_errno; } + /* not supported */ + if (attr->transfer) { + memset(filter, 0, sizeof(struct rte_eth_syn_filter)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + attr, "No support for transfer."); + return -rte_errno; + } + /* Support 2 priorities, the lowest or highest. */ if (!attr->priority) { filter->hig_pri = 0; @@ -1198,8 +1224,8 @@ cons_parse_l2_tn_filter(struct rte_eth_dev *dev, return -rte_errno; } - e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec; - e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask; + e_tag_spec = item->spec; + e_tag_mask = item->mask; /* Only care about GRP and E cid base. */ if (e_tag_mask->epcp_edei_in_ecid_b || @@ -1249,6 +1275,15 @@ cons_parse_l2_tn_filter(struct rte_eth_dev *dev, return -rte_errno; } + /* not supported */ + if (attr->transfer) { + memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + attr, "No support for transfer."); + return -rte_errno; + } + /* not supported */ if (attr->priority) { memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf)); @@ -1353,6 +1388,15 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr, return -rte_errno; } + /* not supported */ + if (attr->transfer) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + attr, "No support for transfer."); + return -rte_errno; + } + /* not supported */ if (attr->priority) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); @@ -1447,12 +1491,9 @@ static inline uint8_t signature_match(const struct rte_flow_item pattern[]) break; if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) { - spec = - (const struct rte_flow_item_fuzzy *)item->spec; - last = - (const struct rte_flow_item_fuzzy *)item->last; - mask = - (const struct rte_flow_item_fuzzy *)item->mask; + spec = item->spec; + last = item->last; + mask = item->mask; if (!spec || !mask) return 0; @@ -1632,7 +1673,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev, if (item->spec) { rule->b_spec = TRUE; - eth_spec = (const struct rte_flow_item_eth *)item->spec; + eth_spec = item->spec; /* Get the dst MAC. */ for (j = 0; j < ETHER_ADDR_LEN; j++) { @@ -1645,7 +1686,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev, if (item->mask) { rule->b_mask = TRUE; - eth_mask = (const struct rte_flow_item_eth *)item->mask; + eth_mask = item->mask; /* Ether type should be masked. 
*/ if (eth_mask->type || @@ -1725,8 +1766,8 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev, return -rte_errno; } - vlan_spec = (const struct rte_flow_item_vlan *)item->spec; - vlan_mask = (const struct rte_flow_item_vlan *)item->mask; + vlan_spec = item->spec; + vlan_mask = item->mask; rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci; @@ -1772,8 +1813,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev, return -rte_errno; } rule->b_mask = TRUE; - ipv4_mask = - (const struct rte_flow_item_ipv4 *)item->mask; + ipv4_mask = item->mask; if (ipv4_mask->hdr.version_ihl || ipv4_mask->hdr.type_of_service || ipv4_mask->hdr.total_length || @@ -1793,8 +1833,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev, if (item->spec) { rule->b_spec = TRUE; - ipv4_spec = - (const struct rte_flow_item_ipv4 *)item->spec; + ipv4_spec = item->spec; rule->ixgbe_fdir.formatted.dst_ip[0] = ipv4_spec->hdr.dst_addr; rule->ixgbe_fdir.formatted.src_ip[0] = @@ -1844,8 +1883,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev, } rule->b_mask = TRUE; - ipv6_mask = - (const struct rte_flow_item_ipv6 *)item->mask; + ipv6_mask = item->mask; if (ipv6_mask->hdr.vtc_flow || ipv6_mask->hdr.payload_len || ipv6_mask->hdr.proto || @@ -1885,8 +1923,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev, if (item->spec) { rule->b_spec = TRUE; - ipv6_spec = - (const struct rte_flow_item_ipv6 *)item->spec; + ipv6_spec = item->spec; rte_memcpy(rule->ixgbe_fdir.formatted.src_ip, ipv6_spec->hdr.src_addr, 16); rte_memcpy(rule->ixgbe_fdir.formatted.dst_ip, @@ -1938,7 +1975,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev, return -rte_errno; } rule->b_mask = TRUE; - tcp_mask = (const struct rte_flow_item_tcp *)item->mask; + tcp_mask = item->mask; if (tcp_mask->hdr.sent_seq || tcp_mask->hdr.recv_ack || tcp_mask->hdr.data_off || @@ -1957,7 +1994,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev, if (item->spec) { rule->b_spec = TRUE; - tcp_spec = (const struct rte_flow_item_tcp *)item->spec; + tcp_spec = item->spec; rule->ixgbe_fdir.formatted.src_port = tcp_spec->hdr.src_port; rule->ixgbe_fdir.formatted.dst_port = @@ -2003,7 +2040,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev, return -rte_errno; } rule->b_mask = TRUE; - udp_mask = (const struct rte_flow_item_udp *)item->mask; + udp_mask = item->mask; if (udp_mask->hdr.dgram_len || udp_mask->hdr.dgram_cksum) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); @@ -2017,7 +2054,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev, if (item->spec) { rule->b_spec = TRUE; - udp_spec = (const struct rte_flow_item_udp *)item->spec; + udp_spec = item->spec; rule->ixgbe_fdir.formatted.src_port = udp_spec->hdr.src_port; rule->ixgbe_fdir.formatted.dst_port = @@ -2068,8 +2105,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev, return -rte_errno; } rule->b_mask = TRUE; - sctp_mask = - (const struct rte_flow_item_sctp *)item->mask; + sctp_mask = item->mask; if (sctp_mask->hdr.tag || sctp_mask->hdr.cksum) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); @@ -2083,8 +2119,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev, if (item->spec) { rule->b_spec = TRUE; - sctp_spec = - (const struct rte_flow_item_sctp *)item->spec; + sctp_spec = item->spec; rule->ixgbe_fdir.formatted.src_port = sctp_spec->hdr.src_port; rule->ixgbe_fdir.formatted.dst_port = @@ -2092,8 +2127,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev, } /* others even sctp port is not supported */ } else { - sctp_mask = - (const struct 
rte_flow_item_sctp *)item->mask; + sctp_mask = item->mask; if (sctp_mask && (sctp_mask->hdr.src_port || sctp_mask->hdr.dst_port || @@ -2136,7 +2170,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev, return -rte_errno; } - raw_mask = (const struct rte_flow_item_raw *)item->mask; + raw_mask = item->mask; /* check mask */ if (raw_mask->relative != 0x1 || @@ -2152,7 +2186,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev, return -rte_errno; } - raw_spec = (const struct rte_flow_item_raw *)item->spec; + raw_spec = item->spec; /* check spec */ if (raw_spec->relative != 0 || @@ -2425,8 +2459,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, /* Tunnel type is always meaningful. */ rule->mask.tunnel_type_mask = 1; - vxlan_mask = - (const struct rte_flow_item_vxlan *)item->mask; + vxlan_mask = item->mask; if (vxlan_mask->flags) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); rte_flow_error_set(error, EINVAL, @@ -2452,8 +2485,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, if (item->spec) { rule->b_spec = TRUE; - vxlan_spec = (const struct rte_flow_item_vxlan *) - item->spec; + vxlan_spec = item->spec; rte_memcpy(((uint8_t *) &rule->ixgbe_fdir.formatted.tni_vni + 1), vxlan_spec->vni, RTE_DIM(vxlan_spec->vni)); @@ -2490,8 +2522,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, /* Tunnel type is always meaningful. */ rule->mask.tunnel_type_mask = 1; - nvgre_mask = - (const struct rte_flow_item_nvgre *)item->mask; + nvgre_mask = item->mask; if (nvgre_mask->flow_id) { memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); rte_flow_error_set(error, EINVAL, @@ -2534,8 +2565,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, if (item->spec) { rule->b_spec = TRUE; - nvgre_spec = - (const struct rte_flow_item_nvgre *)item->spec; + nvgre_spec = item->spec; if (nvgre_spec->c_k_s_rsvd0_ver != rte_cpu_to_be_16(0x2000) && nvgre_mask->c_k_s_rsvd0_ver) { @@ -2591,7 +2621,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, return -rte_errno; } rule->b_mask = TRUE; - eth_mask = (const struct rte_flow_item_eth *)item->mask; + eth_mask = item->mask; /* Ether type should be masked. */ if (eth_mask->type) { @@ -2632,7 +2662,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, if (item->spec) { rule->b_spec = TRUE; - eth_spec = (const struct rte_flow_item_eth *)item->spec; + eth_spec = item->spec; /* Get the dst MAC. 
*/ for (j = 0; j < ETHER_ADDR_LEN; j++) { @@ -2671,8 +2701,8 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr, return -rte_errno; } - vlan_spec = (const struct rte_flow_item_vlan *)item->spec; - vlan_mask = (const struct rte_flow_item_vlan *)item->mask; + vlan_spec = item->spec; + vlan_mask = item->mask; rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci; @@ -2775,7 +2805,7 @@ ixgbe_parse_rss_filter(struct rte_eth_dev *dev, rss = (const struct rte_flow_action_rss *)act->conf; - if (!rss || !rss->num) { + if (!rss || !rss->queue_num) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act, @@ -2783,7 +2813,7 @@ ixgbe_parse_rss_filter(struct rte_eth_dev *dev, return -rte_errno; } - for (n = 0; n < rss->num; n++) { + for (n = 0; n < rss->queue_num; n++) { if (rss->queue[n] >= dev->data->nb_rx_queues) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, @@ -2792,14 +2822,27 @@ ixgbe_parse_rss_filter(struct rte_eth_dev *dev, return -rte_errno; } } - if (rss->rss_conf) - rss_conf->rss_conf = *rss->rss_conf; - else - rss_conf->rss_conf.rss_hf = IXGBE_RSS_OFFLOAD_ALL; - for (n = 0; n < rss->num; ++n) - rss_conf->queue[n] = rss->queue[n]; - rss_conf->num = rss->num; + if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act, + "non-default RSS hash functions are not supported"); + if (rss->level) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act, + "a nonzero RSS encapsulation level is not supported"); + if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key)) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act, + "RSS hash key must be exactly 40 bytes"); + if (rss->queue_num > RTE_DIM(rss_conf->queue)) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act, + "too many queues for RSS context"); + if (ixgbe_rss_conf_init(rss_conf, rss)) + return rte_flow_error_set + (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act, + "RSS context initialization failure"); /* check if the next not void item is END */ act = next_no_void_action(actions, act); @@ -2830,6 +2873,15 @@ ixgbe_parse_rss_filter(struct rte_eth_dev *dev, return -rte_errno; } + /* not supported */ + if (attr->transfer) { + memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + attr, "No support for transfer."); + return -rte_errno; + } + if (attr->priority > 0xFFFF) { memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf)); rte_flow_error_set(error, EINVAL, @@ -2848,7 +2900,7 @@ ixgbe_clear_rss_filter(struct rte_eth_dev *dev) struct ixgbe_filter_info *filter_info = IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); - if (filter_info->rss_info.num) + if (filter_info->rss_info.conf.queue_num) ixgbe_config_rss_filter(dev, &filter_info->rss_info, FALSE); } @@ -3167,9 +3219,8 @@ ixgbe_flow_create(struct rte_eth_dev *dev, PMD_DRV_LOG(ERR, "failed to allocate memory"); goto out; } - rte_memcpy(&rss_filter_ptr->filter_info, - &rss_conf, - sizeof(struct ixgbe_rte_flow_rss_conf)); + ixgbe_rss_conf_init(&rss_filter_ptr->filter_info, + &rss_conf.conf); TAILQ_INSERT_TAIL(&filter_rss_list, rss_filter_ptr, entries); flow->rule = rss_filter_ptr; diff --git a/drivers/net/ixgbe/ixgbe_ipsec.c b/drivers/net/ixgbe/ixgbe_ipsec.c index 176ec0fd..de7ed367 100644 --- a/drivers/net/ixgbe/ixgbe_ipsec.c +++ b/drivers/net/ixgbe/ixgbe_ipsec.c @@ -598,13 +598,18 @@ 
ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); uint32_t reg; + uint64_t rx_offloads; + uint64_t tx_offloads; + + rx_offloads = dev->data->dev_conf.rxmode.offloads; + tx_offloads = dev->data->dev_conf.txmode.offloads; /* sanity checks */ - if (dev->data->dev_conf.rxmode.enable_lro) { + if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) { PMD_DRV_LOG(ERR, "RSC and IPsec not supported"); return -1; } - if (!dev->data->dev_conf.rxmode.hw_strip_crc) { + if (!(rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP)) { PMD_DRV_LOG(ERR, "HW CRC strip needs to be enabled for IPsec"); return -1; } @@ -624,7 +629,7 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev) reg |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP; IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg); - if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SECURITY) { + if (rx_offloads & DEV_RX_OFFLOAD_SECURITY) { IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, 0); reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); if (reg != 0) { @@ -632,7 +637,7 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev) return -1; } } - if (dev->data->dev_conf.txmode.offloads & DEV_TX_OFFLOAD_SECURITY) { + if (tx_offloads & DEV_TX_OFFLOAD_SECURITY) { IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, IXGBE_SECTXCTRL_STORE_FORWARD); reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL); diff --git a/drivers/net/ixgbe/ixgbe_pf.c b/drivers/net/ixgbe/ixgbe_pf.c index ea997371..4d199c80 100644 --- a/drivers/net/ixgbe/ixgbe_pf.c +++ b/drivers/net/ixgbe/ixgbe_pf.c @@ -90,6 +90,8 @@ void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev) if (*vfinfo == NULL) rte_panic("Cannot allocate memory for private VF data\n"); + rte_eth_switch_domain_alloc(&(*vfinfo)->switch_domain_id); + memset(mirror_info, 0, sizeof(struct ixgbe_mirror_info)); memset(uta_info, 0, sizeof(struct ixgbe_uta_info)); hw->mac.mc_filter_type = 0; @@ -122,6 +124,7 @@ void ixgbe_pf_host_uninit(struct rte_eth_dev *eth_dev) { struct ixgbe_vf_info **vfinfo; uint16_t vf_num; + int ret; PMD_INIT_FUNC_TRACE(); @@ -132,6 +135,10 @@ void ixgbe_pf_host_uninit(struct rte_eth_dev *eth_dev) RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = 0; RTE_ETH_DEV_SRIOV(eth_dev).def_pool_q_idx = 0; + ret = rte_eth_switch_domain_free((*vfinfo)->switch_domain_id); + if (ret) + PMD_INIT_LOG(WARNING, "failed to free switch domain: %d", ret); + vf_num = dev_num_vf(eth_dev); if (vf_num == 0) return; @@ -329,10 +336,7 @@ set_rx_mode(struct rte_eth_dev *dev) IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); - if (dev->data->dev_conf.rxmode.hw_vlan_strip) - ixgbe_vlan_hw_strip_enable_all(dev); - else - ixgbe_vlan_hw_strip_disable_all(dev); + ixgbe_vlan_hw_strip_config(dev); } static inline void diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c index 6c582b4b..3e13d26a 100644 --- a/drivers/net/ixgbe/ixgbe_rxtx.c +++ b/drivers/net/ixgbe/ixgbe_rxtx.c @@ -2379,7 +2379,7 @@ void __attribute__((cold)) ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq) { /* Use a simple Tx queue (no offloads, no multi segs) if possible */ - if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS) && + if ((txq->offloads == 0) && #ifdef RTE_LIBRTE_SECURITY !(txq->using_ipsec) && #endif @@ -2398,9 +2398,8 @@ ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq) } else { PMD_INIT_LOG(DEBUG, "Using full-featured tx code path"); PMD_INIT_LOG(DEBUG, - " - txq_flags = %lx " "[IXGBE_SIMPLE_FLAGS=%lx]", - (unsigned long)txq->txq_flags, - (unsigned long)IXGBE_SIMPLE_FLAGS); + " - offloads = 0x%" 
PRIx64, + txq->offloads); PMD_INIT_LOG(DEBUG, " - tx_rs_thresh = %lu " "[RTE_PMD_IXGBE_TX_MAX_BURST=%lu]", (unsigned long)txq->tx_rs_thresh, @@ -2410,6 +2409,45 @@ ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq) } } +uint64_t +ixgbe_get_tx_queue_offloads(struct rte_eth_dev *dev) +{ + RTE_SET_USED(dev); + + return 0; +} + +uint64_t +ixgbe_get_tx_port_offloads(struct rte_eth_dev *dev) +{ + uint64_t tx_offload_capa; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + tx_offload_capa = + DEV_TX_OFFLOAD_VLAN_INSERT | + DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM | + DEV_TX_OFFLOAD_SCTP_CKSUM | + DEV_TX_OFFLOAD_TCP_TSO | + DEV_TX_OFFLOAD_MULTI_SEGS; + + if (hw->mac.type == ixgbe_mac_82599EB || + hw->mac.type == ixgbe_mac_X540) + tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT; + + if (hw->mac.type == ixgbe_mac_X550 || + hw->mac.type == ixgbe_mac_X550EM_x || + hw->mac.type == ixgbe_mac_X550EM_a) + tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM; + +#ifdef RTE_LIBRTE_SECURITY + if (dev->security_ctx) + tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY; +#endif + return tx_offload_capa; +} + int __attribute__((cold)) ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, @@ -2421,10 +2459,13 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq; struct ixgbe_hw *hw; uint16_t tx_rs_thresh, tx_free_thresh; + uint64_t offloads; PMD_INIT_FUNC_TRACE(); hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads; + /* * Validate number of transmit descriptors. * It must not exceed hardware maximum, and must be multiple @@ -2550,7 +2591,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ? queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx); txq->port_id = dev->data->port_id; - txq->txq_flags = tx_conf->txq_flags; + txq->offloads = offloads; txq->ops = &def_txq_ops; txq->tx_deferred_start = tx_conf->tx_deferred_start; #ifdef RTE_LIBRTE_SECURITY @@ -2769,6 +2810,81 @@ ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq) #endif } +static int +ixgbe_is_vf(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + switch (hw->mac.type) { + case ixgbe_mac_82599_vf: + case ixgbe_mac_X540_vf: + case ixgbe_mac_X550_vf: + case ixgbe_mac_X550EM_x_vf: + case ixgbe_mac_X550EM_a_vf: + return 1; + default: + return 0; + } +} + +uint64_t +ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev) +{ + uint64_t offloads = 0; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (hw->mac.type != ixgbe_mac_82598EB) + offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; + + return offloads; +} + +uint64_t +ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev) +{ + uint64_t offloads; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + offloads = DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM | + DEV_RX_OFFLOAD_CRC_STRIP | + DEV_RX_OFFLOAD_JUMBO_FRAME | + DEV_RX_OFFLOAD_SCATTER; + + if (hw->mac.type == ixgbe_mac_82598EB) + offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; + + if (ixgbe_is_vf(dev) == 0) + offloads |= (DEV_RX_OFFLOAD_VLAN_FILTER | + DEV_RX_OFFLOAD_VLAN_EXTEND); + + /* + * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV + * mode. 
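+ * The DEV_RX_OFFLOAD_TCP_LRO capability below is therefore advertised
+ * only while SR-IOV is inactive.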
+ */ + if ((hw->mac.type == ixgbe_mac_82599EB || + hw->mac.type == ixgbe_mac_X540) && + !RTE_ETH_DEV_SRIOV(dev).active) + offloads |= DEV_RX_OFFLOAD_TCP_LRO; + + if (hw->mac.type == ixgbe_mac_82599EB || + hw->mac.type == ixgbe_mac_X540) + offloads |= DEV_RX_OFFLOAD_MACSEC_STRIP; + + if (hw->mac.type == ixgbe_mac_X550 || + hw->mac.type == ixgbe_mac_X550EM_x || + hw->mac.type == ixgbe_mac_X550EM_a) + offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM; + +#ifdef RTE_LIBRTE_SECURITY + if (dev->security_ctx) + offloads |= DEV_RX_OFFLOAD_SECURITY; +#endif + + return offloads; +} + int __attribute__((cold)) ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, @@ -2783,10 +2899,13 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t len; struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)dev->data->dev_private; + uint64_t offloads; PMD_INIT_FUNC_TRACE(); hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads; + /* * Validate number of receive descriptors. * It must not exceed hardware maximum, and must be multiple @@ -2816,10 +2935,11 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ? queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx); rxq->port_id = dev->data->port_id; - rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? - 0 : ETHER_CRC_LEN); + rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_CRC_STRIP) ? 0 : ETHER_CRC_LEN); rxq->drop_en = rx_conf->rx_drop_en; rxq->rx_deferred_start = rx_conf->rx_deferred_start; + rxq->offloads = offloads; /* * The packet type in RX descriptor is different for different NICs. @@ -4574,7 +4694,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev) if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO) rsc_capable = true; - if (!rsc_capable && rx_conf->enable_lro) { + if (!rsc_capable && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) { PMD_INIT_LOG(CRIT, "LRO is requested on HW that doesn't " "support it"); return -EINVAL; @@ -4582,7 +4702,8 @@ ixgbe_set_rsc(struct rte_eth_dev *dev) /* RSC global configuration (chapter 4.6.7.2.1 of 82599 Spec) */ - if (!rx_conf->hw_strip_crc && rx_conf->enable_lro) { + if (!(rx_conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP) && + (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) { /* * According to chapter of 4.6.7.2.1 of the Spec Rev. * 3.0 RSC configuration requires HW CRC stripping being @@ -4596,7 +4717,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev) /* RFCTL configuration */ rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL); - if ((rsc_capable) && (rx_conf->enable_lro)) + if ((rsc_capable) && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) /* * Since NFS packets coalescing is not supported - clear * RFCTL.NFSW_DIS and RFCTL.NFSR_DIS when RSC is @@ -4609,7 +4730,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev) IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl); /* If LRO hasn't been requested - we are done here. */ - if (!rx_conf->enable_lro) + if (!(rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) return 0; /* Set RDRXCTL.RSCACKC bit */ @@ -4666,7 +4787,8 @@ ixgbe_set_rsc(struct rte_eth_dev *dev) * at most 500us latency for a single RSC aggregation. 
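* (the interval value itself is now taken from
* IXGBE_QUEUE_ITR_INTERVAL_DEFAULT rather than a hard-coded 500us)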
*/ eitr &= ~IXGBE_EITR_ITR_INT_MASK; - eitr |= IXGBE_EITR_INTERVAL_US(500) | IXGBE_EITR_CNT_WDIS; + eitr |= IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT); + eitr |= IXGBE_EITR_CNT_WDIS; IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl); IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxq->reg_idx), rscctl); @@ -4729,7 +4851,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev) * Configure CRC stripping, if any. */ hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); - if (rx_conf->hw_strip_crc) + if (rx_conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP) hlreg0 |= IXGBE_HLREG0_RXCRCSTRP; else hlreg0 &= ~IXGBE_HLREG0_RXCRCSTRP; @@ -4737,7 +4859,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev) /* * Configure jumbo frame support, if any. */ - if (rx_conf->jumbo_frame == 1) { + if (rx_conf->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) { hlreg0 |= IXGBE_HLREG0_JUMBOEN; maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS); maxfrs &= 0x0000FFFF; @@ -4757,6 +4879,11 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev) IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); + /* + * Assume no header split and no VLAN strip support + * on any Rx queue first . + */ + rx_conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; /* Setup RX queues */ for (i = 0; i < dev->data->nb_rx_queues; i++) { rxq = dev->data->rx_queues[i]; @@ -4765,7 +4892,8 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev) * Reset crc_len in case it was changed after queue setup by a * call to configure. */ - rxq->crc_len = rx_conf->hw_strip_crc ? 0 : ETHER_CRC_LEN; + rxq->crc_len = (rx_conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP) ? + 0 : ETHER_CRC_LEN; /* Setup the Base and Length of the Rx Descriptor Rings */ bus_addr = rxq->rx_ring_phys_addr; @@ -4779,28 +4907,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev) IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), 0); /* Configure the SRRCTL register */ -#ifdef RTE_HEADER_SPLIT_ENABLE - /* - * Configure Header Split - */ - if (rx_conf->header_split) { - if (hw->mac.type == ixgbe_mac_82599EB) { - /* Must setup the PSRTYPE register */ - uint32_t psrtype; - - psrtype = IXGBE_PSRTYPE_TCPHDR | - IXGBE_PSRTYPE_UDPHDR | - IXGBE_PSRTYPE_IPV4HDR | - IXGBE_PSRTYPE_IPV6HDR; - IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx), psrtype); - } - srrctl = ((rx_conf->split_hdr_size << - IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) & - IXGBE_SRRCTL_BSIZEHDR_MASK); - srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; - } else -#endif - srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; + srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; /* Set if packets are dropped when no descriptors available */ if (rxq->drop_en) @@ -4826,9 +4933,11 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev) if (dev->data->dev_conf.rxmode.max_rx_pkt_len + 2 * IXGBE_VLAN_TAG_SIZE > buf_size) dev->data->scattered_rx = 1; + if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) + rx_conf->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; } - if (rx_conf->enable_scatter) + if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) dev->data->scattered_rx = 1; /* @@ -4843,7 +4952,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev) */ rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); rxcsum |= IXGBE_RXCSUM_PCSD; - if (rx_conf->hw_ip_checksum) + if (rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM) rxcsum |= IXGBE_RXCSUM_IPPCSE; else rxcsum &= ~IXGBE_RXCSUM_IPPCSE; @@ -4853,7 +4962,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev) if (hw->mac.type == ixgbe_mac_82599EB || hw->mac.type == ixgbe_mac_X540) { rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); - if (rx_conf->hw_strip_crc) + if (rx_conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP) rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP; else rdrxctl &= 
~IXGBE_RDRXCTL_CRCSTRIP; @@ -5259,6 +5368,7 @@ ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, qinfo->conf.rx_free_thresh = rxq->rx_free_thresh; qinfo->conf.rx_drop_en = rxq->drop_en; qinfo->conf.rx_deferred_start = rxq->rx_deferred_start; + qinfo->conf.offloads = rxq->offloads; } void @@ -5277,7 +5387,7 @@ ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, qinfo->conf.tx_free_thresh = txq->tx_free_thresh; qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh; - qinfo->conf.txq_flags = txq->txq_flags; + qinfo->conf.offloads = txq->offloads; qinfo->conf.tx_deferred_start = txq->tx_deferred_start; } @@ -5289,6 +5399,7 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev) { struct ixgbe_hw *hw; struct ixgbe_rx_queue *rxq; + struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode; uint64_t bus_addr; uint32_t srrctl, psrtype = 0; uint16_t buf_size; @@ -5328,6 +5439,11 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev) ixgbevf_rlpml_set_vf(hw, (uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len); + /* + * Assume no header split and no VLAN strip support + * on any Rx queue first . + */ + rxmode->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; /* Setup RX queues */ for (i = 0; i < dev->data->nb_rx_queues; i++) { rxq = dev->data->rx_queues[i]; @@ -5351,18 +5467,7 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev) /* Configure the SRRCTL register */ -#ifdef RTE_HEADER_SPLIT_ENABLE - /* - * Configure Header Split - */ - if (dev->data->dev_conf.rxmode.header_split) { - srrctl = ((dev->data->dev_conf.rxmode.split_hdr_size << - IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) & - IXGBE_SRRCTL_BSIZEHDR_MASK); - srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; - } else -#endif - srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; + srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; /* Set if packets are dropped when no descriptors available */ if (rxq->drop_en) @@ -5387,24 +5492,18 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev) buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) << IXGBE_SRRCTL_BSIZEPKT_SHIFT); - if (dev->data->dev_conf.rxmode.enable_scatter || + if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER || /* It adds dual VLAN length for supporting dual VLAN */ - (dev->data->dev_conf.rxmode.max_rx_pkt_len + + (rxmode->max_rx_pkt_len + 2 * IXGBE_VLAN_TAG_SIZE) > buf_size) { if (!dev->data->scattered_rx) PMD_INIT_LOG(DEBUG, "forcing scatter mode"); dev->data->scattered_rx = 1; } - } -#ifdef RTE_HEADER_SPLIT_ENABLE - if (dev->data->dev_conf.rxmode.header_split) - /* Must setup the PSRTYPE register */ - psrtype = IXGBE_PSRTYPE_TCPHDR | - IXGBE_PSRTYPE_UDPHDR | - IXGBE_PSRTYPE_IPV4HDR | - IXGBE_PSRTYPE_IPV6HDR; -#endif + if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) + rxmode->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; + } /* Set RQPL for VF RSS according to max Rx queue */ psrtype |= (dev->data->nb_rx_queues >> 1) << @@ -5521,6 +5620,40 @@ ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev) } } +int +ixgbe_rss_conf_init(struct ixgbe_rte_flow_rss_conf *out, + const struct rte_flow_action_rss *in) +{ + if (in->key_len > RTE_DIM(out->key) || + in->queue_num > RTE_DIM(out->queue)) + return -EINVAL; + out->conf = (struct rte_flow_action_rss){ + .func = in->func, + .level = in->level, + .types = in->types, + .key_len = in->key_len, + .queue_num = in->queue_num, + .key = memcpy(out->key, in->key, in->key_len), + .queue = memcpy(out->queue, in->queue, + sizeof(*in->queue) * in->queue_num), + }; + return 0; +} + +int +ixgbe_action_rss_same(const struct rte_flow_action_rss *comp, + const struct rte_flow_action_rss *with) +{ + return 
(comp->func == with->func && + comp->level == with->level && + comp->types == with->types && + comp->key_len == with->key_len && + comp->queue_num == with->queue_num && + !memcmp(comp->key, with->key, with->key_len) && + !memcmp(comp->queue, with->queue, + sizeof(*with->queue) * with->queue_num)); +} + int ixgbe_config_rss_filter(struct rte_eth_dev *dev, struct ixgbe_rte_flow_rss_conf *conf, bool add) @@ -5531,7 +5664,12 @@ ixgbe_config_rss_filter(struct rte_eth_dev *dev, uint16_t j; uint16_t sp_reta_size; uint32_t reta_reg; - struct rte_eth_rss_conf rss_conf = conf->rss_conf; + struct rte_eth_rss_conf rss_conf = { + .rss_key = conf->conf.key_len ? + (void *)(uintptr_t)conf->conf.key : NULL, + .rss_key_len = conf->conf.key_len, + .rss_hf = conf->conf.types, + }; struct ixgbe_filter_info *filter_info = IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); @@ -5541,8 +5679,8 @@ ixgbe_config_rss_filter(struct rte_eth_dev *dev, sp_reta_size = ixgbe_reta_size_get(hw->mac.type); if (!add) { - if (memcmp(conf, &filter_info->rss_info, - sizeof(struct ixgbe_rte_flow_rss_conf)) == 0) { + if (ixgbe_action_rss_same(&filter_info->rss_info.conf, + &conf->conf)) { ixgbe_rss_disable(dev); memset(&filter_info->rss_info, 0, sizeof(struct ixgbe_rte_flow_rss_conf)); @@ -5551,7 +5689,7 @@ ixgbe_config_rss_filter(struct rte_eth_dev *dev, return -EINVAL; } - if (filter_info->rss_info.num) + if (filter_info->rss_info.conf.queue_num) return -EINVAL; /* Fill in redirection table * The byte-swap is needed because NIC registers are in @@ -5561,9 +5699,9 @@ ixgbe_config_rss_filter(struct rte_eth_dev *dev, for (i = 0, j = 0; i < sp_reta_size; i++, j++) { reta_reg = ixgbe_reta_reg_get(hw->mac.type, i); - if (j == conf->num) + if (j == conf->conf.queue_num) j = 0; - reta = (reta << 8) | conf->queue[j]; + reta = (reta << 8) | conf->conf.queue[j]; if ((i & 3) == 3) IXGBE_WRITE_REG(hw, reta_reg, rte_bswap32(reta)); @@ -5580,8 +5718,8 @@ ixgbe_config_rss_filter(struct rte_eth_dev *dev, rss_conf.rss_key = rss_intel_key; /* Default hash key */ ixgbe_hw_rss_hash_set(hw, &rss_conf); - rte_memcpy(&filter_info->rss_info, - conf, sizeof(struct ixgbe_rte_flow_rss_conf)); + if (ixgbe_rss_conf_init(&filter_info->rss_info, &conf->conf)) + return -EINVAL; return 0; } diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h index 69c718bc..39378f75 100644 --- a/drivers/net/ixgbe/ixgbe_rxtx.h +++ b/drivers/net/ixgbe/ixgbe_rxtx.h @@ -129,6 +129,7 @@ struct ixgbe_rx_queue { uint8_t rx_deferred_start; /**< not in global dev start. */ /** flags to set in mbuf when a vlan is detected. */ uint64_t vlan_flags; + uint64_t offloads; /**< Rx offloads with DEV_RX_OFFLOAD_* */ /** need to alloc dummy mbuf, for wraparound when scanning hw ring */ struct rte_mbuf fake_mbuf; /** hold packets to return to application */ @@ -221,7 +222,7 @@ struct ixgbe_tx_queue { uint8_t pthresh; /**< Prefetch threshold register. */ uint8_t hthresh; /**< Host threshold register. */ uint8_t wthresh; /**< Write-back threshold reg. */ - uint32_t txq_flags; /**< Holds flags for this TXq */ + uint64_t offloads; /**< Tx offload flags of DEV_TX_OFFLOAD_* */ uint32_t ctx_curr; /**< Hardware context states. */ /** Hardware context0 history. 
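* (cache of the offload contexts most recently written to the hardware)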
*/ struct ixgbe_advctx_info ctx_cache[IXGBE_CTX_NUM]; @@ -239,20 +240,6 @@ struct ixgbe_txq_ops { void (*reset)(struct ixgbe_tx_queue *txq); }; -/* - * The "simple" TX queue functions require that the following - * flags are set when the TX queue is configured: - * - ETH_TXQ_FLAGS_NOMULTSEGS - * - ETH_TXQ_FLAGS_NOVLANOFFL - * - ETH_TXQ_FLAGS_NOXSUMSCTP - * - ETH_TXQ_FLAGS_NOXSUMUDP - * - ETH_TXQ_FLAGS_NOXSUMTCP - * and that the RS bit threshold (tx_rs_thresh) is at least equal to - * RTE_PMD_IXGBE_TX_MAX_BURST. - */ -#define IXGBE_SIMPLE_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \ - ETH_TXQ_FLAGS_NOOFFLOADS) - /* * Populate descriptors with the following info: * 1.) buffer_addr = phys_addr + headroom @@ -305,6 +292,11 @@ extern const uint32_t ptype_table_tn[IXGBE_PACKET_TYPE_TN_MAX]; uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts); int ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq); - #endif /* RTE_IXGBE_INC_VECTOR */ + +uint64_t ixgbe_get_tx_port_offloads(struct rte_eth_dev *dev); +uint64_t ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev); +uint64_t ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev); +uint64_t ixgbe_get_tx_queue_offloads(struct rte_eth_dev *dev); + #endif /* _IXGBE_RXTX_H_ */ diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h index 414840a2..a97c2718 100644 --- a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h +++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h @@ -278,17 +278,12 @@ static inline int ixgbe_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev) { #ifndef RTE_LIBRTE_IEEE1588 - struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode; struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf; /* no fdir support */ if (fconf->mode != RTE_FDIR_MODE_NONE) return -1; - /* no header split support */ - if (rxmode->header_split == 1) - return -1; - return 0; #else RTE_SET_USED(dev); diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c index e0f9998f..edb13835 100644 --- a/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c +++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c @@ -515,7 +515,7 @@ ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev) struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode; /* no csum error report support */ - if (rxmode->hw_ip_checksum == 1) + if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM) return -1; return ixgbe_rx_vec_dev_conf_condition_check_default(dev); diff --git a/drivers/net/ixgbe/ixgbe_vf_representor.c b/drivers/net/ixgbe/ixgbe_vf_representor.c new file mode 100644 index 00000000..db516d99 --- /dev/null +++ b/drivers/net/ixgbe/ixgbe_vf_representor.c @@ -0,0 +1,231 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Intel Corporation. 
+ */ + +#include +#include +#include + +#include "base/ixgbe_type.h" +#include "base/ixgbe_vf.h" +#include "ixgbe_ethdev.h" +#include "ixgbe_rxtx.h" +#include "rte_pmd_ixgbe.h" + + +static int +ixgbe_vf_representor_link_update(struct rte_eth_dev *ethdev, + int wait_to_complete) +{ + struct ixgbe_vf_representor *representor = ethdev->data->dev_private; + + return ixgbe_dev_link_update_share(representor->pf_ethdev, + wait_to_complete, 0); +} + +static int +ixgbe_vf_representor_mac_addr_set(struct rte_eth_dev *ethdev, + struct ether_addr *mac_addr) +{ + struct ixgbe_vf_representor *representor = ethdev->data->dev_private; + + return rte_pmd_ixgbe_set_vf_mac_addr( + representor->pf_ethdev->data->port_id, + representor->vf_id, mac_addr); +} + +static void +ixgbe_vf_representor_dev_infos_get(struct rte_eth_dev *ethdev, + struct rte_eth_dev_info *dev_info) +{ + struct ixgbe_vf_representor *representor = ethdev->data->dev_private; + + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW( + representor->pf_ethdev->data->dev_private); + + dev_info->device = representor->pf_ethdev->device; + + dev_info->min_rx_bufsize = 1024; + /**< Minimum size of RX buffer. */ + dev_info->max_rx_pktlen = 9728; + /**< Maximum configurable length of RX pkt. */ + dev_info->max_rx_queues = IXGBE_VF_MAX_RX_QUEUES; + /**< Maximum number of RX queues. */ + dev_info->max_tx_queues = IXGBE_VF_MAX_TX_QUEUES; + /**< Maximum number of TX queues. */ + + dev_info->max_mac_addrs = hw->mac.num_rar_entries; + /**< Maximum number of MAC addresses. */ + + dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP | + DEV_RX_OFFLOAD_IPV4_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM; + /**< Device RX offload capabilities. */ + + dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT | + DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM | DEV_TX_OFFLOAD_SCTP_CKSUM | + DEV_TX_OFFLOAD_TCP_TSO; + /**< Device TX offload capabilities. */ + + dev_info->speed_capa = + representor->pf_ethdev->data->dev_link.link_speed; + /**< Supported speeds bitmap (ETH_LINK_SPEED_). 
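+ * (in practice this mirrors the PF's current link speed rather than
+ * a full capability bitmap)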
*/ + + dev_info->switch_info.name = + representor->pf_ethdev->device->name; + dev_info->switch_info.domain_id = representor->switch_domain_id; + dev_info->switch_info.port_id = representor->vf_id; +} + +static int ixgbe_vf_representor_dev_configure( + __rte_unused struct rte_eth_dev *dev) +{ + return 0; +} + +static int ixgbe_vf_representor_rx_queue_setup( + __rte_unused struct rte_eth_dev *dev, + __rte_unused uint16_t rx_queue_id, + __rte_unused uint16_t nb_rx_desc, + __rte_unused unsigned int socket_id, + __rte_unused const struct rte_eth_rxconf *rx_conf, + __rte_unused struct rte_mempool *mb_pool) +{ + return 0; +} + +static int ixgbe_vf_representor_tx_queue_setup( + __rte_unused struct rte_eth_dev *dev, + __rte_unused uint16_t rx_queue_id, + __rte_unused uint16_t nb_rx_desc, + __rte_unused unsigned int socket_id, + __rte_unused const struct rte_eth_txconf *tx_conf) +{ + return 0; +} + +static int ixgbe_vf_representor_dev_start(__rte_unused struct rte_eth_dev *dev) +{ + return 0; +} + +static void ixgbe_vf_representor_dev_stop(__rte_unused struct rte_eth_dev *dev) +{ +} + +static int +ixgbe_vf_representor_vlan_filter_set(struct rte_eth_dev *ethdev, + uint16_t vlan_id, int on) +{ + struct ixgbe_vf_representor *representor = ethdev->data->dev_private; + uint64_t vf_mask = 1ULL << representor->vf_id; + + return rte_pmd_ixgbe_set_vf_vlan_filter( + representor->pf_ethdev->data->port_id, vlan_id, vf_mask, on); +} + +static void +ixgbe_vf_representor_vlan_strip_queue_set(struct rte_eth_dev *ethdev, + __rte_unused uint16_t rx_queue_id, int on) +{ + struct ixgbe_vf_representor *representor = ethdev->data->dev_private; + + rte_pmd_ixgbe_set_vf_vlan_stripq(representor->pf_ethdev->data->port_id, + representor->vf_id, on); +} + +struct eth_dev_ops ixgbe_vf_representor_dev_ops = { + .dev_infos_get = ixgbe_vf_representor_dev_infos_get, + + .dev_start = ixgbe_vf_representor_dev_start, + .dev_configure = ixgbe_vf_representor_dev_configure, + .dev_stop = ixgbe_vf_representor_dev_stop, + + .rx_queue_setup = ixgbe_vf_representor_rx_queue_setup, + .tx_queue_setup = ixgbe_vf_representor_tx_queue_setup, + + .link_update = ixgbe_vf_representor_link_update, + + .vlan_filter_set = ixgbe_vf_representor_vlan_filter_set, + .vlan_strip_queue_set = ixgbe_vf_representor_vlan_strip_queue_set, + + .mac_addr_set = ixgbe_vf_representor_mac_addr_set, +}; + +static uint16_t +ixgbe_vf_representor_rx_burst(__rte_unused void *rx_queue, + __rte_unused struct rte_mbuf **rx_pkts, __rte_unused uint16_t nb_pkts) +{ + return 0; +} + +static uint16_t +ixgbe_vf_representor_tx_burst(__rte_unused void *tx_queue, + __rte_unused struct rte_mbuf **tx_pkts, __rte_unused uint16_t nb_pkts) +{ + return 0; +} + +int +ixgbe_vf_representor_init(struct rte_eth_dev *ethdev, void *init_params) +{ + struct ixgbe_vf_representor *representor = ethdev->data->dev_private; + + struct ixgbe_vf_info *vf_data; + struct rte_pci_device *pci_dev; + struct rte_eth_link *link; + + if (!representor) + return -ENOMEM; + + representor->vf_id = + ((struct ixgbe_vf_representor *)init_params)->vf_id; + representor->switch_domain_id = + ((struct ixgbe_vf_representor *)init_params)->switch_domain_id; + representor->pf_ethdev = + ((struct ixgbe_vf_representor *)init_params)->pf_ethdev; + + pci_dev = RTE_ETH_DEV_TO_PCI(representor->pf_ethdev); + + if (representor->vf_id >= pci_dev->max_vfs) + return -ENODEV; + + ethdev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR; + + /* Set representor device ops */ + ethdev->dev_ops = &ixgbe_vf_representor_dev_ops; + + /* No data-path, 
but need stub Rx/Tx functions to avoid crash + * when testing with the likes of testpmd. + */ + ethdev->rx_pkt_burst = ixgbe_vf_representor_rx_burst; + ethdev->tx_pkt_burst = ixgbe_vf_representor_tx_burst; + + /* Setting the number queues allocated to the VF */ + ethdev->data->nb_rx_queues = IXGBE_VF_MAX_RX_QUEUES; + ethdev->data->nb_tx_queues = IXGBE_VF_MAX_RX_QUEUES; + + /* Reference VF mac address from PF data structure */ + vf_data = *IXGBE_DEV_PRIVATE_TO_P_VFDATA( + representor->pf_ethdev->data->dev_private); + + ethdev->data->mac_addrs = (struct ether_addr *) + vf_data[representor->vf_id].vf_mac_addresses; + + /* Link state. Inherited from PF */ + link = &representor->pf_ethdev->data->dev_link; + + ethdev->data->dev_link.link_speed = link->link_speed; + ethdev->data->dev_link.link_duplex = link->link_duplex; + ethdev->data->dev_link.link_status = link->link_status; + ethdev->data->dev_link.link_autoneg = link->link_autoneg; + + return 0; +} + +int +ixgbe_vf_representor_uninit(struct rte_eth_dev *ethdev __rte_unused) +{ + return 0; +} diff --git a/drivers/net/ixgbe/meson.build b/drivers/net/ixgbe/meson.build index 60af0bae..02d5ef5e 100644 --- a/drivers/net/ixgbe/meson.build +++ b/drivers/net/ixgbe/meson.build @@ -1,6 +1,8 @@ # SPDX-License-Identifier: BSD-3-Clause # Copyright(c) 2017 Intel Corporation +version = 2 + cflags += ['-DRTE_LIBRTE_IXGBE_BYPASS'] subdir('base') @@ -17,6 +19,7 @@ sources = files( 'ixgbe_pf.c', 'ixgbe_rxtx.c', 'ixgbe_tm.c', + 'ixgbe_vf_representor.c', 'rte_pmd_ixgbe.c' ) diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe.c b/drivers/net/ixgbe/rte_pmd_ixgbe.c index d8ca8ca3..3a874f9a 100644 --- a/drivers/net/ixgbe/rte_pmd_ixgbe.c +++ b/drivers/net/ixgbe/rte_pmd_ixgbe.c @@ -5,6 +5,7 @@ #include #include "base/ixgbe_api.h" +#include "base/ixgbe_x550.h" #include "ixgbe_ethdev.h" #include "rte_pmd_ixgbe.h" @@ -880,6 +881,34 @@ rte_pmd_ixgbe_set_tc_bw_alloc(uint16_t port, return 0; } +int __rte_experimental +rte_pmd_ixgbe_upd_fctrl_sbp(uint16_t port, int enable) +{ + struct ixgbe_hw *hw; + struct rte_eth_dev *dev; + uint32_t fctrl; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + dev = &rte_eth_devices[port]; + if (!is_ixgbe_supported(dev)) + return -ENOTSUP; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + if (!hw) + return -ENOTSUP; + + fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); + + /* If 'enable' set the SBP bit else clear it */ + if (enable) + fctrl |= IXGBE_FCTRL_SBP; + else + fctrl &= ~(IXGBE_FCTRL_SBP); + + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); + return 0; +} + #ifdef RTE_LIBRTE_IXGBE_BYPASS int rte_pmd_ixgbe_bypass_init(uint16_t port_id) @@ -1012,3 +1041,204 @@ rte_pmd_ixgbe_bypass_wd_reset(uint16_t port_id) return ixgbe_bypass_wd_reset(dev); } #endif + +/** + * rte_pmd_ixgbe_acquire_swfw - Acquire SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to acquire + * + * Acquires the SWFW semaphore and get the shared phy token as needed + */ +STATIC s32 rte_pmd_ixgbe_acquire_swfw(struct ixgbe_hw *hw, u32 mask) +{ + int retries = FW_PHY_TOKEN_RETRIES; + s32 status = IXGBE_SUCCESS; + + while (--retries) { + status = ixgbe_acquire_swfw_semaphore(hw, mask); + if (status) { + PMD_DRV_LOG(ERR, "Get SWFW sem failed, Status = %d\n", + status); + return status; + } + status = ixgbe_get_phy_token(hw); + if (status == IXGBE_SUCCESS) + return IXGBE_SUCCESS; + + if (status == IXGBE_ERR_TOKEN_RETRY) + PMD_DRV_LOG(ERR, "Get PHY token failed, Status = %d\n", + status); + + ixgbe_release_swfw_semaphore(hw, mask); + if 
(status != IXGBE_ERR_TOKEN_RETRY) { + PMD_DRV_LOG(ERR, + "Retry get PHY token failed, Status=%d\n", + status); + return status; + } + } + PMD_DRV_LOG(ERR, "swfw acquisition retries failed!: PHY ID = 0x%08X\n", + hw->phy.id); + return status; +} + +/** + * rte_pmd_ixgbe_release_swfw_sync - Release SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to release + * + * Releases the SWFW semaphore and puts the shared phy token as needed + */ +STATIC void rte_pmd_ixgbe_release_swfw(struct ixgbe_hw *hw, u32 mask) +{ + ixgbe_put_phy_token(hw); + ixgbe_release_swfw_semaphore(hw, mask); +} + +int __rte_experimental +rte_pmd_ixgbe_mdio_lock(uint16_t port) +{ + struct ixgbe_hw *hw; + struct rte_eth_dev *dev; + u32 swfw_mask; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + dev = &rte_eth_devices[port]; + if (!is_ixgbe_supported(dev)) + return -ENOTSUP; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + if (!hw) + return -ENOTSUP; + + if (hw->bus.lan_id) + swfw_mask = IXGBE_GSSR_PHY1_SM; + else + swfw_mask = IXGBE_GSSR_PHY0_SM; + + if (rte_pmd_ixgbe_acquire_swfw(hw, swfw_mask)) + return IXGBE_ERR_SWFW_SYNC; + + return IXGBE_SUCCESS; +} + +int __rte_experimental +rte_pmd_ixgbe_mdio_unlock(uint16_t port) +{ + struct rte_eth_dev *dev; + struct ixgbe_hw *hw; + u32 swfw_mask; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + if (!is_ixgbe_supported(dev)) + return -ENOTSUP; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + if (!hw) + return -ENOTSUP; + + if (hw->bus.lan_id) + swfw_mask = IXGBE_GSSR_PHY1_SM; + else + swfw_mask = IXGBE_GSSR_PHY0_SM; + + rte_pmd_ixgbe_release_swfw(hw, swfw_mask); + + return IXGBE_SUCCESS; +} + +int __rte_experimental +rte_pmd_ixgbe_mdio_unlocked_read(uint16_t port, uint32_t reg_addr, + uint32_t dev_type, uint16_t *phy_data) +{ + struct ixgbe_hw *hw; + struct rte_eth_dev *dev; + u32 i, data, command; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + dev = &rte_eth_devices[port]; + if (!is_ixgbe_supported(dev)) + return -ENOTSUP; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + if (!hw) + return -ENOTSUP; + + /* Setup and write the read command */ + command = (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) | + (dev_type << IXGBE_MSCA_PHY_ADDR_SHIFT) | + IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_READ_AUTOINC | + IXGBE_MSCA_MDI_COMMAND; + + IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); + + /* Check every 10 usec to see if the access completed. + * The MDI Command bit will clear when the operation is + * complete + */ + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { + usec_delay(10); + + command = IXGBE_READ_REG(hw, IXGBE_MSCA); + if (!(command & IXGBE_MSCA_MDI_COMMAND)) + break; + } + if (command & IXGBE_MSCA_MDI_COMMAND) + return IXGBE_ERR_PHY; + + /* Read operation is complete. 
Get the data from MSRWD */ + data = IXGBE_READ_REG(hw, IXGBE_MSRWD); + data >>= IXGBE_MSRWD_READ_DATA_SHIFT; + *phy_data = (u16)data; + + return 0; +} + +int __rte_experimental +rte_pmd_ixgbe_mdio_unlocked_write(uint16_t port, uint32_t reg_addr, + uint32_t dev_type, uint16_t phy_data) +{ + struct ixgbe_hw *hw; + u32 i, command; + struct rte_eth_dev *dev; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + dev = &rte_eth_devices[port]; + if (!is_ixgbe_supported(dev)) + return -ENOTSUP; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + if (!hw) + return -ENOTSUP; + + /* Put the data in the MDI single read and write data register*/ + IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data); + + /* Setup and write the write command */ + command = (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) | + (dev_type << IXGBE_MSCA_PHY_ADDR_SHIFT) | + IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_WRITE | + IXGBE_MSCA_MDI_COMMAND; + + IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); + + /* Check every 10 usec to see if the access completed. + * The MDI Command bit will clear when the operation is + * complete + */ + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { + usec_delay(10); + + command = IXGBE_READ_REG(hw, IXGBE_MSCA); + if (!(command & IXGBE_MSCA_MDI_COMMAND)) + break; + } + if (command & IXGBE_MSCA_MDI_COMMAND) { + ERROR_REPORT1(IXGBE_ERROR_POLLING, + "PHY write cmd didn't complete\n"); + return IXGBE_ERR_PHY; + } + return 0; +} diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe.h b/drivers/net/ixgbe/rte_pmd_ixgbe.h index 11a9f334..72a941f9 100644 --- a/drivers/net/ixgbe/rte_pmd_ixgbe.h +++ b/drivers/net/ixgbe/rte_pmd_ixgbe.h @@ -573,6 +573,77 @@ int rte_pmd_ixgbe_bypass_wd_timeout_show(uint16_t port, uint32_t *wd_timeout); */ int rte_pmd_ixgbe_bypass_wd_reset(uint16_t port); +/** + * Acquire swfw semaphore lock for MDIO access + * + * @param port + * The port identifier of the Ethernet device. + * @return + * - (0) if successful. + * - (-ENOTSUP) if hardware doesn't support. + * - (-ENODEV) if *port* invalid. + * - (IXGBE_ERR_SWFW_SYNC) If sw/fw semaphore acquisition failed + */ +int __rte_experimental +rte_pmd_ixgbe_mdio_lock(uint16_t port); + +/** + * Release swfw semaphore lock used for MDIO access + * + * @param port + * The port identifier of the Ethernet device. + * @return + * - (0) if successful. + * - (-ENOTSUP) if hardware doesn't support. + * - (-ENODEV) if *port* invalid. + */ +int __rte_experimental +rte_pmd_ixgbe_mdio_unlock(uint16_t port); + +/** + * Read PHY register using MDIO without MDIO lock + * The lock must be taken separately before calling this + * API + * @param port + * The port identifier of the Ethernet device. + * @param reg_addr + * 32 bit PHY Register + * @param dev_type + * Used to define device base address + * @param phy_data + * Pointer for reading PHY register data + * @return + * - (0) if successful. + * - (-ENOTSUP) if hardware doesn't support. + * - (-ENODEV) if *port* invalid. + * - (IXGBE_ERR_PHY) If PHY read command failed + */ +int __rte_experimental +rte_pmd_ixgbe_mdio_unlocked_read(uint16_t port, uint32_t reg_addr, + uint32_t dev_type, uint16_t *phy_data); + +/** + * Write data to PHY register using without MDIO lock + * The lock must be taken separately before calling this + * API + * + * @param port + * The port identifier of the Ethernet device. + * @param reg_addr + * 32 bit PHY Register + * @param dev_type + * Used to define device base address + * @param phy_data + * Data to write to PHY register + * @return + * - (0) if successful. 
+ * - (-ENOTSUP) if hardware doesn't support. + * - (-ENODEV) if *port* invalid. + * - (IXGBE_ERR_PHY) If PHY read command failed + */ +int __rte_experimental +rte_pmd_ixgbe_mdio_unlocked_write(uint16_t port, uint32_t reg_addr, + uint32_t dev_type, uint16_t phy_data); /** * Response sent back to ixgbe driver from user app after callback @@ -637,4 +708,17 @@ enum { ((x) > RTE_PMD_IXGBE_BYPASS_TMT_OFF && \ (x) < RTE_PMD_IXGBE_BYPASS_TMT_NUM)) +/** + * @param port + * The port identifier of the Ethernet device. + * @param enable + * 0 to disable and nonzero to enable 'SBP' bit in FCTRL register + * to receive all packets + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-ENOTSUP) if hardware doesn't support this feature. + */ +int __rte_experimental +rte_pmd_ixgbe_upd_fctrl_sbp(uint16_t port, int enable); #endif /* _PMD_IXGBE_H_ */ diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe_version.map b/drivers/net/ixgbe/rte_pmd_ixgbe_version.map index bf776742..c814f96d 100644 --- a/drivers/net/ixgbe/rte_pmd_ixgbe_version.map +++ b/drivers/net/ixgbe/rte_pmd_ixgbe_version.map @@ -52,3 +52,13 @@ DPDK_17.08 { rte_pmd_ixgbe_bypass_wd_timeout_show; rte_pmd_ixgbe_bypass_wd_timeout_store; } DPDK_17.05; + +EXPERIMENTAL { + global: + + rte_pmd_ixgbe_mdio_lock; + rte_pmd_ixgbe_mdio_unlock; + rte_pmd_ixgbe_mdio_unlocked_read; + rte_pmd_ixgbe_mdio_unlocked_write; + rte_pmd_ixgbe_upd_fctrl_sbp; +}; diff --git a/drivers/net/kni/Makefile b/drivers/net/kni/Makefile index 01eaef05..562e8d2d 100644 --- a/drivers/net/kni/Makefile +++ b/drivers/net/kni/Makefile @@ -10,6 +10,7 @@ LIB = librte_pmd_kni.a CFLAGS += -O3 CFLAGS += $(WERROR_FLAGS) +CFLAGS += -DALLOW_EXPERIMENTAL_API LDLIBS += -lpthread LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_kni diff --git a/drivers/net/kni/rte_eth_kni.c b/drivers/net/kni/rte_eth_kni.c index dc4e65f5..ab63ea42 100644 --- a/drivers/net/kni/rte_eth_kni.c +++ b/drivers/net/kni/rte_eth_kni.c @@ -61,10 +61,15 @@ static const struct rte_eth_link pmd_link = { .link_speed = ETH_SPEED_NUM_10G, .link_duplex = ETH_LINK_FULL_DUPLEX, .link_status = ETH_LINK_DOWN, - .link_autoneg = ETH_LINK_AUTONEG, + .link_autoneg = ETH_LINK_FIXED, }; static int is_kni_initialized; +static int eth_kni_logtype; + +#define PMD_LOG(level, fmt, args...) 
\ + rte_log(RTE_LOG_ ## level, eth_kni_logtype, \ + "%s(): " fmt "\n", __func__, ##args) static uint16_t eth_kni_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs) { @@ -126,8 +131,8 @@ eth_kni_start(struct rte_eth_dev *dev) internals->kni = rte_kni_alloc(mb_pool, &conf, NULL); if (internals->kni == NULL) { - RTE_LOG(ERR, PMD, - "Fail to create kni interface for port: %d\n", + PMD_LOG(ERR, + "Fail to create kni interface for port: %d", port_id); return -1; } @@ -149,11 +154,12 @@ eth_kni_dev_start(struct rte_eth_dev *dev) } if (internals->no_request_thread == 0) { - ret = pthread_create(&internals->thread, NULL, + ret = rte_ctrl_thread_create(&internals->thread, + "kni_handle_req", NULL, kni_handle_request, internals); if (ret) { - RTE_LOG(ERR, PMD, - "Fail to create kni request thread\n"); + PMD_LOG(ERR, + "Fail to create kni request thread"); return -1; } } @@ -174,11 +180,11 @@ eth_kni_dev_stop(struct rte_eth_dev *dev) ret = pthread_cancel(internals->thread); if (ret) - RTE_LOG(ERR, PMD, "Can't cancel the thread\n"); + PMD_LOG(ERR, "Can't cancel the thread"); ret = pthread_join(internals->thread, NULL); if (ret) - RTE_LOG(ERR, PMD, "Can't join the thread\n"); + PMD_LOG(ERR, "Can't join the thread"); internals->stop_thread = 0; } @@ -201,7 +207,6 @@ eth_kni_dev_info(struct rte_eth_dev *dev __rte_unused, dev_info->max_rx_queues = KNI_MAX_QUEUE_PER_PORT; dev_info->max_tx_queues = KNI_MAX_QUEUE_PER_PORT; dev_info->min_rx_bufsize = 0; - dev_info->pci_dev = NULL; } static int @@ -337,25 +342,17 @@ eth_kni_create(struct rte_vdev_device *vdev, struct pmd_internals *internals; struct rte_eth_dev_data *data; struct rte_eth_dev *eth_dev; - const char *name; - RTE_LOG(INFO, PMD, "Creating kni ethdev on numa socket %u\n", + PMD_LOG(INFO, "Creating kni ethdev on numa socket %u", numa_node); - name = rte_vdev_device_name(vdev); - data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node); - if (data == NULL) - return NULL; - /* reserve an ethdev entry */ eth_dev = rte_eth_vdev_allocate(vdev, sizeof(*internals)); - if (eth_dev == NULL) { - rte_free(data); + if (!eth_dev) return NULL; - } internals = eth_dev->data->dev_private; - rte_memcpy(data, eth_dev->data, sizeof(*data)); + data = eth_dev->data; data->nb_rx_queues = 1; data->nb_tx_queues = 1; data->dev_link = pmd_link; @@ -363,7 +360,6 @@ eth_kni_create(struct rte_vdev_device *vdev, eth_random_addr(internals->eth_addr.addr_bytes); - eth_dev->data = data; eth_dev->dev_ops = ð_kni_ops; internals->no_request_thread = args->no_request_thread; @@ -412,7 +408,20 @@ eth_kni_probe(struct rte_vdev_device *vdev) name = rte_vdev_device_name(vdev); params = rte_vdev_device_args(vdev); - RTE_LOG(INFO, PMD, "Initializing eth_kni for %s\n", name); + PMD_LOG(INFO, "Initializing eth_kni for %s", name); + + if (rte_eal_process_type() == RTE_PROC_SECONDARY && + strlen(params) == 0) { + eth_dev = rte_eth_dev_attach_secondary(name); + if (!eth_dev) { + PMD_LOG(ERR, "Failed to probe %s", name); + return -1; + } + /* TODO: request info from primary to set up Rx and Tx */ + eth_dev->dev_ops = ð_kni_ops; + rte_eth_dev_probing_finish(eth_dev); + return 0; + } ret = eth_kni_kvargs_process(&args, params); if (ret < 0) @@ -429,6 +438,7 @@ eth_kni_probe(struct rte_vdev_device *vdev) eth_dev->rx_pkt_burst = eth_kni_rx; eth_dev->tx_pkt_burst = eth_kni_tx; + rte_eth_dev_probing_finish(eth_dev); return 0; kni_uninit: @@ -446,7 +456,7 @@ eth_kni_remove(struct rte_vdev_device *vdev) const char *name; name = rte_vdev_device_name(vdev); - RTE_LOG(INFO, PMD, "Un-Initializing 
eth_kni for %s\n", name); + PMD_LOG(INFO, "Un-Initializing eth_kni for %s", name); /* find the ethdev entry */ eth_dev = rte_eth_dev_allocated(name); @@ -459,7 +469,6 @@ eth_kni_remove(struct rte_vdev_device *vdev) rte_kni_release(internals->kni); rte_free(internals); - rte_free(eth_dev->data); rte_eth_dev_release_port(eth_dev); @@ -477,3 +486,12 @@ static struct rte_vdev_driver eth_kni_drv = { RTE_PMD_REGISTER_VDEV(net_kni, eth_kni_drv); RTE_PMD_REGISTER_PARAM_STRING(net_kni, ETH_KNI_NO_REQUEST_THREAD_ARG "="); + +RTE_INIT(eth_kni_init_log); +static void +eth_kni_init_log(void) +{ + eth_kni_logtype = rte_log_register("pmd.net.kni"); + if (eth_kni_logtype >= 0) + rte_log_set_level(eth_kni_logtype, RTE_LOG_NOTICE); +} diff --git a/drivers/net/liquidio/lio_ethdev.c b/drivers/net/liquidio/lio_ethdev.c index e1a20cd6..64b1b86c 100644 --- a/drivers/net/liquidio/lio_ethdev.c +++ b/drivers/net/liquidio/lio_ethdev.c @@ -373,8 +373,6 @@ lio_dev_info_get(struct rte_eth_dev *eth_dev, struct lio_device *lio_dev = LIO_DEV(eth_dev); struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); - devinfo->pci_dev = pci_dev; - switch (pci_dev->id.subsystem_device_id) { /* CN23xx 10G cards */ case PCI_SUBSYS_DEV_ID_CN2350_210: @@ -478,9 +476,11 @@ lio_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu) } if (frame_len > ETHER_MAX_LEN) - eth_dev->data->dev_conf.rxmode.jumbo_frame = 1; + eth_dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; else - eth_dev->data->dev_conf.rxmode.jumbo_frame = 0; + eth_dev->data->dev_conf.rxmode.offloads &= + ~DEV_RX_OFFLOAD_JUMBO_FRAME; eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_len; eth_dev->data->mtu = mtu; @@ -904,32 +904,6 @@ lio_dev_vlan_filter_set(struct rte_eth_dev *eth_dev, uint16_t vlan_id, int on) return 0; } -/** - * Atomically writes the link status information into global - * structure rte_eth_dev. - * - * @param eth_dev - * - Pointer to the structure rte_eth_dev to read from. - * - Pointer to the buffer to be saved with the link status. - * - * @return - * - On success, zero. - * - On failure, negative value. 
- */ -static inline int -lio_dev_atomic_write_link_status(struct rte_eth_dev *eth_dev, - struct rte_eth_link *link) -{ - struct rte_eth_link *dst = ð_dev->data->dev_link; - struct rte_eth_link *src = link; - - if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, - *(uint64_t *)src) == 0) - return -1; - - return 0; -} - static uint64_t lio_hweight64(uint64_t w) { @@ -949,23 +923,19 @@ lio_dev_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete __rte_unused) { struct lio_device *lio_dev = LIO_DEV(eth_dev); - struct rte_eth_link link, old; + struct rte_eth_link link; /* Initialize */ + memset(&link, 0, sizeof(link)); link.link_status = ETH_LINK_DOWN; link.link_speed = ETH_SPEED_NUM_NONE; link.link_duplex = ETH_LINK_HALF_DUPLEX; link.link_autoneg = ETH_LINK_AUTONEG; - memset(&old, 0, sizeof(old)); /* Return what we found */ if (lio_dev->linfo.link.s.link_up == 0) { /* Interface is down */ - if (lio_dev_atomic_write_link_status(eth_dev, &link)) - return -1; - if (link.link_status == old.link_status) - return -1; - return 0; + return rte_eth_linkstatus_set(eth_dev, &link); } link.link_status = ETH_LINK_UP; /* Interface is up */ @@ -982,13 +952,7 @@ lio_dev_link_update(struct rte_eth_dev *eth_dev, link.link_duplex = ETH_LINK_HALF_DUPLEX; } - if (lio_dev_atomic_write_link_status(eth_dev, &link)) - return -1; - - if (link.link_status == old.link_status) - return -1; - - return 0; + return rte_eth_linkstatus_set(eth_dev, &link); } /** @@ -1441,6 +1405,11 @@ lio_dev_start(struct rte_eth_dev *eth_dev) /* Configure RSS if device configured with multiple RX queues. */ lio_dev_mq_rx_configure(eth_dev); + /* Before update the link info, + * must set linfo.link.link_status64 to 0. + */ + lio_dev->linfo.link.link_status64 = 0; + /* start polling for lsc */ ret = rte_eal_alarm_set(LIO_LSC_TIMEOUT, lio_sync_link_state_check, @@ -2146,19 +2115,8 @@ static int lio_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, struct rte_pci_device *pci_dev) { - struct rte_eth_dev *eth_dev; - int ret; - - eth_dev = rte_eth_dev_pci_allocate(pci_dev, - sizeof(struct lio_device)); - if (eth_dev == NULL) - return -ENOMEM; - - ret = lio_eth_dev_init(eth_dev); - if (ret) - rte_eth_dev_pci_release(eth_dev); - - return ret; + return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct lio_device), + lio_eth_dev_init); } static int diff --git a/drivers/net/meson.build b/drivers/net/meson.build index 704cbe3c..b7d00a04 100644 --- a/drivers/net/meson.build +++ b/drivers/net/meson.build @@ -1,10 +1,10 @@ # SPDX-License-Identifier: BSD-3-Clause # Copyright(c) 2017 Intel Corporation -drivers = ['af_packet', 'bonding', - 'e1000', 'fm10k', 'i40e', 'ixgbe', - 'null', 'octeontx', 'pcap', 'ring', - 'sfc', 'thunderx'] +drivers = ['af_packet', 'axgbe', 'bonding', 'dpaa', 'dpaa2', + 'e1000', 'enic', 'fm10k', 'i40e', 'ixgbe', + 'mvpp2', 'null', 'octeontx', 'pcap', 'ring', + 'sfc', 'thunderx', 'virtio'] std_deps = ['ethdev', 'kvargs'] # 'ethdev' also pulls in mbuf, net, eal etc std_deps += ['bus_pci'] # very many PMDs depend on PCI, so make std std_deps += ['bus_vdev'] # same with vdev bus diff --git a/drivers/net/mlx4/Makefile b/drivers/net/mlx4/Makefile index cc800493..73f9d405 100644 --- a/drivers/net/mlx4/Makefile +++ b/drivers/net/mlx4/Makefile @@ -1,33 +1,6 @@ -# BSD LICENSE -# +# SPDX-License-Identifier: BSD-3-Clause # Copyright 2012 6WIND S.A. 
-# Copyright 2012 Mellanox -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in -# the documentation and/or other materials provided with the -# distribution. -# * Neither the name of 6WIND S.A. nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# Copyright 2012 Mellanox Technologies, Ltd include $(RTE_SDK)/mk/rte.vars.mk @@ -64,6 +37,7 @@ CFLAGS += -D_BSD_SOURCE CFLAGS += -D_DEFAULT_SOURCE CFLAGS += -D_XOPEN_SOURCE=600 CFLAGS += $(WERROR_FLAGS) +CFLAGS += -DALLOW_EXPERIMENTAL_API ifeq ($(CONFIG_RTE_LIBRTE_MLX4_DLOPEN_DEPS),y) CFLAGS += -DMLX4_GLUE='"$(LIB_GLUE)"' CFLAGS += -DMLX4_GLUE_VERSION='"$(LIB_GLUE_VERSION)"' @@ -95,10 +69,6 @@ else CFLAGS += -DNDEBUG -UPEDANTIC endif -ifdef CONFIG_RTE_LIBRTE_MLX4_TX_MP_CACHE -CFLAGS += -DMLX4_PMD_TX_MP_CACHE=$(CONFIG_RTE_LIBRTE_MLX4_TX_MP_CACHE) -endif - include $(RTE_SDK)/mk/rte.lib.mk # Generate and clean-up mlx4_autoconf.h. @@ -132,8 +102,13 @@ ifeq ($(CONFIG_RTE_LIBRTE_MLX4_DLOPEN_DEPS),y) $(LIB): $(LIB_GLUE) +ifeq ($(LINK_USING_CC),1) +GLUE_LDFLAGS := $(call linkerprefix,$(LDFLAGS)) +else +GLUE_LDFLAGS := $(LDFLAGS) +endif $(LIB_GLUE): mlx4_glue.o - $Q $(LD) $(LDFLAGS) $(EXTRA_LDFLAGS) \ + $Q $(LD) $(GLUE_LDFLAGS) $(EXTRA_LDFLAGS) \ -Wl,-h,$(LIB_GLUE) \ -s -shared -o $@ $< -libverbs -lmlx4 diff --git a/drivers/net/mlx4/mlx4.c b/drivers/net/mlx4/mlx4.c index ee93dafe..a29814b3 100644 --- a/drivers/net/mlx4/mlx4.c +++ b/drivers/net/mlx4/mlx4.c @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2012 6WIND S.A. - * Copyright 2012 Mellanox + * Copyright 2012 Mellanox Technologies, Ltd */ /** @@ -44,9 +44,15 @@ #include "mlx4.h" #include "mlx4_glue.h" #include "mlx4_flow.h" +#include "mlx4_mr.h" #include "mlx4_rxtx.h" #include "mlx4_utils.h" +struct mlx4_dev_list mlx4_mem_event_cb_list = + LIST_HEAD_INITIALIZER(mlx4_mem_event_cb_list); + +rte_rwlock_t mlx4_mem_event_rwlock = RTE_RWLOCK_INITIALIZER; + /** Configuration structure for device arguments. */ struct mlx4_conf { struct { @@ -61,6 +67,8 @@ const char *pmd_mlx4_init_params[] = { NULL, }; +static void mlx4_dev_stop(struct rte_eth_dev *dev); + /** * DPDK callback for Ethernet device configuration. 
* @@ -123,6 +131,9 @@ mlx4_dev_start(struct rte_eth_dev *dev) (void *)dev, strerror(-ret)); goto err; } +#ifndef NDEBUG + mlx4_mr_dump_dev(dev); +#endif ret = mlx4_rxq_intr_enable(priv); if (ret) { ERROR("%p: interrupt handler installation failed", @@ -143,8 +154,7 @@ mlx4_dev_start(struct rte_eth_dev *dev) dev->rx_pkt_burst = mlx4_rx_burst; return 0; err: - /* Rollback. */ - priv->started = 0; + mlx4_dev_stop(dev); return ret; } @@ -194,10 +204,12 @@ mlx4_dev_close(struct rte_eth_dev *dev) dev->tx_pkt_burst = mlx4_tx_burst_removed; rte_wmb(); mlx4_flow_clean(priv); + mlx4_rss_deinit(priv); for (i = 0; i != dev->data->nb_rx_queues; ++i) mlx4_rx_queue_release(dev->data->rx_queues[i]); for (i = 0; i != dev->data->nb_tx_queues; ++i) mlx4_tx_queue_release(dev->data->tx_queues[i]); + mlx4_mr_release(dev); if (priv->pd != NULL) { assert(priv->ctx != NULL); claim_zero(mlx4_glue->dealloc_pd(priv->pd)); @@ -385,6 +397,99 @@ free_kvlist: return ret; } +/** + * Interpret RSS capabilities reported by device. + * + * This function returns the set of usable Verbs RSS hash fields, kernel + * quirks taken into account. + * + * @param ctx + * Verbs context. + * @param pd + * Verbs protection domain. + * @param device_attr_ex + * Extended device attributes to interpret. + * + * @return + * Usable RSS hash fields mask in Verbs format. + */ +static uint64_t +mlx4_hw_rss_sup(struct ibv_context *ctx, struct ibv_pd *pd, + struct ibv_device_attr_ex *device_attr_ex) +{ + uint64_t hw_rss_sup = device_attr_ex->rss_caps.rx_hash_fields_mask; + struct ibv_cq *cq = NULL; + struct ibv_wq *wq = NULL; + struct ibv_rwq_ind_table *ind = NULL; + struct ibv_qp *qp = NULL; + + if (!hw_rss_sup) { + WARN("no RSS capabilities reported; disabling support for UDP" + " RSS and inner VXLAN RSS"); + return IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4 | + IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6 | + IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP; + } + if (!(hw_rss_sup & IBV_RX_HASH_INNER)) + return hw_rss_sup; + /* + * Although reported as supported, missing code in some Linux + * versions (v4.15, v4.16) prevents the creation of hash QPs with + * inner capability. + * + * There is no choice but to attempt to instantiate a temporary RSS + * context in order to confirm its support. + */ + cq = mlx4_glue->create_cq(ctx, 1, NULL, NULL, 0); + wq = cq ? mlx4_glue->create_wq + (ctx, + &(struct ibv_wq_init_attr){ + .wq_type = IBV_WQT_RQ, + .max_wr = 1, + .max_sge = 1, + .pd = pd, + .cq = cq, + }) : NULL; + ind = wq ? mlx4_glue->create_rwq_ind_table + (ctx, + &(struct ibv_rwq_ind_table_init_attr){ + .log_ind_tbl_size = 0, + .ind_tbl = &wq, + .comp_mask = 0, + }) : NULL; + qp = ind ? 
mlx4_glue->create_qp_ex + (ctx, + &(struct ibv_qp_init_attr_ex){ + .comp_mask = + (IBV_QP_INIT_ATTR_PD | + IBV_QP_INIT_ATTR_RX_HASH | + IBV_QP_INIT_ATTR_IND_TABLE), + .qp_type = IBV_QPT_RAW_PACKET, + .pd = pd, + .rwq_ind_tbl = ind, + .rx_hash_conf = { + .rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ, + .rx_hash_key_len = MLX4_RSS_HASH_KEY_SIZE, + .rx_hash_key = mlx4_rss_hash_key_default, + .rx_hash_fields_mask = hw_rss_sup, + }, + }) : NULL; + if (!qp) { + WARN("disabling unusable inner RSS capability due to kernel" + " quirk"); + hw_rss_sup &= ~IBV_RX_HASH_INNER; + } else { + claim_zero(mlx4_glue->destroy_qp(qp)); + } + if (ind) + claim_zero(mlx4_glue->destroy_rwq_ind_table(ind)); + if (wq) + claim_zero(mlx4_glue->destroy_wq(wq)); + if (cq) + claim_zero(mlx4_glue->destroy_cq(cq)); + return hw_rss_sup; +} + static struct rte_pci_driver mlx4_driver; /** @@ -562,22 +667,15 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) (device_attr.vendor_part_id == PCI_DEVICE_ID_MELLANOX_CONNECTX3PRO); DEBUG("L2 tunnel checksum offloads are %ssupported", - (priv->hw_csum_l2tun ? "" : "not ")); - priv->hw_rss_sup = device_attr_ex.rss_caps.rx_hash_fields_mask; - if (!priv->hw_rss_sup) { - WARN("no RSS capabilities reported; disabling support" - " for UDP RSS and inner VXLAN RSS"); - /* Fake support for all possible RSS hash fields. */ - priv->hw_rss_sup = ~UINT64_C(0); - priv->hw_rss_sup = mlx4_conv_rss_hf(priv, -1); - /* Filter out known unsupported fields. */ - priv->hw_rss_sup &= - ~(uint64_t)(IBV_RX_HASH_SRC_PORT_UDP | - IBV_RX_HASH_DST_PORT_UDP | - IBV_RX_HASH_INNER); - } + priv->hw_csum_l2tun ? "" : "not "); + priv->hw_rss_sup = mlx4_hw_rss_sup(priv->ctx, priv->pd, + &device_attr_ex); DEBUG("supported RSS hash fields mask: %016" PRIx64, priv->hw_rss_sup); + priv->hw_fcs_strip = !!(device_attr_ex.raw_packet_caps & + IBV_RAW_PACKET_CAP_SCATTER_FCS); + DEBUG("FCS stripping toggling is %ssupported", + priv->hw_fcs_strip ? "" : "not "); /* Configure the first MAC address by default. */ if (mlx4_get_mac(priv, &mac.addr_bytes)) { ERROR("cannot get MAC address, is mlx4_en loaded?" @@ -649,6 +747,24 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) /* Update link status once if waiting for LSC. */ if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) mlx4_link_update(eth_dev, 0); + /* + * Once the device is added to the list of memory event + * callback, its global MR cache table cannot be expanded + * on the fly because of deadlock. If it overflows, lookup + * should be done by searching MR list linearly, which is slow. + */ + err = mlx4_mr_btree_init(&priv->mr.cache, + MLX4_MR_BTREE_CACHE_N * 2, + eth_dev->device->numa_node); + if (err) { + /* rte_errno is already set. */ + goto port_error; + } + /* Add device to memory callback list. */ + rte_rwlock_write_lock(&mlx4_mem_event_rwlock); + LIST_INSERT_HEAD(&mlx4_mem_event_cb_list, priv, mem_event_cb); + rte_rwlock_write_unlock(&mlx4_mem_event_rwlock); + rte_eth_dev_probing_finish(eth_dev); continue; port_error: rte_free(priv); @@ -707,12 +823,54 @@ static struct rte_pci_driver mlx4_driver = { #ifdef RTE_LIBRTE_MLX4_DLOPEN_DEPS +/** + * Suffix RTE_EAL_PMD_PATH with "-glue". + * + * This function performs a sanity check on RTE_EAL_PMD_PATH before + * suffixing its last component. + * + * @param buf[out] + * Output buffer, should be large enough otherwise NULL is returned. + * @param size + * Size of @p out. + * + * @return + * Pointer to @p buf or @p NULL in case suffix cannot be appended. 
+ */ +static char * +mlx4_glue_path(char *buf, size_t size) +{ + static const char *const bad[] = { "/", ".", "..", NULL }; + const char *path = RTE_EAL_PMD_PATH; + size_t len = strlen(path); + size_t off; + int i; + + while (len && path[len - 1] == '/') + --len; + for (off = len; off && path[off - 1] != '/'; --off) + ; + for (i = 0; bad[i]; ++i) + if (!strncmp(path + off, bad[i], (int)(len - off))) + goto error; + i = snprintf(buf, size, "%.*s-glue", (int)len, path); + if (i == -1 || (size_t)i >= size) + goto error; + return buf; +error: + ERROR("unable to append \"-glue\" to last component of" + " RTE_EAL_PMD_PATH (\"" RTE_EAL_PMD_PATH "\")," + " please re-configure DPDK"); + return NULL; +} + /** * Initialization routine for run-time dependency on rdma-core. */ static int mlx4_glue_init(void) { + char glue_path[sizeof(RTE_EAL_PMD_PATH) - 1 + sizeof("-glue")]; const char *path[] = { /* * A basic security check is necessary before trusting @@ -720,7 +878,13 @@ mlx4_glue_init(void) */ (geteuid() == getuid() && getegid() == getgid() ? getenv("MLX4_GLUE_PATH") : NULL), - RTE_EAL_PMD_PATH, + /* + * When RTE_EAL_PMD_PATH is set, use its glue-suffixed + * variant, otherwise let dlopen() look up libraries on its + * own. + */ + (*RTE_EAL_PMD_PATH ? + mlx4_glue_path(glue_path, sizeof(glue_path)) : ""), }; unsigned int i = 0; void *handle = NULL; @@ -828,6 +992,8 @@ rte_mlx4_pmd_init(void) } mlx4_glue->fork_init(); rte_pci_register(&mlx4_driver); + rte_mem_event_callback_register("MLX4_MEM_EVENT_CB", + mlx4_mr_mem_event_cb, NULL); } RTE_PMD_EXPORT_NAME(net_mlx4, __COUNTER__); diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h index 19c8a223..300cb4d7 100644 --- a/drivers/net/mlx4/mlx4.h +++ b/drivers/net/mlx4/mlx4.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2012 6WIND S.A. - * Copyright 2012 Mellanox + * Copyright 2012 Mellanox Technologies, Ltd */ #ifndef RTE_PMD_MLX4_H_ @@ -23,7 +23,9 @@ #include #include #include -#include +#include + +#include "mlx4_mr.h" #ifndef IBV_RX_HASH_INNER /** This is not necessarily defined by supported RDMA core versions. */ @@ -42,17 +44,6 @@ /** Fixed RSS hash key size in bytes. Cannot be modified. */ #define MLX4_RSS_HASH_KEY_SIZE 40 -/** - * Maximum number of cached Memory Pools (MPs) per TX queue. Each RTE MP - * from which buffers are to be transmitted will have to be mapped by this - * driver to their own Memory Region (MR). This is a slow operation. - * - * This value is always 1 for RX queues. - */ -#ifndef MLX4_PMD_TX_MP_CACHE -#define MLX4_PMD_TX_MP_CACHE 8 -#endif - /** Interrupt alarm timeout value in microseconds. */ #define MLX4_INTR_ALARM_TIMEOUT 100000 @@ -78,20 +69,12 @@ struct rxq; struct txq; struct rte_flow; -/** Memory region descriptor. */ -struct mlx4_mr { - LIST_ENTRY(mlx4_mr) next; /**< Next entry in list. */ - uintptr_t start; /**< Base address for memory region. */ - uintptr_t end; /**< End address for memory region. */ - uint32_t lkey; /**< L_Key extracted from @p mr. */ - uint32_t refcnt; /**< Reference count for this object. */ - struct priv *priv; /**< Back pointer to private data. */ - struct ibv_mr *mr; /**< Memory region associated with @p mp. */ - struct rte_mempool *mp; /**< Target memory pool (mempool). */ -}; +LIST_HEAD(mlx4_dev_list, priv); +LIST_HEAD(mlx4_mr_list, mlx4_mr); /** Private data structure. */ struct priv { + LIST_ENTRY(priv) mem_event_cb; /* Called by memory event callback. */ struct rte_eth_dev *dev; /**< Ethernet device. */ struct ibv_context *ctx; /**< Verbs context. 
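rte_mlx4_pmd_init() above now registers mlx4_mr_mem_event_cb() for memory hotplug notifications. A sketch of that registration pattern under the three-argument 18.05 API used in this patch (callback and names are illustrative; the signature matches the one mlx4_mr_mem_event_cb() implements further down, and the header placement is as of this release):

#include <stddef.h>
#include <rte_common.h>
#include <rte_log.h>
#include <rte_memory.h>

static void
demo_mem_event_cb(enum rte_mem_event event_type, const void *addr,
		  size_t len, void *arg __rte_unused)
{
	RTE_LOG(DEBUG, EAL, "mem event %s: addr=%p len=%zu\n",
		event_type == RTE_MEM_EVENT_ALLOC ? "alloc" : "free",
		addr, len);
}

static void
demo_init(void)
{
	/* One registration per callback name, typically at PMD init. */
	rte_mem_event_callback_register("DEMO_MEM_EVENT_CB",
					demo_mem_event_cb, NULL);
}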
*/ struct ibv_device_attr device_attr; /**< Device properties. */ @@ -103,15 +86,22 @@ struct priv { uint32_t vf:1; /**< This is a VF device. */ uint32_t intr_alarm:1; /**< An interrupt alarm is scheduled. */ uint32_t isolated:1; /**< Toggle isolated mode. */ + uint32_t rss_init:1; /**< Common RSS context is initialized. */ uint32_t hw_csum:1; /**< Checksum offload is supported. */ uint32_t hw_csum_l2tun:1; /**< Checksum support for L2 tunnels. */ + uint32_t hw_fcs_strip:1; /**< FCS stripping toggling is supported. */ uint64_t hw_rss_sup; /**< Supported RSS hash fields (Verbs format). */ struct rte_intr_handle intr_handle; /**< Port interrupt handle. */ struct mlx4_drop *drop; /**< Shared resources for drop flow rules. */ + struct { + uint32_t dev_gen; /* Generation number to flush local caches. */ + rte_rwlock_t rwlock; /* MR Lock. */ + struct mlx4_mr_btree cache; /* Global MR cache table. */ + struct mlx4_mr_list mr_list; /* Registered MR list. */ + struct mlx4_mr_list mr_free_list; /* Freed MR list. */ + } mr; LIST_HEAD(, mlx4_rss) rss; /**< Shared targets for Rx flow rules. */ LIST_HEAD(, rte_flow) flows; /**< Configured flow rule handles. */ - LIST_HEAD(, mlx4_mr) mr; /**< Registered memory regions. */ - rte_spinlock_t mr_lock; /**< Lock for @p mr access. */ struct ether_addr mac[MLX4_MAX_MAC_ADDRESSES]; /**< Configured MAC addresses. Unused entries are zeroed. */ }; @@ -131,7 +121,7 @@ void mlx4_allmulticast_disable(struct rte_eth_dev *dev); void mlx4_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index); int mlx4_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr, uint32_t index, uint32_t vmdq); -void mlx4_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr); +int mlx4_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr); int mlx4_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on); int mlx4_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats); void mlx4_stats_reset(struct rte_eth_dev *dev); @@ -154,11 +144,4 @@ void mlx4_rxq_intr_disable(struct priv *priv); int mlx4_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx); int mlx4_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx); -/* mlx4_mr.c */ - -struct mlx4_mr *mlx4_mr_get(struct priv *priv, struct rte_mempool *mp); -void mlx4_mr_put(struct mlx4_mr *mr); -uint32_t mlx4_txq_add_mr(struct txq *txq, struct rte_mempool *mp, - uint32_t i); - #endif /* RTE_PMD_MLX4_H_ */ diff --git a/drivers/net/mlx4/mlx4_ethdev.c b/drivers/net/mlx4/mlx4_ethdev.c index 3bc69273..30deb3ef 100644 --- a/drivers/net/mlx4/mlx4_ethdev.c +++ b/drivers/net/mlx4/mlx4_ethdev.c @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2017 6WIND S.A. - * Copyright 2017 Mellanox + * Copyright 2017 Mellanox Technologies, Ltd */ /** @@ -39,6 +39,7 @@ #include #include #include +#include #include "mlx4.h" #include "mlx4_flow.h" @@ -120,7 +121,7 @@ try_dev_id: goto try_dev_id; dev_port_prev = dev_port; if (dev_port == (priv->port - 1u)) - snprintf(match, sizeof(match), "%s", name); + strlcpy(match, name, sizeof(match)); } closedir(dir); if (match[0] == '\0') { @@ -131,167 +132,6 @@ try_dev_id: return 0; } -/** - * Read from sysfs entry. - * - * @param[in] priv - * Pointer to private structure. - * @param[in] entry - * Entry name relative to sysfs path. - * @param[out] buf - * Data output buffer. - * @param size - * Buffer size. - * - * @return - * Number of bytes read on success, negative errno value otherwise and - * rte_errno is set. 
- */ -static int -mlx4_sysfs_read(const struct priv *priv, const char *entry, - char *buf, size_t size) -{ - char ifname[IF_NAMESIZE]; - FILE *file; - int ret; - - ret = mlx4_get_ifname(priv, &ifname); - if (ret) - return ret; - - MKSTR(path, "%s/device/net/%s/%s", priv->ctx->device->ibdev_path, - ifname, entry); - - file = fopen(path, "rb"); - if (file == NULL) { - rte_errno = errno; - return -rte_errno; - } - ret = fread(buf, 1, size, file); - if ((size_t)ret < size && ferror(file)) { - rte_errno = EIO; - ret = -rte_errno; - } else { - ret = size; - } - fclose(file); - return ret; -} - -/** - * Write to sysfs entry. - * - * @param[in] priv - * Pointer to private structure. - * @param[in] entry - * Entry name relative to sysfs path. - * @param[in] buf - * Data buffer. - * @param size - * Buffer size. - * - * @return - * Number of bytes written on success, negative errno value otherwise and - * rte_errno is set. - */ -static int -mlx4_sysfs_write(const struct priv *priv, const char *entry, - char *buf, size_t size) -{ - char ifname[IF_NAMESIZE]; - FILE *file; - int ret; - - ret = mlx4_get_ifname(priv, &ifname); - if (ret) - return ret; - - MKSTR(path, "%s/device/net/%s/%s", priv->ctx->device->ibdev_path, - ifname, entry); - - file = fopen(path, "wb"); - if (file == NULL) { - rte_errno = errno; - return -rte_errno; - } - ret = fwrite(buf, 1, size, file); - if ((size_t)ret < size || ferror(file)) { - rte_errno = EIO; - ret = -rte_errno; - } else { - ret = size; - } - fclose(file); - return ret; -} - -/** - * Get unsigned long sysfs property. - * - * @param priv - * Pointer to private structure. - * @param[in] name - * Entry name relative to sysfs path. - * @param[out] value - * Value output buffer. - * - * @return - * 0 on success, negative errno value otherwise and rte_errno is set. - */ -static int -mlx4_get_sysfs_ulong(struct priv *priv, const char *name, unsigned long *value) -{ - int ret; - unsigned long value_ret; - char value_str[32]; - - ret = mlx4_sysfs_read(priv, name, value_str, (sizeof(value_str) - 1)); - if (ret < 0) { - DEBUG("cannot read %s value from sysfs: %s", - name, strerror(rte_errno)); - return ret; - } - value_str[ret] = '\0'; - errno = 0; - value_ret = strtoul(value_str, NULL, 0); - if (errno) { - rte_errno = errno; - DEBUG("invalid %s value `%s': %s", name, value_str, - strerror(rte_errno)); - return -rte_errno; - } - *value = value_ret; - return 0; -} - -/** - * Set unsigned long sysfs property. - * - * @param priv - * Pointer to private structure. - * @param[in] name - * Entry name relative to sysfs path. - * @param value - * Value to set. - * - * @return - * 0 on success, negative errno value otherwise and rte_errno is set. - */ -static int -mlx4_set_sysfs_ulong(struct priv *priv, const char *name, unsigned long value) -{ - int ret; - MKSTR(value_str, "%lu", value); - - ret = mlx4_sysfs_write(priv, name, value_str, (sizeof(value_str) - 1)); - if (ret < 0) { - DEBUG("cannot write %s `%s' (%lu) to sysfs: %s", - name, value_str, value, strerror(rte_errno)); - return ret; - } - return 0; -} - /** * Perform ifreq ioctl() on associated Ethernet device. 
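The hunks below drop these sysfs helpers in favor of mlx4_ifreq() (whose unchanged body is not part of this diff). For reference, a self-contained sketch of the mechanism the new SIOCGIFMTU path relies on: a kernel ioctl issued through a throwaway datagram socket instead of parsing sysfs text.

#include <errno.h>
#include <net/if.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

static int
demo_mtu_get(const char *ifname, uint16_t *mtu)
{
	struct ifreq ifr;
	int sock = socket(AF_INET, SOCK_DGRAM, 0);
	int ret;

	if (sock == -1)
		return -errno;
	memset(&ifr, 0, sizeof(ifr));
	snprintf(ifr.ifr_name, sizeof(ifr.ifr_name), "%s", ifname);
	ret = ioctl(sock, SIOCGIFMTU, &ifr); /* kernel fills ifr_mtu */
	close(sock);
	if (ret == -1)
		return -errno;
	*mtu = ifr.ifr_mtu;
	return 0;
}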
* @@ -361,12 +201,12 @@ mlx4_get_mac(struct priv *priv, uint8_t (*mac)[ETHER_ADDR_LEN]) int mlx4_mtu_get(struct priv *priv, uint16_t *mtu) { - unsigned long ulong_mtu = 0; - int ret = mlx4_get_sysfs_ulong(priv, "mtu", &ulong_mtu); + struct ifreq request; + int ret = mlx4_ifreq(priv, SIOCGIFMTU, &request); if (ret) return ret; - *mtu = ulong_mtu; + *mtu = request.ifr_mtu; return 0; } @@ -385,20 +225,13 @@ int mlx4_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) { struct priv *priv = dev->data->dev_private; - uint16_t new_mtu; - int ret = mlx4_set_sysfs_ulong(priv, "mtu", mtu); + struct ifreq request = { .ifr_mtu = mtu, }; + int ret = mlx4_ifreq(priv, SIOCSIFMTU, &request); if (ret) return ret; - ret = mlx4_mtu_get(priv, &new_mtu); - if (ret) - return ret; - if (new_mtu == mtu) { - priv->mtu = mtu; - return 0; - } - rte_errno = EINVAL; - return -rte_errno; + priv->mtu = mtu; + return 0; } /** @@ -417,14 +250,14 @@ mlx4_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) static int mlx4_set_flags(struct priv *priv, unsigned int keep, unsigned int flags) { - unsigned long tmp = 0; - int ret = mlx4_get_sysfs_ulong(priv, "flags", &tmp); + struct ifreq request; + int ret = mlx4_ifreq(priv, SIOCGIFFLAGS, &request); if (ret) return ret; - tmp &= keep; - tmp |= (flags & (~keep)); - return mlx4_set_sysfs_ulong(priv, "flags", tmp); + request.ifr_flags &= keep; + request.ifr_flags |= flags & ~keep; + return mlx4_ifreq(priv, SIOCSIFFLAGS, &request); } /** @@ -701,11 +534,14 @@ mlx4_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) * Pointer to Ethernet device structure. * @param mac_addr * MAC address to register. + * + * @return + * 0 on success, negative errno value otherwise and rte_errno is set. */ -void +int mlx4_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr) { - mlx4_mac_addr_add(dev, mac_addr, 0, 0); + return mlx4_mac_addr_add(dev, mac_addr, 0, 0); } /** @@ -723,7 +559,6 @@ mlx4_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info) unsigned int max; char ifname[IF_NAMESIZE]; - info->pci_dev = RTE_ETH_DEV_TO_PCI(dev); /* FIXME: we should ask the device for these values. */ info->min_rx_bufsize = 32; info->max_rx_pktlen = 65536; @@ -752,6 +587,7 @@ mlx4_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info) ETH_LINK_SPEED_20G | ETH_LINK_SPEED_40G | ETH_LINK_SPEED_56G; + info->flow_type_rss_offloads = mlx4_conv_rss_types(priv, 0, 1); } /** @@ -878,7 +714,7 @@ mlx4_link_update(struct rte_eth_dev *dev, int wait_to_complete) } link_speed = ethtool_cmd_speed(&edata); if (link_speed == -1) - dev_link.link_speed = 0; + dev_link.link_speed = ETH_SPEED_NUM_NONE; else dev_link.link_speed = link_speed; dev_link.link_duplex = ((edata.duplex == DUPLEX_HALF) ? diff --git a/drivers/net/mlx4/mlx4_flow.c b/drivers/net/mlx4/mlx4_flow.c index 2d55bfe0..b40e7e5c 100644 --- a/drivers/net/mlx4/mlx4_flow.c +++ b/drivers/net/mlx4/mlx4_flow.c @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2017 6WIND S.A. - * Copyright 2017 Mellanox + * Copyright 2017 Mellanox Technologies, Ltd */ /** @@ -76,69 +76,94 @@ struct mlx4_drop { }; /** - * Convert DPDK RSS hash fields to their Verbs equivalent. + * Convert supported RSS hash field types between DPDK and Verbs formats. * - * This function returns the supported (default) set when @p rss_hf has - * special value (uint64_t)-1. + * This function returns the supported (default) set when @p types has + * special value 0. * * @param priv * Pointer to private structure. 
- * @param rss_hf - * Hash fields in DPDK format (see struct rte_eth_rss_conf). + * @param types + * Depending on @p verbs_to_dpdk, hash types in either DPDK (see struct + * rte_eth_rss_conf) or Verbs format. + * @param verbs_to_dpdk + * A zero value converts @p types from DPDK to Verbs, a nonzero value + * performs the reverse operation. * * @return - * A valid Verbs RSS hash fields mask for mlx4 on success, (uint64_t)-1 - * otherwise and rte_errno is set. + * Converted RSS hash fields on success, (uint64_t)-1 otherwise and + * rte_errno is set. */ uint64_t -mlx4_conv_rss_hf(struct priv *priv, uint64_t rss_hf) +mlx4_conv_rss_types(struct priv *priv, uint64_t types, int verbs_to_dpdk) { - enum { IPV4, IPV6, TCP, UDP, }; - const uint64_t in[] = { - [IPV4] = (ETH_RSS_IPV4 | - ETH_RSS_FRAG_IPV4 | - ETH_RSS_NONFRAG_IPV4_TCP | - ETH_RSS_NONFRAG_IPV4_UDP | - ETH_RSS_NONFRAG_IPV4_OTHER), - [IPV6] = (ETH_RSS_IPV6 | - ETH_RSS_FRAG_IPV6 | - ETH_RSS_NONFRAG_IPV6_TCP | - ETH_RSS_NONFRAG_IPV6_UDP | - ETH_RSS_NONFRAG_IPV6_OTHER | - ETH_RSS_IPV6_EX | - ETH_RSS_IPV6_TCP_EX | - ETH_RSS_IPV6_UDP_EX), - [TCP] = (ETH_RSS_NONFRAG_IPV4_TCP | - ETH_RSS_NONFRAG_IPV6_TCP | - ETH_RSS_IPV6_TCP_EX), - [UDP] = (ETH_RSS_NONFRAG_IPV4_UDP | - ETH_RSS_NONFRAG_IPV6_UDP | - ETH_RSS_IPV6_UDP_EX), + enum { + INNER, + IPV4, IPV4_1, IPV4_2, IPV6, IPV6_1, IPV6_2, IPV6_3, + TCP, UDP, + IPV4_TCP, IPV4_UDP, IPV6_TCP, IPV6_TCP_1, IPV6_UDP, IPV6_UDP_1, }; - const uint64_t out[RTE_DIM(in)] = { - [IPV4] = IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4, - [IPV6] = IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6, - [TCP] = IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP, - [UDP] = IBV_RX_HASH_SRC_PORT_UDP | IBV_RX_HASH_DST_PORT_UDP, + enum { + VERBS_IPV4 = IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4, + VERBS_IPV6 = IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6, + VERBS_TCP = IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP, + VERBS_UDP = IBV_RX_HASH_SRC_PORT_UDP | IBV_RX_HASH_DST_PORT_UDP, }; + static const uint64_t dpdk[] = { + [INNER] = 0, + [IPV4] = ETH_RSS_IPV4, + [IPV4_1] = ETH_RSS_FRAG_IPV4, + [IPV4_2] = ETH_RSS_NONFRAG_IPV4_OTHER, + [IPV6] = ETH_RSS_IPV6, + [IPV6_1] = ETH_RSS_FRAG_IPV6, + [IPV6_2] = ETH_RSS_NONFRAG_IPV6_OTHER, + [IPV6_3] = ETH_RSS_IPV6_EX, + [TCP] = 0, + [UDP] = 0, + [IPV4_TCP] = ETH_RSS_NONFRAG_IPV4_TCP, + [IPV4_UDP] = ETH_RSS_NONFRAG_IPV4_UDP, + [IPV6_TCP] = ETH_RSS_NONFRAG_IPV6_TCP, + [IPV6_TCP_1] = ETH_RSS_IPV6_TCP_EX, + [IPV6_UDP] = ETH_RSS_NONFRAG_IPV6_UDP, + [IPV6_UDP_1] = ETH_RSS_IPV6_UDP_EX, + }; + static const uint64_t verbs[RTE_DIM(dpdk)] = { + [INNER] = IBV_RX_HASH_INNER, + [IPV4] = VERBS_IPV4, + [IPV4_1] = VERBS_IPV4, + [IPV4_2] = VERBS_IPV4, + [IPV6] = VERBS_IPV6, + [IPV6_1] = VERBS_IPV6, + [IPV6_2] = VERBS_IPV6, + [IPV6_3] = VERBS_IPV6, + [TCP] = VERBS_TCP, + [UDP] = VERBS_UDP, + [IPV4_TCP] = VERBS_IPV4 | VERBS_TCP, + [IPV4_UDP] = VERBS_IPV4 | VERBS_UDP, + [IPV6_TCP] = VERBS_IPV6 | VERBS_TCP, + [IPV6_TCP_1] = VERBS_IPV6 | VERBS_TCP, + [IPV6_UDP] = VERBS_IPV6 | VERBS_UDP, + [IPV6_UDP_1] = VERBS_IPV6 | VERBS_UDP, + }; + const uint64_t *in = verbs_to_dpdk ? verbs : dpdk; + const uint64_t *out = verbs_to_dpdk ? 
dpdk : verbs; uint64_t seen = 0; uint64_t conv = 0; unsigned int i; - for (i = 0; i != RTE_DIM(in); ++i) - if (rss_hf & in[i]) { - seen |= rss_hf & in[i]; + if (!types) { + if (!verbs_to_dpdk) + return priv->hw_rss_sup; + types = priv->hw_rss_sup; + } + for (i = 0; i != RTE_DIM(dpdk); ++i) + if (in[i] && (types & in[i]) == in[i]) { + seen |= types & in[i]; conv |= out[i]; } - if ((conv & priv->hw_rss_sup) == conv) { - if (rss_hf == (uint64_t)-1) { - /* Include inner RSS by default if supported. */ - conv |= priv->hw_rss_sup & IBV_RX_HASH_INNER; - return conv; - } - if (!(rss_hf & ~seen)) - return conv; - } + if ((verbs_to_dpdk || (conv & priv->hw_rss_sup) == conv) && + !(types & ~seen)) + return conv; rte_errno = ENOTSUP; return (uint64_t)-1; } @@ -362,6 +387,9 @@ error: * Additional mlx4-specific constraints on supported fields: * * - No support for partial masks. + * - Due to HW/FW limitation, flow rule priority is not taken into account + * when matching UDP destination ports; doing so is therefore only supported + * at the highest priority level (0). * * @param[in, out] flow * Flow rule handle to update. @@ -393,6 +421,11 @@ mlx4_flow_merge_udp(struct rte_flow *flow, msg = "mlx4 does not support matching partial UDP fields"; goto error; } + if (mask && mask->hdr.dst_port && flow->priority) { + msg = "combining UDP destination port matching with a nonzero" + " priority level is not supported"; + goto error; + } if (!flow->ibv_attr) return 0; ++flow->ibv_attr->num_of_specs; @@ -637,6 +670,7 @@ mlx4_flow_prepare(struct priv *priv, struct rte_flow temp = { .ibv_attr_size = sizeof(*temp.ibv_attr) }; struct rte_flow *flow = &temp; const char *msg = NULL; + int overlap; if (attr->group) return rte_flow_error_set @@ -651,12 +685,18 @@ mlx4_flow_prepare(struct priv *priv, return rte_flow_error_set (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, "egress is not supported"); + if (attr->transfer) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + NULL, "transfer is not supported"); if (!attr->ingress) return rte_flow_error_set (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, NULL, "only ingress is supported"); fill: + overlap = 0; proc = mlx4_flow_proc_item_list; + flow->priority = attr->priority; /* Go over pattern. */ for (item = pattern; item->type; ++item) { const struct mlx4_flow_proc_item *next = NULL; @@ -702,14 +742,24 @@ fill: } /* Go over actions list. */ for (action = actions; action->type; ++action) { + /* This one may appear anywhere multiple times. */ + if (action->type == RTE_FLOW_ACTION_TYPE_VOID) + continue; + /* Fate-deciding actions may appear exactly once. */ + if (overlap) { + msg = "cannot combine several fate-deciding actions," + " choose between DROP, QUEUE or RSS"; + goto exit_action_not_supported; + } + overlap = 1; switch (action->type) { const struct rte_flow_action_queue *queue; const struct rte_flow_action_rss *rss; - const struct rte_eth_rss_conf *rss_conf; + const uint8_t *rss_key; + uint32_t rss_key_len; + uint64_t fields; unsigned int i; - case RTE_FLOW_ACTION_TYPE_VOID: - continue; case RTE_FLOW_ACTION_TYPE_DROP: flow->drop = 1; break; @@ -736,54 +786,68 @@ fill: break; rss = action->conf; /* Default RSS configuration if none is provided. */ - rss_conf = - rss->rss_conf ?
- rss->rss_conf : - &(struct rte_eth_rss_conf){ - .rss_key = mlx4_rss_hash_key_default, - .rss_key_len = MLX4_RSS_HASH_KEY_SIZE, - .rss_hf = -1, - }; + if (rss->key_len) { + rss_key = rss->key; + rss_key_len = rss->key_len; + } else { + rss_key = mlx4_rss_hash_key_default; + rss_key_len = MLX4_RSS_HASH_KEY_SIZE; + } /* Sanity checks. */ - for (i = 0; i < rss->num; ++i) + for (i = 0; i < rss->queue_num; ++i) if (rss->queue[i] >= priv->dev->data->nb_rx_queues) break; - if (i != rss->num) { + if (i != rss->queue_num) { msg = "queue index target beyond number of" " configured Rx queues"; goto exit_action_not_supported; } - if (!rte_is_power_of_2(rss->num)) { + if (!rte_is_power_of_2(rss->queue_num)) { msg = "for RSS, mlx4 requires the number of" " queues to be a power of two"; goto exit_action_not_supported; } - if (rss_conf->rss_key_len != - sizeof(flow->rss->key)) { + if (rss_key_len != sizeof(flow->rss->key)) { msg = "mlx4 supports exactly one RSS hash key" " length: " MLX4_STR_EXPAND(MLX4_RSS_HASH_KEY_SIZE); goto exit_action_not_supported; } - for (i = 1; i < rss->num; ++i) + for (i = 1; i < rss->queue_num; ++i) if (rss->queue[i] - rss->queue[i - 1] != 1) break; - if (i != rss->num) { + if (i != rss->queue_num) { msg = "mlx4 requires RSS contexts to use" " consecutive queue indices only"; goto exit_action_not_supported; } - if (rss->queue[0] % rss->num) { + if (rss->queue[0] % rss->queue_num) { msg = "mlx4 requires the first queue of a RSS" " context to be aligned on a multiple" " of the context size"; goto exit_action_not_supported; } + if (rss->func && + rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ) { + msg = "the only supported RSS hash function" + " is Toeplitz"; + goto exit_action_not_supported; + } + if (rss->level) { + msg = "a nonzero RSS encapsulation level is" + " not supported"; + goto exit_action_not_supported; + } + rte_errno = 0; + fields = mlx4_conv_rss_types(priv, rss->types, 0); + if (fields == (uint64_t)-1 && rte_errno) { + msg = "unsupported RSS hash type requested"; + goto exit_action_not_supported; + } flow->rss = mlx4_rss_get - (priv, - mlx4_conv_rss_hf(priv, rss_conf->rss_hf), - rss_conf->rss_key, rss->num, rss->queue); + (priv, fields, rss_key, rss->queue_num, + rss->queue); if (!flow->rss) { msg = "either invalid parameters or not enough" " resources for additional multi-queue" @@ -795,10 +859,9 @@ fill: goto exit_action_not_supported; } } - if (!flow->rss && !flow->drop) - return rte_flow_error_set - (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, "no valid action"); + /* When fate is unknown, drop traffic. */ + if (!overlap) + flow->drop = 1; /* Validation ends here. */ if (!addr) { if (flow->rss) @@ -820,11 +883,14 @@ fill: }, }; - if (!mlx4_zmallocv(__func__, vec, RTE_DIM(vec))) + if (!mlx4_zmallocv(__func__, vec, RTE_DIM(vec))) { + if (temp.rss) + mlx4_rss_put(temp.rss); return rte_flow_error_set (error, -rte_errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "flow rule handle allocation failure"); + } /* Most fields will be updated by second pass. 
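To make the validation rules above concrete, here is a sketch of an RSS action that satisfies every check in this function: a power-of-two queue count, consecutive indices whose first element is aligned on the context size, the fixed 40-byte key length, Toeplitz (or default) hashing, and no encapsulation level. Field names are those of the 18.05 struct rte_flow_action_rss used throughout this patch; the key bytes are placeholders.

#include <stdint.h>
#include <rte_ethdev.h>
#include <rte_flow.h>

static const uint8_t demo_rss_key[40] = { 0x6d, 0x5a /* ...38 more bytes */ };
static const uint16_t demo_rss_queue[4] = { 0, 1, 2, 3 };

static const struct rte_flow_action_rss demo_rss = {
	.func = RTE_ETH_HASH_FUNCTION_TOEPLITZ,
	.level = 0,                      /* outer headers only */
	.types = ETH_RSS_IP,             /* converted to Verbs fields */
	.key_len = sizeof(demo_rss_key), /* exactly MLX4_RSS_HASH_KEY_SIZE */
	.key = demo_rss_key,
	.queue_num = 4,                  /* power of two */
	.queue = demo_rss_queue,         /* consecutive, 0 % 4 == 0 */
};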
*/ *flow = (struct rte_flow){ .ibv_attr = temp.ibv_attr, @@ -1264,14 +1330,20 @@ mlx4_flow_internal(struct priv *priv, struct rte_flow_error *error) */ uint32_t queues = rte_align32pow2(priv->dev->data->nb_rx_queues + 1) >> 1; - alignas(struct rte_flow_action_rss) uint8_t rss_conf_data - [offsetof(struct rte_flow_action_rss, queue) + - sizeof(((struct rte_flow_action_rss *)0)->queue[0]) * queues]; - struct rte_flow_action_rss *rss_conf = (void *)rss_conf_data; + uint16_t queue[queues]; + struct rte_flow_action_rss action_rss = { + .func = RTE_ETH_HASH_FUNCTION_DEFAULT, + .level = 0, + .types = 0, + .key_len = MLX4_RSS_HASH_KEY_SIZE, + .queue_num = queues, + .key = mlx4_rss_hash_key_default, + .queue = queue, + }; struct rte_flow_action actions[] = { { .type = RTE_FLOW_ACTION_TYPE_RSS, - .conf = rss_conf, + .conf = &action_rss, }, { .type = RTE_FLOW_ACTION_TYPE_END, @@ -1293,12 +1365,8 @@ mlx4_flow_internal(struct priv *priv, struct rte_flow_error *error) if (!queues) goto error; /* Prepare default RSS configuration. */ - *rss_conf = (struct rte_flow_action_rss){ - .rss_conf = NULL, /* Rely on default fallback settings. */ - .num = queues, - }; for (i = 0; i != queues; ++i) - rss_conf->queue[i] = i; + queue[i] = i; /* * Set up VLAN item if filtering is enabled and at least one VLAN * filter is configured. @@ -1357,7 +1425,7 @@ next_vlan: if (j != sizeof(mac->addr_bytes)) continue; if (flow->rss->queues != queues || - memcmp(flow->rss->queue_id, rss_conf->queue, + memcmp(flow->rss->queue_id, action_rss.queue, queues * sizeof(flow->rss->queue_id[0]))) continue; break; @@ -1397,7 +1465,7 @@ next_vlan: if (flow && flow->internal) { assert(flow->rss); if (flow->rss->queues != queues || - memcmp(flow->rss->queue_id, rss_conf->queue, + memcmp(flow->rss->queue_id, action_rss.queue, queues * sizeof(flow->rss->queue_id[0]))) flow = NULL; } diff --git a/drivers/net/mlx4/mlx4_flow.h b/drivers/net/mlx4/mlx4_flow.h index 00188a65..2917ebe9 100644 --- a/drivers/net/mlx4/mlx4_flow.h +++ b/drivers/net/mlx4/mlx4_flow.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2017 6WIND S.A. - * Copyright 2017 Mellanox + * Copyright 2017 Mellanox Technologies, Ltd */ #ifndef RTE_PMD_MLX4_FLOW_H_ @@ -42,12 +42,14 @@ struct rte_flow { uint32_t promisc:1; /**< This rule matches everything. */ uint32_t allmulti:1; /**< This rule matches all multicast traffic. */ uint32_t drop:1; /**< This rule drops packets. */ + uint32_t priority; /**< Flow rule priority. */ struct mlx4_rss *rss; /**< Rx target. */ }; /* mlx4_flow.c */ -uint64_t mlx4_conv_rss_hf(struct priv *priv, uint64_t rss_hf); +uint64_t mlx4_conv_rss_types(struct priv *priv, uint64_t types, + int verbs_to_dpdk); int mlx4_flow_sync(struct priv *priv, struct rte_flow_error *error); void mlx4_flow_clean(struct priv *priv); int mlx4_filter_ctrl(struct rte_eth_dev *dev, diff --git a/drivers/net/mlx4/mlx4_glue.c b/drivers/net/mlx4/mlx4_glue.c index 3b79d320..67b3bfac 100644 --- a/drivers/net/mlx4/mlx4_glue.c +++ b/drivers/net/mlx4/mlx4_glue.c @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2018 6WIND S.A. - * Copyright 2018 Mellanox + * Copyright 2018 Mellanox Technologies, Ltd */ #include diff --git a/drivers/net/mlx4/mlx4_glue.h b/drivers/net/mlx4/mlx4_glue.h index 368f906b..668ca867 100644 --- a/drivers/net/mlx4/mlx4_glue.h +++ b/drivers/net/mlx4/mlx4_glue.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2018 6WIND S.A. 
- * Copyright 2018 Mellanox + * Copyright 2018 Mellanox Technologies, Ltd */ #ifndef MLX4_GLUE_H_ diff --git a/drivers/net/mlx4/mlx4_intr.c b/drivers/net/mlx4/mlx4_intr.c index 2141992e..eeb982a0 100644 --- a/drivers/net/mlx4/mlx4_intr.c +++ b/drivers/net/mlx4/mlx4_intr.c @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2017 6WIND S.A. - * Copyright 2017 Mellanox + * Copyright 2017 Mellanox Technologies, Ltd */ /** diff --git a/drivers/net/mlx4/mlx4_mr.c b/drivers/net/mlx4/mlx4_mr.c index 9a1e4de3..d23d3c61 100644 --- a/drivers/net/mlx4/mlx4_mr.c +++ b/drivers/net/mlx4/mlx4_mr.c @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2017 6WIND S.A. - * Copyright 2017 Mellanox + * Copyright 2017 Mellanox Technologies, Ltd */ /** @@ -30,237 +30,1152 @@ #include #include #include -#include +#include #include "mlx4_glue.h" +#include "mlx4_mr.h" #include "mlx4_rxtx.h" #include "mlx4_utils.h" -struct mlx4_check_mempool_data { +struct mr_find_contig_memsegs_data { + uintptr_t addr; + uintptr_t start; + uintptr_t end; + const struct rte_memseg_list *msl; +}; + +struct mr_update_mp_data { + struct rte_eth_dev *dev; + struct mlx4_mr_ctrl *mr_ctrl; int ret; - char *start; - char *end; }; /** - * Called by mlx4_check_mempool() when iterating the memory chunks. - * - * @param[in] mp - * Pointer to memory pool (unused). - * @param[in, out] data - * Pointer to shared buffer with mlx4_check_mempool(). - * @param[in] memhdr - * Pointer to mempool chunk header. - * @param mem_idx - * Mempool element index (unused). + * Expand B-tree table to a given size. Can't be called while holding + * memory_hotplug_lock or priv->mr.rwlock due to rte_realloc(). + * + * @param bt + * Pointer to B-tree structure. + * @param n + * Number of entries for expansion. + * + * @return + * 0 on success, -1 on failure. */ -static void -mlx4_check_mempool_cb(struct rte_mempool *mp, void *opaque, - struct rte_mempool_memhdr *memhdr, - unsigned int mem_idx) +static int +mr_btree_expand(struct mlx4_mr_btree *bt, int n) +{ + void *mem; + int ret = 0; + + if (n <= bt->size) + return ret; + /* + * Downside of directly using rte_realloc() is that SOCKET_ID_ANY is + * used inside if there's no room to expand. Because this is a quite + * rare case and part of a very slow path, it is acceptable. + * Initially cache_bh[] will be given practically enough space and once + * it is expanded, further expansion shouldn't ever be needed. + */ + mem = rte_realloc(bt->table, n * sizeof(struct mlx4_mr_cache), 0); + if (mem == NULL) { + /* Not an error, B-tree search will be skipped. */ + WARN("failed to expand MR B-tree (%p) table", (void *)bt); + ret = -1; + } else { + DEBUG("expanded MR B-tree table (size=%u)", n); + bt->table = mem; + bt->size = n; + } + return ret; +} + +/** + * Look up LKey from a given B-tree lookup table, store the last index and + * return the searched LKey. + * + * @param bt + * Pointer to B-tree structure. + * @param[out] idx + * Pointer to index. Even on search failure, returns index where it stops + * searching so that index can be used when inserting a new entry. + * @param addr + * Search key. + * + * @return + * Searched LKey on success, UINT32_MAX on no match. + */ +static uint32_t +mr_btree_lookup(struct mlx4_mr_btree *bt, uint16_t *idx, uintptr_t addr) +{ + struct mlx4_mr_cache *lkp_tbl; + uint16_t n; + uint16_t base = 0; + + assert(bt != NULL); + lkp_tbl = *bt->table; + n = bt->len; + /* First entry must be NULL for comparison.
*/ + assert(bt->len > 0 || (lkp_tbl[0].start == 0 && + lkp_tbl[0].lkey == UINT32_MAX)); + /* Binary search. */ + do { + register uint16_t delta = n >> 1; + + if (addr < lkp_tbl[base + delta].start) { + n = delta; + } else { + base += delta; + n -= delta; + } + } while (n > 1); + assert(addr >= lkp_tbl[base].start); + *idx = base; + if (addr < lkp_tbl[base].end) + return lkp_tbl[base].lkey; + /* Not found. */ + return UINT32_MAX; +} + +/** + * Insert an entry to B-tree lookup table. + * + * @param bt + * Pointer to B-tree structure. + * @param entry + * Pointer to new entry to insert. + * + * @return + * 0 on success, -1 on failure. + */ +static int +mr_btree_insert(struct mlx4_mr_btree *bt, struct mlx4_mr_cache *entry) { - struct mlx4_check_mempool_data *data = opaque; + struct mlx4_mr_cache *lkp_tbl; + uint16_t idx = 0; + size_t shift; - (void)mp; - (void)mem_idx; - /* It already failed, skip the next chunks. */ - if (data->ret != 0) + assert(bt != NULL); + assert(bt->len <= bt->size); + assert(bt->len > 0); + lkp_tbl = *bt->table; + /* Find out the slot for insertion. */ + if (mr_btree_lookup(bt, &idx, entry->start) != UINT32_MAX) { + DEBUG("abort insertion to B-tree(%p): already exist at" + " idx=%u [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x", + (void *)bt, idx, entry->start, entry->end, entry->lkey); + /* Already exist, return. */ + return 0; + } + /* If table is full, return error. */ + if (unlikely(bt->len == bt->size)) { + bt->overflow = 1; + return -1; + } + /* Insert entry. */ + ++idx; + shift = (bt->len - idx) * sizeof(struct mlx4_mr_cache); + if (shift) + memmove(&lkp_tbl[idx + 1], &lkp_tbl[idx], shift); + lkp_tbl[idx] = *entry; + bt->len++; + DEBUG("inserted B-tree(%p)[%u]," + " [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x", + (void *)bt, idx, entry->start, entry->end, entry->lkey); + return 0; +} + +/** + * Initialize B-tree and allocate memory for lookup table. + * + * @param bt + * Pointer to B-tree structure. + * @param n + * Number of entries to allocate. + * @param socket + * NUMA socket on which memory must be allocated. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx4_mr_btree_init(struct mlx4_mr_btree *bt, int n, int socket) +{ + if (bt == NULL) { + rte_errno = EINVAL; + return -rte_errno; + } + memset(bt, 0, sizeof(*bt)); + bt->table = rte_calloc_socket("B-tree table", + n, sizeof(struct mlx4_mr_cache), + 0, socket); + if (bt->table == NULL) { + rte_errno = ENOMEM; + ERROR("failed to allocate memory for btree cache on socket %d", + socket); + return -rte_errno; + } + bt->size = n; + /* First entry must be NULL for binary search. */ + (*bt->table)[bt->len++] = (struct mlx4_mr_cache) { + .lkey = UINT32_MAX, + }; + DEBUG("initialized B-tree %p with table %p", + (void *)bt, (void *)bt->table); + return 0; +} + +/** + * Free B-tree resources. + * + * @param bt + * Pointer to B-tree structure. + */ +void +mlx4_mr_btree_free(struct mlx4_mr_btree *bt) +{ + if (bt == NULL) return; - /* It is the first chunk. */ - if (data->start == NULL && data->end == NULL) { - data->start = memhdr->addr; - data->end = data->start + memhdr->len; + DEBUG("freeing B-tree %p with table %p", (void *)bt, (void *)bt->table); + rte_free(bt->table); + memset(bt, 0, sizeof(*bt)); +} + +#ifndef NDEBUG +/** + * Dump all the entries in a B-tree + * + * @param bt + * Pointer to B-tree structure. 
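The lookup above is worth restating in isolation: despite the B-tree naming, the table is a sorted array with a sentinel first entry, probed by interval halving. A condensed sketch of the same semantics on plain types (simplified; not the driver's own structure):

#include <stdint.h>

struct range { uintptr_t start, end; uint32_t lkey; };

static uint32_t
range_lookup(const struct range *tbl, uint16_t len, uintptr_t addr)
{
	uint16_t n = len;
	uint16_t base = 0;

	/* tbl[0] is the sentinel: start == 0, lkey == UINT32_MAX. */
	do {
		uint16_t delta = n >> 1;

		if (addr < tbl[base + delta].start) {
			n = delta;          /* keep the lower half */
		} else {
			base += delta;      /* keep the upper half */
			n -= delta;
		}
	} while (n > 1);
	/* base is now the last entry whose start <= addr. */
	return addr < tbl[base].end ? tbl[base].lkey : UINT32_MAX;
}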
+ */ +void +mlx4_mr_btree_dump(struct mlx4_mr_btree *bt) +{ + int idx; + struct mlx4_mr_cache *lkp_tbl; + + if (bt == NULL) return; + lkp_tbl = *bt->table; + for (idx = 0; idx < bt->len; ++idx) { + struct mlx4_mr_cache *entry = &lkp_tbl[idx]; + + DEBUG("B-tree(%p)[%u]," + " [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x", + (void *)bt, idx, entry->start, entry->end, entry->lkey); } +} +#endif + +/** + * Find a virtually contiguous memory chunk in a given MR. + * + * @param mr + * Pointer to MR structure. + * @param[out] entry + * Pointer to returning MR cache entry. If not found, this will not be + * updated. + * @param base_idx + * Start index of the memseg bitmap. + * + * @return + * Next index to go on lookup. + */ +static int +mr_find_next_chunk(struct mlx4_mr *mr, struct mlx4_mr_cache *entry, + int base_idx) +{ + uintptr_t start = 0; + uintptr_t end = 0; + uint32_t idx = 0; + + for (idx = base_idx; idx < mr->ms_bmp_n; ++idx) { + if (rte_bitmap_get(mr->ms_bmp, idx)) { + const struct rte_memseg_list *msl; + const struct rte_memseg *ms; + + msl = mr->msl; + ms = rte_fbarray_get(&msl->memseg_arr, + mr->ms_base_idx + idx); + assert(msl->page_sz == ms->hugepage_sz); + if (!start) + start = ms->addr_64; + end = ms->addr_64 + ms->hugepage_sz; + } else if (start) { + /* Passed the end of a fragment. */ + break; + } } + if (start) { + /* Found one chunk. */ + entry->start = start; + entry->end = end; + entry->lkey = rte_cpu_to_be_32(mr->ibv_mr->lkey); } + return idx; } /** - * Check if a mempool can be used: it must be virtually contiguous. + * Insert an MR into the global B-tree cache. It may fail due to low-on-memory. + * Then, this entry will have to be searched by mr_lookup_dev_list() in + * mlx4_mr_create() on miss. * - * @param[in] mp - * Pointer to memory pool. - * @param[out] start - * Pointer to the start address of the mempool virtual memory area. - * @param[out] end - * Pointer to the end address of the mempool virtual memory area. + * @param dev + * Pointer to Ethernet device. + * @param mr + * Pointer to MR to insert. * * @return - * 0 on success (mempool is virtually contiguous), -1 on error. + * 0 on success, -1 on failure. */ static int -mlx4_check_mempool(struct rte_mempool *mp, uintptr_t *start, uintptr_t *end) +mr_insert_dev_cache(struct rte_eth_dev *dev, struct mlx4_mr *mr) { - struct mlx4_check_mempool_data data; + struct priv *priv = dev->data->dev_private; + unsigned int n; - memset(&data, 0, sizeof(data)); - rte_mempool_mem_iter(mp, mlx4_check_mempool_cb, &data); - *start = (uintptr_t)data.start; - *end = (uintptr_t)data.end; - return data.ret; + DEBUG("port %u inserting MR(%p) to global cache", + dev->data->port_id, (void *)mr); + for (n = 0; n < mr->ms_bmp_n; ) { + struct mlx4_mr_cache entry = { 0, }; + + /* Find a contiguous chunk and advance the index. */ + n = mr_find_next_chunk(mr, &entry, n); + if (!entry.end) + break; + if (mr_btree_insert(&priv->mr.cache, &entry) < 0) { + /* + * Overflowed, but the global table cannot be expanded + * because of deadlock. + */ + return -1; + } } + return 0; } /** - * Obtain a memory region from a memory pool. + * Look up address in the original global MR list.
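mr_find_next_chunk() above is essentially a scan for runs of set bits, resumable from the returned index. The same idea on a plain byte-addressed bit array (a simplified stand-in for rte_bitmap): [*run_start, *run_end) is reported through out-parameters, and *run_end == 0 signals that no run was found, mirroring how mr_insert_dev_cache() tests entry.end.

#include <stdint.h>

static unsigned int
bitmap_next_run(const uint8_t *bits, unsigned int nbits, unsigned int from,
		unsigned int *run_start, unsigned int *run_end)
{
	unsigned int i;

	*run_start = 0;
	*run_end = 0;
	for (i = from; i < nbits; ++i) {
		int set = bits[i / 8] & (1u << (i % 8));

		if (set) {
			if (*run_end == 0)
				*run_start = i; /* run begins here */
			*run_end = i + 1;
		} else if (*run_end != 0) {
			break; /* passed the end of a fragment */
		}
	}
	return i; /* next index to resume the lookup from */
}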
* - * If a matching memory region already exists, it is returned with its - * reference count incremented, otherwise a new one is registered. + * @param dev + * Pointer to Ethernet device. + * @param[out] entry + * Pointer to returning MR cache entry. If no match, this will not be updated. + * @param addr + * Search key. * - * @param priv - * Pointer to private structure. - * @param mp - * Pointer to memory pool. + * @return + * Found MR on match, NULL otherwise. + */ +static struct mlx4_mr * +mr_lookup_dev_list(struct rte_eth_dev *dev, struct mlx4_mr_cache *entry, + uintptr_t addr) +{ + struct priv *priv = dev->data->dev_private; + struct mlx4_mr *mr; + + /* Iterate all the existing MRs. */ + LIST_FOREACH(mr, &priv->mr.mr_list, mr) { + unsigned int n; + + if (mr->ms_n == 0) + continue; + for (n = 0; n < mr->ms_bmp_n; ) { + struct mlx4_mr_cache ret = { 0, }; + + n = mr_find_next_chunk(mr, &ret, n); + if (addr >= ret.start && addr < ret.end) { + /* Found. */ + *entry = ret; + return mr; + } + } + } + return NULL; +} + +/** + * Look up address on device. + * + * @param dev + * Pointer to Ethernet device. + * @param[out] entry + * Pointer to returning MR cache entry. If no match, this will not be updated. + * @param addr + * Search key. * * @return - * Memory region pointer, NULL in case of error and rte_errno is set. + * Searched LKey on success, UINT32_MAX on failure and rte_errno is set. */ -struct mlx4_mr * -mlx4_mr_get(struct priv *priv, struct rte_mempool *mp) +static uint32_t +mr_lookup_dev(struct rte_eth_dev *dev, struct mlx4_mr_cache *entry, + uintptr_t addr) { - const struct rte_memseg *ms = rte_eal_get_physmem_layout(); - uintptr_t start; - uintptr_t end; - unsigned int i; + struct priv *priv = dev->data->dev_private; + uint16_t idx; + uint32_t lkey = UINT32_MAX; struct mlx4_mr *mr; - if (mlx4_check_mempool(mp, &start, &end) != 0) { - rte_errno = EINVAL; - ERROR("mempool %p: not virtually contiguous", - (void *)mp); - return NULL; + /* + * If the global cache has overflowed since it failed to expand the + * B-tree table, it can't have all the existing MRs. Then, the address + * has to be searched by traversing the original MR list instead, which + * is very slow path. Otherwise, the global cache is all inclusive. + */ + if (!unlikely(priv->mr.cache.overflow)) { + lkey = mr_btree_lookup(&priv->mr.cache, &idx, addr); + if (lkey != UINT32_MAX) + *entry = (*priv->mr.cache.table)[idx]; + } else { + /* Falling back to the slowest path. */ + mr = mr_lookup_dev_list(dev, entry, addr); + if (mr != NULL) + lkey = entry->lkey; } - DEBUG("mempool %p area start=%p end=%p size=%zu", - (void *)mp, (void *)start, (void *)end, - (size_t)(end - start)); - /* Round start and end to page boundary if found in memory segments. */ - for (i = 0; (i < RTE_MAX_MEMSEG) && (ms[i].addr != NULL); ++i) { - uintptr_t addr = (uintptr_t)ms[i].addr; - size_t len = ms[i].len; - unsigned int align = ms[i].hugepage_sz; - - if ((start > addr) && (start < addr + len)) - start = RTE_ALIGN_FLOOR(start, align); - if ((end > addr) && (end < addr + len)) - end = RTE_ALIGN_CEIL(end, align); + assert(lkey == UINT32_MAX || (addr >= entry->start && + addr < entry->end)); + return lkey; +} + +/** + * Free MR resources. MR lock must not be held to avoid a deadlock. rte_free() + * can raise memory free event and the callback function will spin on the lock. + * + * @param mr + * Pointer to MR to free. 
+ */ +static void +mr_free(struct mlx4_mr *mr) +{ + if (mr == NULL) + return; + DEBUG("freeing MR(%p):", (void *)mr); + if (mr->ibv_mr != NULL) + claim_zero(mlx4_glue->dereg_mr(mr->ibv_mr)); + if (mr->ms_bmp != NULL) + rte_bitmap_free(mr->ms_bmp); + rte_free(mr); +} + +/** + * Release resources of detached MRs having no online entry. + * + * @param dev + * Pointer to Ethernet device. + */ +static void +mlx4_mr_garbage_collect(struct rte_eth_dev *dev) +{ + struct priv *priv = dev->data->dev_private; + struct mlx4_mr *mr_next; + struct mlx4_mr_list free_list = LIST_HEAD_INITIALIZER(free_list); + + /* + * An MR can't be freed while holding the lock because rte_free() could + * call the memory free callback function. This will be a deadlock + * situation. + */ + rte_rwlock_write_lock(&priv->mr.rwlock); + /* Detach the whole free list and release it after unlocking. */ + free_list = priv->mr.mr_free_list; + LIST_INIT(&priv->mr.mr_free_list); + rte_rwlock_write_unlock(&priv->mr.rwlock); + /* Release resources. */ + mr_next = LIST_FIRST(&free_list); + while (mr_next != NULL) { + struct mlx4_mr *mr = mr_next; + + mr_next = LIST_NEXT(mr, mr); + mr_free(mr); } +} + +/* Called during rte_memseg_contig_walk() by mlx4_mr_create(). */ +static int +mr_find_contig_memsegs_cb(const struct rte_memseg_list *msl, + const struct rte_memseg *ms, size_t len, void *arg) +{ + struct mr_find_contig_memsegs_data *data = arg; + + if (data->addr < ms->addr_64 || data->addr >= ms->addr_64 + len) + return 0; + /* Found, save it and stop walking. */ + data->start = ms->addr_64; + data->end = ms->addr_64 + len; + data->msl = msl; + return 1; +} + +/** + * Create a new global Memory Region (MR) for a missing virtual address. + * Register entire virtually contiguous memory chunk around the address. + * + * @param dev + * Pointer to Ethernet device. + * @param[out] entry + * Pointer to returning MR cache entry, found in the global cache or newly + * created. If failed to create one, this will not be updated. + * @param addr + * Target virtual address to register. + * + * @return + * Searched LKey on success, UINT32_MAX on failure and rte_errno is set. + */ +static uint32_t +mlx4_mr_create(struct rte_eth_dev *dev, struct mlx4_mr_cache *entry, + uintptr_t addr) +{ + struct priv *priv = dev->data->dev_private; + struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config; + const struct rte_memseg_list *msl; + const struct rte_memseg *ms; + struct mlx4_mr *mr = NULL; + size_t len; + uint32_t ms_n; + uint32_t bmp_size; + void *bmp_mem; + int ms_idx_shift = -1; + unsigned int n; + struct mr_find_contig_memsegs_data data = { + .addr = addr, + }; + struct mr_find_contig_memsegs_data data_re; + + DEBUG("port %u creating a MR using address (%p)", + dev->data->port_id, (void *)addr); + /* + * Release detached MRs if any. This can't be called while holding + * either memory_hotplug_lock or priv->mr.rwlock. MRs on the free list + * have been detached by the memory free event but could not be released + * inside the callback due to deadlock. As a result, releasing resources + * is quite opportunistic.
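The unlock-before-free dance in mlx4_mr_garbage_collect() above generalizes to any structure whose destructor can re-enter the same lock (here, rte_free() may fire the memory event callback, which takes priv->mr.rwlock). A generic sketch of the pattern, not driver code:

#include <sys/queue.h>
#include <rte_malloc.h>
#include <rte_rwlock.h>

struct node {
	LIST_ENTRY(node) link;
};
LIST_HEAD(node_list, node);

static void
drain_free_list(struct node_list *pending, rte_rwlock_t *lock)
{
	struct node_list local = LIST_HEAD_INITIALIZER(local);
	struct node *n, *next;

	rte_rwlock_write_lock(lock);
	local = *pending;   /* steal the whole list... */
	LIST_INIT(pending); /* ...and leave the shared head empty */
	rte_rwlock_write_unlock(lock);
	/* Free outside the lock; rte_free() may re-enter callbacks. */
	for (n = LIST_FIRST(&local); n != NULL; n = next) {
		next = LIST_NEXT(n, link);
		rte_free(n);
	}
}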
+ */ + mlx4_mr_garbage_collect(dev); + /* + * Find out a contiguous virtual address chunk in use, to which the + * given address belongs, in order to register the maximum range. In the + * best case where mempools are not dynamically recreated and + * '--socket-mem' is specified as an EAL option, it is very likely to + * have only one MR (LKey) per socket and per hugepage size even + * though the system memory is highly fragmented. + */ + if (!rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data)) { + WARN("port %u unable to find virtually contiguous" + " chunk for address (%p)." + " rte_memseg_contig_walk() failed.", + dev->data->port_id, (void *)addr); + rte_errno = ENXIO; + goto err_nolock; } +alloc_resources: + /* Addresses must be page-aligned. */ + assert(rte_is_aligned((void *)data.start, data.msl->page_sz)); + assert(rte_is_aligned((void *)data.end, data.msl->page_sz)); + msl = data.msl; + ms = rte_mem_virt2memseg((void *)data.start, msl); + len = data.end - data.start; + assert(msl->page_sz == ms->hugepage_sz); + /* Number of memsegs in the range. */ + ms_n = len / msl->page_sz; + DEBUG("port %u extending %p to [0x%" PRIxPTR ", 0x%" PRIxPTR ")," + " page_sz=0x%" PRIx64 ", ms_n=%u", + dev->data->port_id, (void *)addr, + data.start, data.end, msl->page_sz, ms_n); + /* Size of memory for bitmap. */ + bmp_size = rte_bitmap_get_memory_footprint(ms_n); + mr = rte_zmalloc_socket(NULL, + RTE_ALIGN_CEIL(sizeof(*mr), + RTE_CACHE_LINE_SIZE) + + bmp_size, + RTE_CACHE_LINE_SIZE, msl->socket_id); + if (mr == NULL) { + WARN("port %u unable to allocate memory for a new MR of" + " address (%p).", + dev->data->port_id, (void *)addr); + rte_errno = ENOMEM; + goto err_nolock; } + mr->msl = msl; + /* + * Save the index of the first memseg and initialize memseg bitmap. To + * see if a memseg of ms_idx in the memseg-list is still valid, check: + * rte_bitmap_get(mr->bmp, ms_idx - mr->ms_base_idx) + */ + mr->ms_base_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms); + bmp_mem = RTE_PTR_ALIGN_CEIL(mr + 1, RTE_CACHE_LINE_SIZE); + mr->ms_bmp = rte_bitmap_init(ms_n, bmp_mem, bmp_size); + if (mr->ms_bmp == NULL) { + WARN("port %u unable to initialize bitmap for a new MR of" + " address (%p).", + dev->data->port_id, (void *)addr); + rte_errno = EINVAL; + goto err_nolock; + } + /* + * Should recheck whether the extended contiguous chunk is still valid. + * Because memory_hotplug_lock can't be held if there are any memory- + * related calls in a critical path, resource allocation above can't be + * locked. If the memory has been changed at this point, try again with + * just a single page. If not, go on with the big chunk atomically from + * here. + */ + rte_rwlock_read_lock(&mcfg->memory_hotplug_lock); + data_re = data; + if (len > msl->page_sz && + !rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data_re)) { + WARN("port %u unable to find virtually contiguous" + " chunk for address (%p)."
+ " rte_memseg_contig_walk() failed.", + dev->data->port_id, (void *)addr); + rte_errno = ENXIO; + goto err_memlock; + } + if (data.start != data_re.start || data.end != data_re.end) { + /* + * The extended contiguous chunk has been changed. Try again + * with single memseg instead. + */ + data.start = RTE_ALIGN_FLOOR(addr, msl->page_sz); + data.end = data.start + msl->page_sz; + rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock); + mr_free(mr); + goto alloc_resources; } -release: - rte_spinlock_unlock(&priv->mr_lock); - return mr; + assert(data.msl == data_re.msl); + rte_rwlock_write_lock(&priv->mr.rwlock); + /* + * Check the address is really missing. If other thread already created + * one or it is not found due to overflow, abort and return. + */ + if (mr_lookup_dev(dev, entry, addr) != UINT32_MAX) { + /* + * Insert to the global cache table. It may fail due to + * low-on-memory. Then, this entry will have to be searched + * here again. + */ + mr_btree_insert(&priv->mr.cache, entry); + DEBUG("port %u found MR for %p on final lookup, abort", + dev->data->port_id, (void *)addr); + rte_rwlock_write_unlock(&priv->mr.rwlock); + rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock); + /* + * Must be unlocked before calling rte_free() because + * mlx4_mr_mem_event_free_cb() can be called inside. + */ + mr_free(mr); + return entry->lkey; + } + /* + * Trim start and end addresses for verbs MR. Set bits for registering + * memsegs but exclude already registered ones. Bitmap can be + * fragmented. + */ + for (n = 0; n < ms_n; ++n) { + uintptr_t start; + struct mlx4_mr_cache ret = { 0, }; + + start = data_re.start + n * msl->page_sz; + /* Exclude memsegs already registered by other MRs. */ + if (mr_lookup_dev(dev, &ret, start) == UINT32_MAX) { + /* + * Start from the first unregistered memseg in the + * extended range. + */ + if (ms_idx_shift == -1) { + mr->ms_base_idx += n; + data.start = start; + ms_idx_shift = n; + } + data.end = start + msl->page_sz; + rte_bitmap_set(mr->ms_bmp, n - ms_idx_shift); + ++mr->ms_n; + } + } + len = data.end - data.start; + mr->ms_bmp_n = len / msl->page_sz; + assert(ms_idx_shift + mr->ms_bmp_n <= ms_n); + /* + * Finally create a verbs MR for the memory chunk. ibv_reg_mr() can be + * called with holding the memory lock because it doesn't use + * mlx4_alloc_buf_extern() which eventually calls rte_malloc_socket() + * through mlx4_alloc_verbs_buf(). + */ + mr->ibv_mr = mlx4_glue->reg_mr(priv->pd, (void *)data.start, len, + IBV_ACCESS_LOCAL_WRITE); + if (mr->ibv_mr == NULL) { + WARN("port %u fail to create a verbs MR for address (%p)", + dev->data->port_id, (void *)addr); + rte_errno = EINVAL; + goto err_mrlock; + } + assert((uintptr_t)mr->ibv_mr->addr == data.start); + assert(mr->ibv_mr->length == len); + LIST_INSERT_HEAD(&priv->mr.mr_list, mr, mr); + DEBUG("port %u MR CREATED (%p) for %p:\n" + " [0x%" PRIxPTR ", 0x%" PRIxPTR ")," + " lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u", + dev->data->port_id, (void *)mr, (void *)addr, + data.start, data.end, rte_cpu_to_be_32(mr->ibv_mr->lkey), + mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n); + /* Insert to the global cache table. */ + mr_insert_dev_cache(dev, mr); + /* Fill in output data. */ + mr_lookup_dev(dev, entry, addr); + /* Lookup can't fail. 
*/ + assert(entry->lkey != UINT32_MAX); + rte_rwlock_write_unlock(&priv->mr.rwlock); + rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock); + return entry->lkey; +err_mrlock: + rte_rwlock_write_unlock(&priv->mr.rwlock); +err_memlock: + rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock); +err_nolock: + /* + * In case of error, as this can be called in a datapath, a warning + * message per error is preferable instead. Must be unlocked before + * calling rte_free() because mlx4_mr_mem_event_free_cb() can be called + * inside. + */ + mr_free(mr); + return UINT32_MAX; } /** - * Release a memory region. + * Rebuild the global B-tree cache of the device from the original MR list. * - * This function decrements its reference count and destroys it after - * reaching 0. + * @param dev + * Pointer to Ethernet device. + */ +static void +mr_rebuild_dev_cache(struct rte_eth_dev *dev) +{ + struct priv *priv = dev->data->dev_private; + struct mlx4_mr *mr; + + DEBUG("port %u rebuild dev cache[]", dev->data->port_id); + /* Flush cache to rebuild. */ + priv->mr.cache.len = 1; + priv->mr.cache.overflow = 0; + /* Iterate all the existing MRs. */ + LIST_FOREACH(mr, &priv->mr.mr_list, mr) + if (mr_insert_dev_cache(dev, mr) < 0) + return; +} + +/** + * Callback for memory free event. Iterate freed memsegs and check whether each + * belongs to an existing MR. If found, clear the bit from the MR's bitmap. As a + * result, the MR would be fragmented. If it becomes empty, the MR will be freed + * later by mlx4_mr_garbage_collect(). * - * Note to avoid race conditions given this function may be used from the - * data plane, it's extremely important that each user holds its own - * reference. + * The global cache must be rebuilt if there's any change and this event has to + * be propagated to dataplane threads to flush the local caches. * - * @param mr - * Memory region to release. + * @param dev + * Pointer to Ethernet device. + * @param addr + * Address of freed memory. + * @param len + * Size of freed memory. + */ +static void +mlx4_mr_mem_event_free_cb(struct rte_eth_dev *dev, const void *addr, size_t len) +{ + struct priv *priv = dev->data->dev_private; + const struct rte_memseg_list *msl; + struct mlx4_mr *mr; + int ms_n; + int i; + int rebuild = 0; + + DEBUG("port %u free callback: addr=%p, len=%zu", + dev->data->port_id, addr, len); + msl = rte_mem_virt2memseg_list(addr); + /* addr and len must be page-aligned. */ + assert((uintptr_t)addr == RTE_ALIGN((uintptr_t)addr, msl->page_sz)); + assert(len == RTE_ALIGN(len, msl->page_sz)); + ms_n = len / msl->page_sz; + rte_rwlock_write_lock(&priv->mr.rwlock); + /* Clear bits of freed memsegs from MR. */ + for (i = 0; i < ms_n; ++i) { + const struct rte_memseg *ms; + struct mlx4_mr_cache entry; + uintptr_t start; + int ms_idx; + uint32_t pos; + + /* Find MR having this memseg.
*/ + start = (uintptr_t)addr + i * msl->page_sz; + mr = mr_lookup_dev_list(dev, &entry, start); + if (mr == NULL) + continue; + ms = rte_mem_virt2memseg((void *)start, msl); + assert(ms != NULL); + assert(msl->page_sz == ms->hugepage_sz); + ms_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms); + pos = ms_idx - mr->ms_base_idx; + assert(rte_bitmap_get(mr->ms_bmp, pos)); + assert(pos < mr->ms_bmp_n); + DEBUG("port %u MR(%p): clear bitmap[%u] for addr %p", + dev->data->port_id, (void *)mr, pos, (void *)start); + rte_bitmap_clear(mr->ms_bmp, pos); + if (--mr->ms_n == 0) { + LIST_REMOVE(mr, mr); + LIST_INSERT_HEAD(&priv->mr.mr_free_list, mr, mr); + DEBUG("port %u remove MR(%p) from list", + dev->data->port_id, (void *)mr); + } + /* + * MR is fragmented or will be freed. the global cache must be + * rebuilt. + */ + rebuild = 1; + } + if (rebuild) { + mr_rebuild_dev_cache(dev); + /* + * Flush local caches by propagating invalidation across cores. + * rte_smp_wmb() is enough to synchronize this event. If one of + * freed memsegs is seen by other core, that means the memseg + * has been allocated by allocator, which will come after this + * free call. Therefore, this store instruction (incrementing + * generation below) will be guaranteed to be seen by other core + * before the core sees the newly allocated memory. + */ + ++priv->mr.dev_gen; + DEBUG("broadcasting local cache flush, gen=%d", + priv->mr.dev_gen); + rte_smp_wmb(); + } + rte_rwlock_write_unlock(&priv->mr.rwlock); +#ifndef NDEBUG + if (rebuild) + mlx4_mr_dump_dev(dev); +#endif +} + +/** + * Callback for memory event. + * + * @param event_type + * Memory event type. + * @param addr + * Address of memory. + * @param len + * Size of memory. */ void -mlx4_mr_put(struct mlx4_mr *mr) +mlx4_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr, + size_t len, void *arg __rte_unused) { - struct priv *priv = mr->priv; - - rte_spinlock_lock(&priv->mr_lock); - assert(mr->refcnt); - if (--mr->refcnt) - goto release; - LIST_REMOVE(mr, next); - claim_zero(mlx4_glue->dereg_mr(mr->mr)); - rte_free(mr); -release: - rte_spinlock_unlock(&priv->mr_lock); + struct priv *priv; + + switch (event_type) { + case RTE_MEM_EVENT_FREE: + rte_rwlock_read_lock(&mlx4_mem_event_rwlock); + /* Iterate all the existing mlx4 devices. */ + LIST_FOREACH(priv, &mlx4_mem_event_cb_list, mem_event_cb) + mlx4_mr_mem_event_free_cb(priv->dev, addr, len); + rte_rwlock_read_unlock(&mlx4_mem_event_rwlock); + break; + case RTE_MEM_EVENT_ALLOC: + default: + break; + } } /** - * Add memory region (MR) <-> memory pool (MP) association to txq->mp2mr[]. - * If mp2mr[] is full, remove an entry first. + * Look up address in the global MR cache table. If not found, create a new MR. + * Insert the found/created entry to local bottom-half cache table. + * + * @param dev + * Pointer to Ethernet device. + * @param mr_ctrl + * Pointer to per-queue MR control structure. + * @param[out] entry + * Pointer to returning MR cache entry, found in the global cache or newly + * created. If failed to create one, this is not written. + * @param addr + * Search key. + * + * @return + * Searched LKey on success, UINT32_MAX on no match. + */ +static uint32_t +mlx4_mr_lookup_dev(struct rte_eth_dev *dev, struct mlx4_mr_ctrl *mr_ctrl, + struct mlx4_mr_cache *entry, uintptr_t addr) +{ + struct priv *priv = dev->data->dev_private; + struct mlx4_mr_btree *bt = &mr_ctrl->cache_bh; + uint16_t idx; + uint32_t lkey; + + /* If local cache table is full, try to double it. 
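The invalidation broadcast is a plain generation counter: the control path bumps dev_gen behind a write barrier, and each queue lazily flushes its local arrays the next time it notices the number moved. The same handshake sketched with C11 atomics, where release/acquire ordering is assumed as a conservative substitute for the rte_smp_wmb() used above:

#include <stdatomic.h>
#include <stdint.h>
#include <string.h>

static _Atomic uint32_t dev_gen; /* Device-wide generation number. */

struct queue_cache {
	uint32_t cur_gen;     /* Generation this cache was built at. */
	uintptr_t entries[8]; /* Per-queue lookup entries. */
};

/* Control path: after rebuilding the global cache, publish the change. */
void
publish_invalidation(void)
{
	atomic_fetch_add_explicit(&dev_gen, 1, memory_order_release);
}

/* Data path: flush the local cache at most once per generation change. */
void
maybe_flush(struct queue_cache *qc)
{
	uint32_t gen = atomic_load_explicit(&dev_gen, memory_order_acquire);

	if (qc->cur_gen != gen) {
		memset(qc->entries, 0, sizeof(qc->entries));
		qc->cur_gen = gen;
	}
}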
*/ + if (unlikely(bt->len == bt->size)) + mr_btree_expand(bt, bt->size << 1); + /* Look up in the global cache. */ + rte_rwlock_read_lock(&priv->mr.rwlock); + lkey = mr_btree_lookup(&priv->mr.cache, &idx, addr); + if (lkey != UINT32_MAX) { + /* Found. */ + *entry = (*priv->mr.cache.table)[idx]; + rte_rwlock_read_unlock(&priv->mr.rwlock); + /* + * Update local cache. Even if it fails, return the found entry + * to update top-half cache. Next time, this entry will be found + * in the global cache. + */ + mr_btree_insert(bt, entry); + return lkey; + } + rte_rwlock_read_unlock(&priv->mr.rwlock); + /* First time to see the address? Create a new MR. */ + lkey = mlx4_mr_create(dev, entry, addr); + /* + * Update the local cache if successfully created a new global MR. Even + * if failed to create one, there's no action to take in this datapath + * code. As returning LKey is invalid, this will eventually make HW + * fail. + */ + if (lkey != UINT32_MAX) + mr_btree_insert(bt, entry); + return lkey; +} + +/** + * Bottom-half of LKey search on datapath. Firstly search in cache_bh[] and if + * misses, search in the global MR cache table and update the new entry to + * per-queue local caches. + * + * @param dev + * Pointer to Ethernet device. + * @param mr_ctrl + * Pointer to per-queue MR control structure. + * @param addr + * Search key. + * + * @return + * Searched LKey on success, UINT32_MAX on no match. + */ +static uint32_t +mlx4_mr_addr2mr_bh(struct rte_eth_dev *dev, struct mlx4_mr_ctrl *mr_ctrl, + uintptr_t addr) +{ + uint32_t lkey; + uint16_t bh_idx = 0; + /* Victim in top-half cache to replace with new entry. */ + struct mlx4_mr_cache *repl = &mr_ctrl->cache[mr_ctrl->head]; + + /* Binary-search MR translation table. */ + lkey = mr_btree_lookup(&mr_ctrl->cache_bh, &bh_idx, addr); + /* Update top-half cache. */ + if (likely(lkey != UINT32_MAX)) { + *repl = (*mr_ctrl->cache_bh.table)[bh_idx]; + } else { + /* + * If missed in local lookup table, search in the global cache + * and local cache_bh[] will be updated inside if possible. + * Top-half cache entry will also be updated. + */ + lkey = mlx4_mr_lookup_dev(dev, mr_ctrl, repl, addr); + if (unlikely(lkey == UINT32_MAX)) + return UINT32_MAX; + } + /* Update the most recently used entry. */ + mr_ctrl->mru = mr_ctrl->head; + /* Point to the next victim, the oldest. */ + mr_ctrl->head = (mr_ctrl->head + 1) % MLX4_MR_CACHE_N; + return lkey; +} + +/** + * Bottom-half of LKey search on Rx. + * + * @param rxq + * Pointer to Rx queue structure. + * @param addr + * Search key. + * + * @return + * Searched LKey on success, UINT32_MAX on no match. + */ +uint32_t +mlx4_rx_addr2mr_bh(struct rxq *rxq, uintptr_t addr) +{ + struct mlx4_mr_ctrl *mr_ctrl = &rxq->mr_ctrl; + struct priv *priv = rxq->priv; + + DEBUG("Rx queue %u: miss on top-half, mru=%u, head=%u, addr=%p", + rxq->stats.idx, mr_ctrl->mru, mr_ctrl->head, (void *)addr); + return mlx4_mr_addr2mr_bh(priv->dev, mr_ctrl, addr); +} + +/** + * Bottom-half of LKey search on Tx. * * @param txq * Pointer to Tx queue structure. - * @param[in] mp - * Memory pool for which a memory region lkey must be added. - * @param[in] i - * Index in memory pool (MP) where to add memory region (MR). + * @param addr + * Search key. * * @return - * Added mr->lkey on success, (uint32_t)-1 on failure. + * Searched LKey on success, UINT32_MAX on no match. 
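Lookup is thus layered: a short linear array probed through the last-hit (MRU) index first, then a sorted table binary-searched on a miss, with the found entry promoted into a rotating victim slot of the linear array. A self-contained miniature of that layering over integer ranges (all names here are invented; the real tables hold struct mlx4_mr_cache entries and live per queue):

#include <stdint.h>
#include <stdio.h>

struct range { uintptr_t start, end; uint32_t lkey; };

#define TOP_N 4
static struct range top[TOP_N]; /* First level: tiny linear array. */
static unsigned int mru;        /* Last-hit index, probed first. */
static unsigned int head;       /* Next victim slot for promotion. */

/* Second level: sorted by start address, binary-searched on a miss. */
static const struct range bh[] = {
	{ 0x1000, 0x2000, 0x11 },
	{ 0x4000, 0x8000, 0x22 },
	{ 0x9000, 0xa000, 0x33 },
};

static uint32_t
lookup(uintptr_t addr)
{
	unsigned int i, lo = 0, hi = sizeof(bh) / sizeof(bh[0]);

	if (addr >= top[mru].start && addr < top[mru].end)
		return top[mru].lkey;           /* Hottest path: last hit. */
	for (i = 0; i < TOP_N; ++i) {           /* Top half: linear scan. */
		if (addr >= top[i].start && addr < top[i].end) {
			mru = i;
			return top[i].lkey;
		}
	}
	while (lo < hi) {                       /* Bottom half: binary search. */
		unsigned int mid = (lo + hi) / 2;

		if (addr < bh[mid].start) {
			hi = mid;
		} else if (addr >= bh[mid].end) {
			lo = mid + 1;
		} else {
			top[head] = bh[mid];    /* Promote into the top half. */
			mru = head;
			head = (head + 1) % TOP_N;
			return bh[mid].lkey;
		}
	}
	return UINT32_MAX;                      /* No region covers addr. */
}

int
main(void)
{
	uint32_t first = lookup(0x5000);  /* Miss: falls to binary search. */
	uint32_t second = lookup(0x5000); /* Hit: served by the MRU slot. */

	printf("0x%x 0x%x\n", first, second);
	return 0;
}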
*/ uint32_t -mlx4_txq_add_mr(struct txq *txq, struct rte_mempool *mp, uint32_t i) +mlx4_tx_addr2mr_bh(struct txq *txq, uintptr_t addr) { + struct mlx4_mr_ctrl *mr_ctrl = &txq->mr_ctrl; + struct priv *priv = txq->priv; + + DEBUG("Tx queue %u: miss on top-half, mru=%u, head=%u, addr=%p", + txq->stats.idx, mr_ctrl->mru, mr_ctrl->head, (void *)addr); + return mlx4_mr_addr2mr_bh(priv->dev, mr_ctrl, addr); +} + +/** + * Flush all of the local cache entries. + * + * @param mr_ctrl + * Pointer to per-queue MR control structure. + */ +void +mlx4_mr_flush_local_cache(struct mlx4_mr_ctrl *mr_ctrl) +{ + /* Reset the most-recently-used index. */ + mr_ctrl->mru = 0; + /* Reset the linear search array. */ + mr_ctrl->head = 0; + memset(mr_ctrl->cache, 0, sizeof(mr_ctrl->cache)); + /* Reset the B-tree table. */ + mr_ctrl->cache_bh.len = 1; + mr_ctrl->cache_bh.overflow = 0; + /* Update the generation number. */ + mr_ctrl->cur_gen = *mr_ctrl->dev_gen_ptr; + DEBUG("mr_ctrl(%p): flushed, cur_gen=%d", + (void *)mr_ctrl, mr_ctrl->cur_gen); +} + +/* Called during rte_mempool_mem_iter() by mlx4_mr_update_mp(). */ +static void +mlx4_mr_update_mp_cb(struct rte_mempool *mp __rte_unused, void *opaque, + struct rte_mempool_memhdr *memhdr, + unsigned mem_idx __rte_unused) +{ + struct mr_update_mp_data *data = opaque; + uint32_t lkey; + + /* Stop iteration if failed in the previous walk. */ + if (data->ret < 0) + return; + /* Register address of the chunk and update local caches. */ + lkey = mlx4_mr_addr2mr_bh(data->dev, data->mr_ctrl, + (uintptr_t)memhdr->addr); + if (lkey == UINT32_MAX) + data->ret = -1; +} + +/** + * Register entire memory chunks in a Mempool. + * + * @param dev + * Pointer to Ethernet device. + * @param mr_ctrl + * Pointer to per-queue MR control structure. + * @param mp + * Pointer to registering Mempool. + * + * @return + * 0 on success, -1 on failure. + */ +int +mlx4_mr_update_mp(struct rte_eth_dev *dev, struct mlx4_mr_ctrl *mr_ctrl, + struct rte_mempool *mp) +{ + struct mr_update_mp_data data = { + .dev = dev, + .mr_ctrl = mr_ctrl, + .ret = 0, + }; + + rte_mempool_mem_iter(mp, mlx4_mr_update_mp_cb, &data); + return data.ret; +} + +#ifndef NDEBUG +/** + * Dump all the created MRs and the global cache entries. + * + * @param dev + * Pointer to Ethernet device. + */ +void +mlx4_mr_dump_dev(struct rte_eth_dev *dev) +{ + struct priv *priv = dev->data->dev_private; struct mlx4_mr *mr; + int mr_n = 0; + int chunk_n = 0; + + rte_rwlock_read_lock(&priv->mr.rwlock); + /* Iterate all the existing MRs. */ + LIST_FOREACH(mr, &priv->mr.mr_list, mr) { + unsigned int n; + + DEBUG("port %u MR[%u], LKey = 0x%x, ms_n = %u, ms_bmp_n = %u", + dev->data->port_id, mr_n++, + rte_cpu_to_be_32(mr->ibv_mr->lkey), + mr->ms_n, mr->ms_bmp_n); + if (mr->ms_n == 0) + continue; + for (n = 0; n < mr->ms_bmp_n; ) { + struct mlx4_mr_cache ret = { 0, }; - /* Add a new entry, register MR first. */ - DEBUG("%p: discovered new memory pool \"%s\" (%p)", - (void *)txq, mp->name, (void *)mp); - mr = mlx4_mr_get(txq->priv, mp); - if (unlikely(mr == NULL)) { - DEBUG("%p: unable to configure MR, mlx4_mr_get() failed", - (void *)txq); - return (uint32_t)-1; + n = mr_find_next_chunk(mr, &ret, n); + if (!ret.end) + break; + DEBUG(" chunk[%u], [0x%" PRIxPTR ", 0x%" PRIxPTR ")", + chunk_n++, ret.start, ret.end); + } } - if (unlikely(i == RTE_DIM(txq->mp2mr))) { - /* Table is full, remove oldest entry. 
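Note how mlx4_mr_update_mp() gets early-abort semantics out of rte_mempool_mem_iter(), which itself always visits every chunk: the opaque context records the first failure and later invocations of the callback return immediately. The same shape, reduced to a plain loop (chunk_cb() and struct walk_data are illustrative stand-ins):

#include <stdio.h>

struct walk_data { int ret; }; /* Mirrors struct mr_update_mp_data. */

static void
chunk_cb(int chunk, void *opaque)
{
	struct walk_data *wd = opaque;

	if (wd->ret < 0)
		return;          /* A previous chunk failed: no-op. */
	if (chunk == 3)          /* Pretend registration failed here. */
		wd->ret = -1;
	else
		printf("registered chunk %d\n", chunk);
}

int
main(void)
{
	struct walk_data wd = { .ret = 0 };
	int i;

	for (i = 0; i < 5; ++i)  /* Stands in for rte_mempool_mem_iter(). */
		chunk_cb(i, &wd);
	return wd.ret;           /* 0 on success, -1 on failure. */
}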
*/ - DEBUG("%p: MR <-> MP table full, dropping oldest entry.", - (void *)txq); - --i; - mlx4_mr_put(txq->mp2mr[0].mr); - memmove(&txq->mp2mr[0], &txq->mp2mr[1], - (sizeof(txq->mp2mr) - sizeof(txq->mp2mr[0]))); + DEBUG("port %u dumping global cache", dev->data->port_id); + mlx4_mr_btree_dump(&priv->mr.cache); + rte_rwlock_read_unlock(&priv->mr.rwlock); +} +#endif + +/** + * Release all the created MRs and resources. Remove device from memory callback + * list. + * + * @param dev + * Pointer to Ethernet device. + */ +void +mlx4_mr_release(struct rte_eth_dev *dev) +{ + struct priv *priv = dev->data->dev_private; + struct mlx4_mr *mr_next = LIST_FIRST(&priv->mr.mr_list); + + /* Remove from memory callback device list. */ + rte_rwlock_write_lock(&mlx4_mem_event_rwlock); + LIST_REMOVE(priv, mem_event_cb); + rte_rwlock_write_unlock(&mlx4_mem_event_rwlock); +#ifndef NDEBUG + mlx4_mr_dump_dev(dev); +#endif + rte_rwlock_write_lock(&priv->mr.rwlock); + /* Detach from MR list and move to free list. */ + while (mr_next != NULL) { + struct mlx4_mr *mr = mr_next; + + mr_next = LIST_NEXT(mr, mr); + LIST_REMOVE(mr, mr); + LIST_INSERT_HEAD(&priv->mr.mr_free_list, mr, mr); } - /* Store the new entry. */ - txq->mp2mr[i].mp = mp; - txq->mp2mr[i].mr = mr; - txq->mp2mr[i].lkey = mr->lkey; - DEBUG("%p: new MR lkey for MP \"%s\" (%p): 0x%08" PRIu32, - (void *)txq, mp->name, (void *)mp, txq->mp2mr[i].lkey); - return txq->mp2mr[i].lkey; + LIST_INIT(&priv->mr.mr_list); + /* Free global cache. */ + mlx4_mr_btree_free(&priv->mr.cache); + rte_rwlock_write_unlock(&priv->mr.rwlock); + /* Free all remaining MRs. */ + mlx4_mr_garbage_collect(dev); } diff --git a/drivers/net/mlx4/mlx4_mr.h b/drivers/net/mlx4/mlx4_mr.h new file mode 100644 index 00000000..37a365a8 --- /dev/null +++ b/drivers/net/mlx4/mlx4_mr.h @@ -0,0 +1,122 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2018 6WIND S.A. + * Copyright 2018 Mellanox Technologies, Ltd + */ + +#ifndef RTE_PMD_MLX4_MR_H_ +#define RTE_PMD_MLX4_MR_H_ + +#include +#include +#include + +/* Verbs headers do not support -pedantic. */ +#ifdef PEDANTIC +#pragma GCC diagnostic ignored "-Wpedantic" +#endif +#include +#ifdef PEDANTIC +#pragma GCC diagnostic error "-Wpedantic" +#endif + +#include +#include +#include +#include + +/* Size of per-queue MR cache array for linear search. */ +#define MLX4_MR_CACHE_N 8 + +/* Size of MR cache table for binary search. */ +#define MLX4_MR_BTREE_CACHE_N 256 + +/* Memory Region object. */ +struct mlx4_mr { + LIST_ENTRY(mlx4_mr) mr; /**< Pointer to the prev/next entry. */ + struct ibv_mr *ibv_mr; /* Verbs Memory Region. */ + const struct rte_memseg_list *msl; + int ms_base_idx; /* Start index of msl->memseg_arr[]. */ + int ms_n; /* Number of memsegs in use. */ + uint32_t ms_bmp_n; /* Number of bits in memsegs bit-mask. */ + struct rte_bitmap *ms_bmp; /* Bit-mask of memsegs belonged to MR. */ +}; + +/* Cache entry for Memory Region. */ +struct mlx4_mr_cache { + uintptr_t start; /* Start address of MR. */ + uintptr_t end; /* End address of MR. */ + uint32_t lkey; /* rte_cpu_to_be_32(ibv_mr->lkey). */ +} __rte_packed; + +/* MR Cache table for Binary search. */ +struct mlx4_mr_btree { + uint16_t len; /* Number of entries. */ + uint16_t size; /* Total number of entries. */ + int overflow; /* Mark failure of table expansion. */ + struct mlx4_mr_cache (*table)[]; +} __rte_packed; + +/* Per-queue MR control descriptor. */ +struct mlx4_mr_ctrl { + uint32_t *dev_gen_ptr; /* Generation number of device to poll. 
*/ + uint32_t cur_gen; /* Generation number saved to flush caches. */ + uint16_t mru; /* Index of last hit entry in top-half cache. */ + uint16_t head; /* Index of the oldest entry in top-half cache. */ + struct mlx4_mr_cache cache[MLX4_MR_CACHE_N]; /* Cache for top-half. */ + struct mlx4_mr_btree cache_bh; /* Cache for bottom-half. */ +} __rte_packed; + +extern struct mlx4_dev_list mlx4_mem_event_cb_list; +extern rte_rwlock_t mlx4_mem_event_rwlock; + +/* First entry must be NULL for comparison. */ +#define mlx4_mr_btree_len(bt) ((bt)->len - 1) + +int mlx4_mr_btree_init(struct mlx4_mr_btree *bt, int n, int socket); +void mlx4_mr_btree_free(struct mlx4_mr_btree *bt); +void mlx4_mr_btree_dump(struct mlx4_mr_btree *bt); +void mlx4_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr, + size_t len, void *arg); +int mlx4_mr_update_mp(struct rte_eth_dev *dev, struct mlx4_mr_ctrl *mr_ctrl, + struct rte_mempool *mp); +void mlx4_mr_dump_dev(struct rte_eth_dev *dev); +void mlx4_mr_release(struct rte_eth_dev *dev); + +/** + * Look up LKey from given lookup table by linear search. Firstly look up the + * last-hit entry. If miss, the entire array is searched. If found, update the + * last-hit index and return LKey. + * + * @param lkp_tbl + * Pointer to lookup table. + * @param[in,out] cached_idx + * Pointer to last-hit index. + * @param n + * Size of lookup table. + * @param addr + * Search key. + * + * @return + * Searched LKey on success, UINT32_MAX on no match. + */ +static __rte_always_inline uint32_t +mlx4_mr_lookup_cache(struct mlx4_mr_cache *lkp_tbl, uint16_t *cached_idx, + uint16_t n, uintptr_t addr) +{ + uint16_t idx; + + if (likely(addr >= lkp_tbl[*cached_idx].start && + addr < lkp_tbl[*cached_idx].end)) + return lkp_tbl[*cached_idx].lkey; + for (idx = 0; idx < n && lkp_tbl[idx].start != 0; ++idx) { + if (addr >= lkp_tbl[idx].start && + addr < lkp_tbl[idx].end) { + /* Found. */ + *cached_idx = idx; + return lkp_tbl[idx].lkey; + } + } + return UINT32_MAX; +} + +#endif /* RTE_PMD_MLX4_MR_H_ */ diff --git a/drivers/net/mlx4/mlx4_prm.h b/drivers/net/mlx4/mlx4_prm.h index 153dda52..e15a3c14 100644 --- a/drivers/net/mlx4/mlx4_prm.h +++ b/drivers/net/mlx4/mlx4_prm.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2017 6WIND S.A. - * Copyright 2017 Mellanox + * Copyright 2017 Mellanox Technologies, Ltd */ #ifndef MLX4_PRM_H_ diff --git a/drivers/net/mlx4/mlx4_rxq.c b/drivers/net/mlx4/mlx4_rxq.c index 7a036ed8..87688c1c 100644 --- a/drivers/net/mlx4/mlx4_rxq.c +++ b/drivers/net/mlx4/mlx4_rxq.c @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2017 6WIND S.A. - * Copyright 2017 Mellanox + * Copyright 2017 Mellanox Technologies, Ltd */ /** @@ -88,7 +88,7 @@ mlx4_rss_hash_key_default[MLX4_RSS_HASH_KEY_SIZE] = { */ struct mlx4_rss * mlx4_rss_get(struct priv *priv, uint64_t fields, - uint8_t key[MLX4_RSS_HASH_KEY_SIZE], + const uint8_t key[MLX4_RSS_HASH_KEY_SIZE], uint16_t queues, const uint16_t queue_id[]) { struct mlx4_rss *rss; @@ -336,6 +336,8 @@ mlx4_rss_init(struct priv *priv) unsigned int i; int ret; + if (priv->rss_init) + return 0; /* Prepare range for RSS contexts before creating the first WQ. 
*/ ret = mlx4_glue->dv_set_context_attr (priv->ctx, @@ -418,6 +420,7 @@ wq_num_check: } wq_num_prev = wq_num; } + priv->rss_init = 1; return 0; error: ERROR("cannot initialize common RSS resources (queue %u): %s: %s", @@ -446,6 +449,8 @@ mlx4_rss_deinit(struct priv *priv) { unsigned int i; + if (!priv->rss_init) + return; for (i = 0; i != priv->dev->data->nb_rx_queues; ++i) { struct rxq *rxq = priv->dev->data->rx_queues[i]; @@ -454,6 +459,7 @@ mlx4_rss_deinit(struct priv *priv) mlx4_rxq_detach(rxq); } } + priv->rss_init = 0; } /** @@ -482,6 +488,7 @@ mlx4_rxq_attach(struct rxq *rxq) } struct priv *priv = rxq->priv; + struct rte_eth_dev *dev = priv->dev; const uint32_t elts_n = 1 << rxq->elts_n; const uint32_t sges_n = 1 << rxq->sges_n; struct rte_mbuf *(*elts)[elts_n] = rxq->elts; @@ -491,6 +498,8 @@ mlx4_rxq_attach(struct rxq *rxq) const char *msg; struct ibv_cq *cq = NULL; struct ibv_wq *wq = NULL; + uint32_t create_flags = 0; + uint32_t comp_mask = 0; volatile struct mlx4_wqe_data_seg (*wqes)[]; unsigned int i; int ret; @@ -503,6 +512,11 @@ mlx4_rxq_attach(struct rxq *rxq) msg = "CQ creation failure"; goto error; } + /* By default, FCS (CRC) is stripped by hardware. */ + if (rxq->crc_present) { + create_flags |= IBV_WQ_FLAGS_SCATTER_FCS; + comp_mask |= IBV_WQ_INIT_ATTR_FLAGS; + } wq = mlx4_glue->create_wq (priv->ctx, &(struct ibv_wq_init_attr){ @@ -511,6 +525,8 @@ mlx4_rxq_attach(struct rxq *rxq) .max_sge = sges_n, .pd = priv->pd, .cq = cq, + .comp_mask = comp_mask, + .create_flags = create_flags, }); if (!wq) { ret = errno ? errno : EINVAL; @@ -537,6 +553,11 @@ mlx4_rxq_attach(struct rxq *rxq) msg = "failed to obtain device information from WQ/CQ objects"; goto error; } + /* Pre-register Rx mempool. */ + DEBUG("port %u Rx queue %u registering mp %s having %u chunks", + priv->dev->data->port_id, rxq->stats.idx, + rxq->mp->name, rxq->mp->nb_mem_chunks); + mlx4_mr_update_mp(dev, &rxq->mr_ctrl, rxq->mp); wqes = (volatile struct mlx4_wqe_data_seg (*)[]) ((uintptr_t)dv_rwq.buf.buf + dv_rwq.rq.offset); for (i = 0; i != RTE_DIM(*elts); ++i) { @@ -568,7 +589,7 @@ mlx4_rxq_attach(struct rxq *rxq) .addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf, uintptr_t)), .byte_count = rte_cpu_to_be_32(buf->data_len), - .lkey = rte_cpu_to_be_32(rxq->mr->lkey), + .lkey = mlx4_rx_mb2mr(rxq, buf), }; (*elts)[i] = buf; } @@ -597,6 +618,7 @@ error: claim_zero(mlx4_glue->destroy_wq(wq)); if (cq) claim_zero(mlx4_glue->destroy_cq(cq)); + --rxq->usecnt; rte_errno = ret; ERROR("error while attaching Rx queue %p: %s: %s", (void *)rxq, msg, strerror(ret)); @@ -675,26 +697,6 @@ mlx4_get_rx_port_offloads(struct priv *priv) return offloads; } -/** - * Checks if the per-queue offload configuration is valid. - * - * @param priv - * Pointer to private structure. - * @param requested - * Per-queue offloads configuration. - * - * @return - * Nonzero when configuration is valid. - */ -static int -mlx4_check_rx_queue_offloads(struct priv *priv, uint64_t requested) -{ - uint64_t mandatory = priv->dev->data->dev_conf.rxmode.offloads; - uint64_t supported = mlx4_get_rx_port_offloads(priv); - - return !((mandatory ^ requested) & supported); -} - /** * DPDK callback to configure a Rx queue. * @@ -736,20 +738,14 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, }, }; int ret; + uint32_t crc_present; + uint64_t offloads; + + offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads; - (void)conf; /* Thresholds configuration (ignored). 
*/ DEBUG("%p: configuring queue %u for %u descriptors", (void *)dev, idx, desc); - if (!mlx4_check_rx_queue_offloads(priv, conf->offloads)) { - rte_errno = ENOTSUP; - ERROR("%p: Rx queue offloads 0x%" PRIx64 " don't match port " - "offloads 0x%" PRIx64 " or supported offloads 0x%" PRIx64, - (void *)dev, conf->offloads, - dev->data->dev_conf.rxmode.offloads, - (mlx4_get_rx_port_offloads(priv) | - mlx4_get_rx_queue_offloads(priv))); - return -rte_errno; - } + if (idx >= dev->data->nb_rx_queues) { rte_errno = EOVERFLOW; ERROR("%p: queue index out of range (%u >= %u)", @@ -774,6 +770,23 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, " to the next power of two (%u)", (void *)dev, idx, desc); } + /* By default, FCS (CRC) is stripped by hardware. */ + if (offloads & DEV_RX_OFFLOAD_CRC_STRIP) { + crc_present = 0; + } else if (priv->hw_fcs_strip) { + crc_present = 1; + } else { + WARN("%p: CRC stripping has been disabled but will still" + " be performed by hardware, make sure MLNX_OFED and" + " firmware are up to date", + (void *)dev); + crc_present = 0; + } + DEBUG("%p: CRC stripping is %s, %u bytes will be subtracted from" + " incoming frames to hide it", + (void *)dev, + crc_present ? "disabled" : "enabled", + crc_present << 2); /* Allocate and initialize Rx queue. */ mlx4_zmallocv_socket("RXQ", vec, RTE_DIM(vec), socket); if (!rxq) { @@ -790,9 +803,10 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, .elts = elts, /* Toggle Rx checksum offload if hardware supports it. */ .csum = priv->hw_csum && - (conf->offloads & DEV_RX_OFFLOAD_CHECKSUM), + (offloads & DEV_RX_OFFLOAD_CHECKSUM), .csum_l2tun = priv->hw_csum_l2tun && - (conf->offloads & DEV_RX_OFFLOAD_CHECKSUM), + (offloads & DEV_RX_OFFLOAD_CHECKSUM), + .crc_present = crc_present, .l2tun_offload = priv->hw_csum_l2tun, .stats = { .idx = idx, @@ -804,7 +818,7 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, if (dev->data->dev_conf.rxmode.max_rx_pkt_len <= (mb_len - RTE_PKTMBUF_HEADROOM)) { ; - } else if (conf->offloads & DEV_RX_OFFLOAD_SCATTER) { + } else if (offloads & DEV_RX_OFFLOAD_SCATTER) { uint32_t size = RTE_PKTMBUF_HEADROOM + dev->data->dev_conf.rxmode.max_rx_pkt_len; @@ -847,11 +861,9 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, 1 << rxq->sges_n); goto error; } - /* Use the entire Rx mempool as the memory region. */ - rxq->mr = mlx4_mr_get(priv, mp); - if (!rxq->mr) { - ERROR("%p: MR creation failure: %s", - (void *)dev, strerror(rte_errno)); + if (mlx4_mr_btree_init(&rxq->mr_ctrl.cache_bh, + MLX4_MR_BTREE_CACHE_N, socket)) { + /* rte_errno is already set. */ goto error; } if (dev->data->dev_conf.intr_conf.rxq) { @@ -911,7 +923,6 @@ mlx4_rx_queue_release(void *dpdk_rxq) assert(!rxq->rq_db); if (rxq->channel) claim_zero(mlx4_glue->destroy_comp_channel(rxq->channel)); - if (rxq->mr) - mlx4_mr_put(rxq->mr); + mlx4_mr_btree_free(&rxq->mr_ctrl.cache_bh); rte_free(rxq); } diff --git a/drivers/net/mlx4/mlx4_rxtx.c b/drivers/net/mlx4/mlx4_rxtx.c index 8ca8b77c..a92da66b 100644 --- a/drivers/net/mlx4/mlx4_rxtx.c +++ b/drivers/net/mlx4/mlx4_rxtx.c @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2017 6WIND S.A. - * Copyright 2017 Mellanox + * Copyright 2017 Mellanox Technologies, Ltd */ /** @@ -263,7 +263,7 @@ mlx4_txq_stamp_freed_wqe(struct mlx4_sq *sq, volatile uint32_t *start, } while (start != (volatile uint32_t *)sq->eob); start = (volatile uint32_t *)sq->buf; /* Flip invalid stamping ownership. 
*/ - stamp ^= RTE_BE32(0x1 << MLX4_SQ_OWNER_BIT); + stamp ^= RTE_BE32(1u << MLX4_SQ_OWNER_BIT); sq->stamp = stamp; if (start == end) return size; @@ -343,24 +343,6 @@ mlx4_txq_complete(struct txq *txq, const unsigned int elts_m, txq->elts_tail = elts_tail; } -/** - * Get memory pool (MP) from mbuf. If mbuf is indirect, the pool from which - * the cloned mbuf is allocated is returned instead. - * - * @param buf - * Pointer to mbuf. - * - * @return - * Memory pool where data is located for given mbuf. - */ -static struct rte_mempool * -mlx4_txq_mb2mp(struct rte_mbuf *buf) -{ - if (unlikely(RTE_MBUF_INDIRECT(buf))) - return rte_mbuf_from_indirect(buf)->pool; - return buf->pool; -} - /** * Write Tx data segment to the SQ. * @@ -378,7 +360,7 @@ mlx4_fill_tx_data_seg(volatile struct mlx4_wqe_data_seg *dseg, uint32_t lkey, uintptr_t addr, rte_be32_t byte_count) { dseg->addr = rte_cpu_to_be_64(addr); - dseg->lkey = rte_cpu_to_be_32(lkey); + dseg->lkey = lkey; #if RTE_CACHE_LINE_SIZE < 64 /* * Need a barrier here before writing the byte_count @@ -437,7 +419,7 @@ mlx4_tx_burst_segs(struct rte_mbuf *buf, struct txq *txq, goto txbb_tail_segs; txbb_head_seg: /* Memory region key (big endian) for this memory pool. */ - lkey = mlx4_txq_mp2mr(txq, mlx4_txq_mb2mp(sbuf)); + lkey = mlx4_tx_mb2mr(txq, sbuf); if (unlikely(lkey == (uint32_t)-1)) { DEBUG("%p: unable to get MP <-> MR association", (void *)txq); @@ -449,7 +431,7 @@ txbb_head_seg: dseg = (volatile struct mlx4_wqe_data_seg *) sq->buf; dseg->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(sbuf, uintptr_t)); - dseg->lkey = rte_cpu_to_be_32(lkey); + dseg->lkey = lkey; /* * This data segment starts at the beginning of a new * TXBB, so we need to postpone its byte_count writing @@ -469,7 +451,7 @@ txbb_tail_segs: /* Jump to default if there are more than two segments remaining. */ switch (nb_segs) { default: - lkey = mlx4_txq_mp2mr(txq, mlx4_txq_mb2mp(sbuf)); + lkey = mlx4_tx_mb2mr(txq, sbuf); if (unlikely(lkey == (uint32_t)-1)) { DEBUG("%p: unable to get MP <-> MR association", (void *)txq); @@ -485,7 +467,7 @@ txbb_tail_segs: nb_segs--; /* fallthrough */ case 2: - lkey = mlx4_txq_mp2mr(txq, mlx4_txq_mb2mp(sbuf)); + lkey = mlx4_tx_mb2mr(txq, sbuf); if (unlikely(lkey == (uint32_t)-1)) { DEBUG("%p: unable to get MP <-> MR association", (void *)txq); @@ -501,7 +483,7 @@ txbb_tail_segs: nb_segs--; /* fallthrough */ case 1: - lkey = mlx4_txq_mp2mr(txq, mlx4_txq_mb2mp(sbuf)); + lkey = mlx4_tx_mb2mr(txq, sbuf); if (unlikely(lkey == (uint32_t)-1)) { DEBUG("%p: unable to get MP <-> MR association", (void *)txq); @@ -611,7 +593,7 @@ mlx4_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) elt->buf = NULL; break; } - lkey = mlx4_txq_mp2mr(txq, mlx4_txq_mb2mp(buf)); + lkey = mlx4_tx_mb2mr(txq, buf); if (unlikely(lkey == (uint32_t)-1)) { /* MR does not exist. */ DEBUG("%p: unable to get MP <-> MR association", @@ -639,7 +621,7 @@ mlx4_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) ctrl_next = (volatile struct mlx4_wqe_ctrl_seg *) ((volatile uint8_t *)ctrl_next - sq->size); /* Flip HW valid ownership. */ - sq->owner_opcode ^= 0x1 << MLX4_SQ_OWNER_BIT; + sq->owner_opcode ^= 1u << MLX4_SQ_OWNER_BIT; } /* * For raw Ethernet, the SOLICIT flag is used to indicate @@ -934,11 +916,14 @@ mlx4_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) goto skip; } pkt = seg; + assert(len >= (rxq->crc_present << 2)); /* Update packet information. 
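The 0x1 → 1u conversions in these hunks matter for correctness, not style: assuming MLX4_SQ_OWNER_BIT is bit 31 (which the change suggests), shifting a signed literal into the sign bit is undefined behavior in C, while shifting an unsigned one is fully defined. In isolation:

#include <stdint.h>

#define OWNER_BIT 31 /* Assumed value, for illustration only. */

uint32_t
owner_mask(void)
{
	/* return 0x1 << OWNER_BIT;  -- undefined: the result of the signed
	 * shift is not representable in int. */
	return 1u << OWNER_BIT; /* Well defined: 2^31 fits in unsigned int. */
}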
*/ pkt->packet_type = rxq_cq_to_pkt_type(cqe, rxq->l2tun_offload); pkt->ol_flags = PKT_RX_RSS_HASH; pkt->hash.rss = cqe->immed_rss_invalid; + if (rxq->crc_present) + len -= ETHER_CRC_LEN; pkt->pkt_len = len; if (rxq->csum | rxq->csum_l2tun) { uint32_t flags = @@ -963,6 +948,9 @@ mlx4_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) * changes. */ scat->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, uintptr_t)); + /* If there's only one MR, no need to replace LKey in WQE. */ + if (unlikely(mlx4_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1)) + scat->lkey = mlx4_rx_mb2mr(rxq, rep); if (len > seg->data_len) { len -= seg->data_len; ++pkt->nb_segs; diff --git a/drivers/net/mlx4/mlx4_rxtx.h b/drivers/net/mlx4/mlx4_rxtx.h index c12bd39a..4c025e3a 100644 --- a/drivers/net/mlx4/mlx4_rxtx.h +++ b/drivers/net/mlx4/mlx4_rxtx.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2017 6WIND S.A. - * Copyright 2017 Mellanox + * Copyright 2017 Mellanox Technologies, Ltd */ #ifndef MLX4_RXTX_H_ @@ -25,6 +25,7 @@ #include "mlx4.h" #include "mlx4_prm.h" +#include "mlx4_mr.h" /** Rx queue counters. */ struct mlx4_rxq_stats { @@ -39,7 +40,6 @@ struct mlx4_rxq_stats { struct rxq { struct priv *priv; /**< Back pointer to private data. */ struct rte_mempool *mp; /**< Memory pool for allocations. */ - struct mlx4_mr *mr; /**< Memory region. */ struct ibv_cq *cq; /**< Completion queue. */ struct ibv_wq *wq; /**< Work queue. */ struct ibv_comp_channel *channel; /**< Rx completion channel. */ @@ -47,11 +47,13 @@ struct rxq { uint16_t port_id; /**< Port ID for incoming packets. */ uint16_t sges_n; /**< Number of segments per packet (log2 value). */ uint16_t elts_n; /**< Mbuf queue size (log2 value). */ + struct mlx4_mr_ctrl mr_ctrl; /* MR control descriptor. */ struct rte_mbuf *(*elts)[]; /**< Rx elements. */ volatile struct mlx4_wqe_data_seg (*wqes)[]; /**< HW queue entries. */ volatile uint32_t *rq_db; /**< RQ doorbell record. */ uint32_t csum:1; /**< Enable checksum offloading. */ uint32_t csum_l2tun:1; /**< Same for L2 tunnels. */ + uint32_t crc_present:1; /**< CRC must be subtracted. */ uint32_t l2tun_offload:1; /**< L2 tunnel offload is enabled. */ struct mlx4_cq mcq; /**< Info for directly manipulating the CQ. */ struct mlx4_rxq_stats stats; /**< Rx queue counters. */ @@ -83,7 +85,7 @@ struct txq_elt { }; }; -/** Rx queue counters. */ +/** Tx queue counters. */ struct mlx4_txq_stats { unsigned int idx; /**< Mapping index. */ uint64_t opackets; /**< Total of successfully sent packets. */ @@ -100,6 +102,7 @@ struct txq { int elts_comp_cd; /**< Countdown for next completion. */ unsigned int elts_comp_cd_init; /**< Initial value for countdown. */ unsigned int elts_n; /**< (*elts)[] length. */ + struct mlx4_mr_ctrl mr_ctrl; /* MR control descriptor. */ struct txq_elt (*elts)[]; /**< Tx elements. */ struct mlx4_txq_stats stats; /**< Tx queue counters. */ uint32_t max_inline; /**< Max inline send size. */ @@ -108,11 +111,6 @@ struct txq { uint32_t lb:1; /**< Whether packets should be looped back by eSwitch. */ uint8_t *bounce_buf; /**< Memory used for storing the first DWORD of data TXBBs. */ - struct { - const struct rte_mempool *mp; /**< Cached memory pool. */ - struct mlx4_mr *mr; /**< Memory region (for mp). */ - uint32_t lkey; /**< mr->lkey copy. */ - } mp2mr[MLX4_PMD_TX_MP_CACHE]; /**< MP to MR translation table. */ struct priv *priv; /**< Back pointer to private data. */ unsigned int socket; /**< CPU socket ID for allocations. */ struct ibv_cq *cq; /**< Completion queue. 
*/ @@ -126,7 +124,7 @@ uint8_t mlx4_rss_hash_key_default[MLX4_RSS_HASH_KEY_SIZE]; int mlx4_rss_init(struct priv *priv); void mlx4_rss_deinit(struct priv *priv); struct mlx4_rss *mlx4_rss_get(struct priv *priv, uint64_t fields, - uint8_t key[MLX4_RSS_HASH_KEY_SIZE], + const uint8_t key[MLX4_RSS_HASH_KEY_SIZE], uint16_t queues, const uint16_t queue_id[]); void mlx4_rss_put(struct mlx4_rss *rss); int mlx4_rss_attach(struct mlx4_rss *rss); @@ -160,34 +158,70 @@ int mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, const struct rte_eth_txconf *conf); void mlx4_tx_queue_release(void *dpdk_txq); +/* mlx4_mr.c */ + +void mlx4_mr_flush_local_cache(struct mlx4_mr_ctrl *mr_ctrl); +uint32_t mlx4_rx_addr2mr_bh(struct rxq *rxq, uintptr_t addr); +uint32_t mlx4_tx_addr2mr_bh(struct txq *txq, uintptr_t addr); + /** - * Get memory region (MR) <-> memory pool (MP) association from txq->mp2mr[]. - * Call mlx4_txq_add_mr() if MP is not registered yet. + * Query LKey from a packet buffer for Rx. No need to flush local caches for Rx + * as mempool is pre-configured and static. + * + * @param rxq + * Pointer to Rx queue structure. + * @param addr + * Address to search. + * + * @return + * Searched LKey on success, UINT32_MAX on no match. + */ +static __rte_always_inline uint32_t +mlx4_rx_addr2mr(struct rxq *rxq, uintptr_t addr) +{ + struct mlx4_mr_ctrl *mr_ctrl = &rxq->mr_ctrl; + uint32_t lkey; + + /* Linear search on MR cache array. */ + lkey = mlx4_mr_lookup_cache(mr_ctrl->cache, &mr_ctrl->mru, + MLX4_MR_CACHE_N, addr); + if (likely(lkey != UINT32_MAX)) + return lkey; + /* Take slower bottom-half (Binary Search) on miss. */ + return mlx4_rx_addr2mr_bh(rxq, addr); +} + +#define mlx4_rx_mb2mr(rxq, mb) mlx4_rx_addr2mr(rxq, (uintptr_t)((mb)->buf_addr)) + +/** + * Query LKey from a packet buffer for Tx. If not found, add the mempool. * * @param txq * Pointer to Tx queue structure. - * @param[in] mp - * Memory pool for which a memory region lkey must be returned. + * @param addr + * Address to search. * * @return - * mr->lkey on success, (uint32_t)-1 on failure. + * Searched LKey on success, UINT32_MAX on no match. */ -static inline uint32_t -mlx4_txq_mp2mr(struct txq *txq, struct rte_mempool *mp) +static __rte_always_inline uint32_t +mlx4_tx_addr2mr(struct txq *txq, uintptr_t addr) { - unsigned int i; - - for (i = 0; (i != RTE_DIM(txq->mp2mr)); ++i) { - if (unlikely(txq->mp2mr[i].mp == NULL)) { - /* Unknown MP, add a new MR for it. */ - break; - } - if (txq->mp2mr[i].mp == mp) { - /* MP found MP. */ - return txq->mp2mr[i].lkey; - } - } - return mlx4_txq_add_mr(txq, mp, i); + struct mlx4_mr_ctrl *mr_ctrl = &txq->mr_ctrl; + uint32_t lkey; + + /* Check generation bit to see if there's any change on existing MRs. */ + if (unlikely(*mr_ctrl->dev_gen_ptr != mr_ctrl->cur_gen)) + mlx4_mr_flush_local_cache(mr_ctrl); + /* Linear search on MR cache array. */ + lkey = mlx4_mr_lookup_cache(mr_ctrl->cache, &mr_ctrl->mru, + MLX4_MR_CACHE_N, addr); + if (likely(lkey != UINT32_MAX)) + return lkey; + /* Take slower bottom-half (binary search) on miss. */ + return mlx4_tx_addr2mr_bh(txq, addr); } +#define mlx4_tx_mb2mr(rxq, mb) mlx4_tx_addr2mr(rxq, (uintptr_t)((mb)->buf_addr)) + #endif /* MLX4_RXTX_H_ */ diff --git a/drivers/net/mlx4/mlx4_txq.c b/drivers/net/mlx4/mlx4_txq.c index 071b2d5d..6edaadbb 100644 --- a/drivers/net/mlx4/mlx4_txq.c +++ b/drivers/net/mlx4/mlx4_txq.c @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2017 6WIND S.A. 
- * Copyright 2017 Mellanox + * Copyright 2017 Mellanox Technologies, Ltd */ /** @@ -63,64 +63,6 @@ mlx4_txq_free_elts(struct txq *txq) txq->elts_tail = txq->elts_head; } -struct txq_mp2mr_mbuf_check_data { - int ret; -}; - -/** - * Callback function for rte_mempool_obj_iter() to check whether a given - * mempool object looks like a mbuf. - * - * @param[in] mp - * The mempool pointer - * @param[in] arg - * Context data (struct mlx4_txq_mp2mr_mbuf_check_data). Contains the - * return value. - * @param[in] obj - * Object address. - * @param index - * Object index, unused. - */ -static void -mlx4_txq_mp2mr_mbuf_check(struct rte_mempool *mp, void *arg, void *obj, - uint32_t index) -{ - struct txq_mp2mr_mbuf_check_data *data = arg; - struct rte_mbuf *buf = obj; - - (void)index; - /* - * Check whether mbuf structure fits element size and whether mempool - * pointer is valid. - */ - if (sizeof(*buf) > mp->elt_size || buf->pool != mp) - data->ret = -1; -} - -/** - * Iterator function for rte_mempool_walk() to register existing mempools and - * fill the MP to MR cache of a Tx queue. - * - * @param[in] mp - * Memory Pool to register. - * @param *arg - * Pointer to Tx queue structure. - */ -static void -mlx4_txq_mp2mr_iter(struct rte_mempool *mp, void *arg) -{ - struct txq *txq = arg; - struct txq_mp2mr_mbuf_check_data data = { - .ret = 0, - }; - - /* Register mempool only if the first element looks like a mbuf. */ - if (rte_mempool_obj_iter(mp, mlx4_txq_mp2mr_mbuf_check, &data) == 0 || - data.ret == -1) - return; - mlx4_txq_mp2mr(txq, mp); -} - /** * Retrieves information needed in order to directly access the Tx queue. * @@ -144,9 +86,9 @@ mlx4_txq_fill_dv_obj_info(struct txq *txq, struct mlx4dv_obj *mlxdv) uint32_t headroom_size = 2048 + (1 << dqp->sq.wqe_shift); /* Continuous headroom size bytes must always stay freed. */ sq->remain_size = sq->size - headroom_size; - sq->owner_opcode = MLX4_OPCODE_SEND | (0 << MLX4_SQ_OWNER_BIT); + sq->owner_opcode = MLX4_OPCODE_SEND | (0u << MLX4_SQ_OWNER_BIT); sq->stamp = rte_cpu_to_be_32(MLX4_SQ_STAMP_VAL | - (0 << MLX4_SQ_OWNER_BIT)); + (0u << MLX4_SQ_OWNER_BIT)); sq->db = dqp->sdb; sq->doorbell_qpn = dqp->doorbell_qpn; cq->buf = dcq->buf.buf; @@ -179,26 +121,6 @@ mlx4_get_tx_port_offloads(struct priv *priv) return offloads; } -/** - * Checks if the per-queue offload configuration is valid. - * - * @param priv - * Pointer to private structure. - * @param requested - * Per-queue offloads configuration. - * - * @return - * Nonzero when configuration is valid. - */ -static int -mlx4_check_tx_queue_offloads(struct priv *priv, uint64_t requested) -{ - uint64_t mandatory = priv->dev->data->dev_conf.txmode.offloads; - uint64_t supported = mlx4_get_tx_port_offloads(priv); - - return !((mandatory ^ requested) & supported); -} - /** * DPDK callback to configure a Tx queue. * @@ -246,23 +168,13 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, }, }; int ret; + uint64_t offloads; + + offloads = conf->offloads | dev->data->dev_conf.txmode.offloads; DEBUG("%p: configuring queue %u for %u descriptors", (void *)dev, idx, desc); - /* - * Don't verify port offloads for application which - * use the old API. 
- */ - if ((conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) && - !mlx4_check_tx_queue_offloads(priv, conf->offloads)) { - rte_errno = ENOTSUP; - ERROR("%p: Tx queue offloads 0x%" PRIx64 " don't match port " - "offloads 0x%" PRIx64 " or supported offloads 0x%" PRIx64, - (void *)dev, conf->offloads, - dev->data->dev_conf.txmode.offloads, - mlx4_get_tx_port_offloads(priv)); - return -rte_errno; - } + if (idx >= dev->data->nb_tx_queues) { rte_errno = EOVERFLOW; ERROR("%p: queue index out of range (%u >= %u)", @@ -313,11 +225,11 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, .elts_comp_cd_init = RTE_MIN(MLX4_PMD_TX_PER_COMP_REQ, desc / 4), .csum = priv->hw_csum && - (conf->offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM | + (offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM | DEV_TX_OFFLOAD_TCP_CKSUM)), .csum_l2tun = priv->hw_csum_l2tun && - (conf->offloads & + (offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM), /* Enable Tx loopback for VF devices. */ .lb = !!priv->vf, @@ -404,8 +316,13 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, /* Save first wqe pointer in the first element. */ (&(*txq->elts)[0])->wqe = (volatile struct mlx4_wqe_ctrl_seg *)txq->msq.buf; - /* Pre-register known mempools. */ - rte_mempool_walk(mlx4_txq_mp2mr_iter, txq); + if (mlx4_mr_btree_init(&txq->mr_ctrl.cache_bh, + MLX4_MR_BTREE_CACHE_N, socket)) { + /* rte_errno is already set. */ + goto error; + } + /* Save pointer of global generation number to check memory event. */ + txq->mr_ctrl.dev_gen_ptr = &priv->mr.dev_gen; DEBUG("%p: adding Tx queue %p to list", (void *)dev, (void *)txq); dev->data->tx_queues[idx] = txq; return 0; @@ -446,11 +363,6 @@ mlx4_tx_queue_release(void *dpdk_txq) claim_zero(mlx4_glue->destroy_qp(txq->qp)); if (txq->cq) claim_zero(mlx4_glue->destroy_cq(txq->cq)); - for (i = 0; i != RTE_DIM(txq->mp2mr); ++i) { - if (!txq->mp2mr[i].mp) - break; - assert(txq->mp2mr[i].mr); - mlx4_mr_put(txq->mp2mr[i].mr); - } + mlx4_mr_btree_free(&txq->mr_ctrl.cache_bh); rte_free(txq); } diff --git a/drivers/net/mlx4/mlx4_utils.c b/drivers/net/mlx4/mlx4_utils.c index d10812ec..a727d703 100644 --- a/drivers/net/mlx4/mlx4_utils.c +++ b/drivers/net/mlx4/mlx4_utils.c @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2017 6WIND S.A. - * Copyright 2017 Mellanox + * Copyright 2017 Mellanox Technologies, Ltd */ /** diff --git a/drivers/net/mlx4/mlx4_utils.h b/drivers/net/mlx4/mlx4_utils.h index 9fdbacad..86abb3b7 100644 --- a/drivers/net/mlx4/mlx4_utils.h +++ b/drivers/net/mlx4/mlx4_utils.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2017 6WIND S.A. - * Copyright 2017 Mellanox + * Copyright 2017 Mellanox Technologies, Ltd */ #ifndef MLX4_UTILS_H_ diff --git a/drivers/net/mlx5/Makefile b/drivers/net/mlx5/Makefile index 3bc9736c..8a5229e6 100644 --- a/drivers/net/mlx5/Makefile +++ b/drivers/net/mlx5/Makefile @@ -1,33 +1,6 @@ -# BSD LICENSE -# +# SPDX-License-Identifier: BSD-3-Clause # Copyright 2015 6WIND S.A. -# Copyright 2015 Mellanox. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in -# the documentation and/or other materials provided with the -# distribution. 
-# * Neither the name of 6WIND S.A. nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# Copyright 2015 Mellanox Technologies, Ltd include $(RTE_SDK)/mk/rte.vars.mk @@ -35,7 +8,7 @@ include $(RTE_SDK)/mk/rte.vars.mk LIB = librte_pmd_mlx5.a LIB_GLUE = $(LIB_GLUE_BASE).$(LIB_GLUE_VERSION) LIB_GLUE_BASE = librte_pmd_mlx5_glue.so -LIB_GLUE_VERSION = 18.02.0 +LIB_GLUE_VERSION = 18.05.0 # Sources. SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5.c @@ -59,6 +32,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_rss.c SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_mr.c SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_flow.c SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_socket.c +SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_nl.c ifeq ($(CONFIG_RTE_LIBRTE_MLX5_DLOPEN_DEPS),y) INSTALL-$(CONFIG_RTE_LIBRTE_MLX5_PMD)-lib += $(LIB_GLUE) @@ -92,6 +66,9 @@ CFLAGS += -Wno-error=cast-qual EXPORT_MAP := rte_pmd_mlx5_version.map LIBABIVER := 1 +# memseg walk is not part of stable API +CFLAGS += -DALLOW_EXPERIMENTAL_API + # DEBUG which is usually provided on the command-line may enable # CONFIG_RTE_LIBRTE_MLX5_DEBUG. ifeq ($(DEBUG),1) @@ -105,10 +82,6 @@ else CFLAGS += -DNDEBUG -UPEDANTIC endif -ifdef CONFIG_RTE_LIBRTE_MLX5_TX_MP_CACHE -CFLAGS += -DMLX5_PMD_TX_MP_CACHE=$(CONFIG_RTE_LIBRTE_MLX5_TX_MP_CACHE) -endif - include $(RTE_SDK)/mk/rte.lib.mk # Generate and clean-up mlx5_autoconf.h. 
@@ -125,15 +98,30 @@ mlx5_autoconf.h.new: FORCE mlx5_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh $Q $(RM) -f -- '$@' $Q sh -- '$<' '$@' \ - HAVE_IBV_DEVICE_VXLAN_SUPPORT \ + HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT \ + infiniband/mlx5dv.h \ + enum MLX5DV_CONTEXT_MASK_STRIDING_RQ \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_IBV_DEVICE_TUNNEL_SUPPORT \ + infiniband/mlx5dv.h \ + enum MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_IBV_DEVICE_MPLS_SUPPORT \ infiniband/verbs.h \ - enum IBV_DEVICE_VXLAN_SUPPORT \ + enum IBV_FLOW_SPEC_MPLS \ $(AUTOCONF_OUTPUT) $Q sh -- '$<' '$@' \ HAVE_IBV_WQ_FLAG_RX_END_PADDING \ infiniband/verbs.h \ enum IBV_WQ_FLAG_RX_END_PADDING \ $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_IBV_MLX5_MOD_SWP \ + infiniband/mlx5dv.h \ + type 'struct mlx5dv_sw_parsing_caps' \ + $(AUTOCONF_OUTPUT) $Q sh -- '$<' '$@' \ HAVE_IBV_MLX5_MOD_MPW \ infiniband/mlx5dv.h \ @@ -181,8 +169,13 @@ ifeq ($(CONFIG_RTE_LIBRTE_MLX5_DLOPEN_DEPS),y) $(LIB): $(LIB_GLUE) +ifeq ($(LINK_USING_CC),1) +GLUE_LDFLAGS := $(call linkerprefix,$(LDFLAGS)) +else +GLUE_LDFLAGS := $(LDFLAGS) +endif $(LIB_GLUE): mlx5_glue.o - $Q $(LD) $(LDFLAGS) $(EXTRA_LDFLAGS) \ + $Q $(LD) $(GLUE_LDFLAGS) $(EXTRA_LDFLAGS) \ -Wl,-h,$(LIB_GLUE) \ -s -shared -o $@ $< -libverbs -lmlx5 diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c index 6c0985bd..c933e274 100644 --- a/drivers/net/mlx5/mlx5.c +++ b/drivers/net/mlx5/mlx5.c @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2015 6WIND S.A. - * Copyright 2015 Mellanox. + * Copyright 2015 Mellanox Technologies, Ltd */ #include @@ -13,6 +13,7 @@ #include #include #include +#include /* Verbs header. */ /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */ @@ -33,6 +34,8 @@ #include #include #include +#include +#include #include "mlx5.h" #include "mlx5_utils.h" @@ -40,10 +43,23 @@ #include "mlx5_autoconf.h" #include "mlx5_defs.h" #include "mlx5_glue.h" +#include "mlx5_mr.h" /* Device parameter to enable RX completion queue compression. */ #define MLX5_RXQ_CQE_COMP_EN "rxq_cqe_comp_en" +/* Device parameter to enable Multi-Packet Rx queue. */ +#define MLX5_RX_MPRQ_EN "mprq_en" + +/* Device parameter to configure log 2 of the number of strides for MPRQ. */ +#define MLX5_RX_MPRQ_LOG_STRIDE_NUM "mprq_log_stride_num" + +/* Device parameter to limit the size of memcpy'd packet for MPRQ. */ +#define MLX5_RX_MPRQ_MAX_MEMCPY_LEN "mprq_max_memcpy_len" + +/* Device parameter to set the minimum number of Rx queues to enable MPRQ. */ +#define MLX5_RXQS_MIN_MPRQ "rxqs_min_mprq" + /* Device parameter to configure inline send. */ #define MLX5_TXQ_INLINE "txq_inline" @@ -68,6 +84,12 @@ /* Device parameter to enable hardware Rx vector. */ #define MLX5_RX_VEC_EN "rx_vec_en" +/* Allow L3 VXLAN flow creation. */ +#define MLX5_L3_VXLAN_EN "l3_vxlan_en" + +/* Activate Netlink support in VF mode. */ +#define MLX5_VF_NL_EN "vf_nl_en" + #ifndef HAVE_IBV_MLX5_MOD_MPW #define MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED (1 << 2) #define MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW (1 << 3) @@ -77,6 +99,50 @@ #define MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP (1 << 4) #endif +static const char *MZ_MLX5_PMD_SHARED_DATA = "mlx5_pmd_shared_data"; + +/* Shared memory between primary and secondary processes. */ +struct mlx5_shared_data *mlx5_shared_data; + +/* Spinlock for mlx5_shared_data allocation. */ +static rte_spinlock_t mlx5_shared_data_lock = RTE_SPINLOCK_INITIALIZER; + +/** Driver-specific log messages type. 
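The mprq_* keys defined above are ordinary device arguments, so Multi-Packet RQ is enabled per port from the EAL device option rather than at build time. A hypothetical testpmd invocation (the PCI address is made up; any key left out keeps its default):

testpmd -w 0000:84:00.0,mprq_en=1,mprq_log_stride_num=6,rxqs_min_mprq=2 -- -i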
*/ +int mlx5_logtype; + +/** + * Prepare shared data between primary and secondary process. + */ +static void +mlx5_prepare_shared_data(void) +{ + const struct rte_memzone *mz; + + rte_spinlock_lock(&mlx5_shared_data_lock); + if (mlx5_shared_data == NULL) { + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + /* Allocate shared memory. */ + mz = rte_memzone_reserve(MZ_MLX5_PMD_SHARED_DATA, + sizeof(*mlx5_shared_data), + SOCKET_ID_ANY, 0); + } else { + /* Lookup allocated shared memory. */ + mz = rte_memzone_lookup(MZ_MLX5_PMD_SHARED_DATA); + } + if (mz == NULL) + rte_panic("Cannot allocate mlx5 shared data\n"); + mlx5_shared_data = mz->addr; + /* Initialize shared data. */ + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + LIST_INIT(&mlx5_shared_data->mem_event_cb_list); + rte_rwlock_init(&mlx5_shared_data->mem_event_rwlock); + } + rte_mem_event_callback_register("MLX5_MEM_EVENT_CB", + mlx5_mr_mem_event_cb, NULL); + } + rte_spinlock_unlock(&mlx5_shared_data_lock); +} + /** * Retrieve integer value from environment variable. * @@ -108,7 +174,7 @@ mlx5_getenv_int(const char *name) * A pointer to the callback data. * * @return - * a pointer to the allocate space. + * Allocated buffer, NULL otherwise and rte_errno is set. */ static void * mlx5_alloc_verbs_buf(size_t size, void *data) @@ -130,7 +196,8 @@ mlx5_alloc_verbs_buf(size_t size, void *data) } assert(data != NULL); ret = rte_malloc_socket(__func__, size, alignment, socket); - DEBUG("Extern alloc size: %lu, align: %lu: %p", size, alignment, ret); + if (!ret && size) + rte_errno = ENOMEM; return ret; } @@ -146,7 +213,6 @@ static void mlx5_free_verbs_buf(void *ptr, void *data __rte_unused) { assert(data != NULL); - DEBUG("Extern free request: %p", ptr); rte_free(ptr); } @@ -165,13 +231,12 @@ mlx5_dev_close(struct rte_eth_dev *dev) unsigned int i; int ret; - priv_lock(priv); - DEBUG("%p: closing device \"%s\"", - (void *)dev, - ((priv->ctx != NULL) ? priv->ctx->device->name : "")); + DRV_LOG(DEBUG, "port %u closing device \"%s\"", + dev->data->port_id, + ((priv->ctx != NULL) ? priv->ctx->device->name : "")); /* In case mlx5_dev_stop() has not been called. */ - priv_dev_interrupt_handler_uninstall(priv, dev); - priv_dev_traffic_disable(priv, dev); + mlx5_dev_interrupt_handler_uninstall(dev); + mlx5_traffic_disable(dev); /* Prevent crashes when queues are still in use. */ dev->rx_pkt_burst = removed_rx_burst; dev->tx_pkt_burst = removed_tx_burst; @@ -179,7 +244,7 @@ mlx5_dev_close(struct rte_eth_dev *dev) /* XXX race condition if mlx5_rx_burst() is still running. */ usleep(1000); for (i = 0; (i != priv->rxqs_n); ++i) - mlx5_priv_rxq_release(priv, i); + mlx5_rxq_release(dev, i); priv->rxqs_n = 0; priv->rxqs = NULL; } @@ -187,10 +252,13 @@ mlx5_dev_close(struct rte_eth_dev *dev) /* XXX race condition if mlx5_tx_burst() is still running. 
*/ usleep(1000); for (i = 0; (i != priv->txqs_n); ++i) - mlx5_priv_txq_release(priv, i); + mlx5_txq_release(dev, i); priv->txqs_n = 0; priv->txqs = NULL; } + mlx5_flow_delete_drop_queue(dev); + mlx5_mprq_free_mp(dev); + mlx5_mr_release(dev); if (priv->pd != NULL) { assert(priv->ctx != NULL); claim_zero(mlx5_glue->dealloc_pd(priv->pd)); @@ -202,32 +270,39 @@ mlx5_dev_close(struct rte_eth_dev *dev) if (priv->reta_idx != NULL) rte_free(priv->reta_idx); if (priv->primary_socket) - priv_socket_uninit(priv); - ret = mlx5_priv_hrxq_ibv_verify(priv); - if (ret) - WARN("%p: some Hash Rx queue still remain", (void *)priv); - ret = mlx5_priv_ind_table_ibv_verify(priv); + mlx5_socket_uninit(dev); + if (priv->config.vf) + mlx5_nl_mac_addr_flush(dev); + if (priv->nl_socket >= 0) + close(priv->nl_socket); + ret = mlx5_hrxq_ibv_verify(dev); if (ret) - WARN("%p: some Indirection table still remain", (void *)priv); - ret = mlx5_priv_rxq_ibv_verify(priv); + DRV_LOG(WARNING, "port %u some hash Rx queue still remain", + dev->data->port_id); + ret = mlx5_ind_table_ibv_verify(dev); if (ret) - WARN("%p: some Verbs Rx queue still remain", (void *)priv); - ret = mlx5_priv_rxq_verify(priv); + DRV_LOG(WARNING, "port %u some indirection table still remain", + dev->data->port_id); + ret = mlx5_rxq_ibv_verify(dev); if (ret) - WARN("%p: some Rx Queues still remain", (void *)priv); - ret = mlx5_priv_txq_ibv_verify(priv); + DRV_LOG(WARNING, "port %u some Verbs Rx queue still remain", + dev->data->port_id); + ret = mlx5_rxq_verify(dev); if (ret) - WARN("%p: some Verbs Tx queue still remain", (void *)priv); - ret = mlx5_priv_txq_verify(priv); + DRV_LOG(WARNING, "port %u some Rx queues still remain", + dev->data->port_id); + ret = mlx5_txq_ibv_verify(dev); if (ret) - WARN("%p: some Tx Queues still remain", (void *)priv); - ret = priv_flow_verify(priv); + DRV_LOG(WARNING, "port %u some Verbs Tx queue still remain", + dev->data->port_id); + ret = mlx5_txq_verify(dev); if (ret) - WARN("%p: some flows still remain", (void *)priv); - ret = priv_mr_verify(priv); + DRV_LOG(WARNING, "port %u some Tx queues still remain", + dev->data->port_id); + ret = mlx5_flow_verify(dev); if (ret) - WARN("%p: some Memory Region still remain", (void *)priv); - priv_unlock(priv); + DRV_LOG(WARNING, "port %u some flows still remain", + dev->data->port_id); memset(priv, 0, sizeof(*priv)); } @@ -260,6 +335,7 @@ const struct eth_dev_ops mlx5_dev_ops = { .mac_addr_remove = mlx5_mac_addr_remove, .mac_addr_add = mlx5_mac_addr_add, .mac_addr_set = mlx5_mac_addr_set, + .set_mc_addr_list = mlx5_set_mc_addr_list, .mtu_set = mlx5_dev_set_mtu, .vlan_strip_queue_set = mlx5_vlan_strip_queue_set, .vlan_offload_set = mlx5_vlan_offload_set, @@ -312,6 +388,7 @@ const struct eth_dev_ops mlx5_dev_ops_isolate = { .mac_addr_remove = mlx5_mac_addr_remove, .mac_addr_add = mlx5_mac_addr_add, .mac_addr_set = mlx5_mac_addr_set, + .set_mc_addr_list = mlx5_set_mc_addr_list, .mtu_set = mlx5_dev_set_mtu, .vlan_strip_queue_set = mlx5_vlan_strip_queue_set, .vlan_offload_set = mlx5_vlan_offload_set, @@ -367,7 +444,7 @@ mlx5_dev_idx(struct rte_pci_addr *pci_addr) * User data. * * @return - * 0 on success, negative errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
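That return convention, repeated in the doc comments throughout this rework, pairs a negative return value with a positive reason code left in rte_errno, so callers can test the sign and still report why. The pattern in isolation (do_configure() is a made-up example; rte_errno itself is the real DPDK per-lcore variable):

#include <errno.h>

#include <rte_errno.h>

int
do_configure(int valid)
{
	if (!valid) {
		rte_errno = EINVAL; /* Keep the positive code here... */
		return -rte_errno;  /* ...and return it negated. */
	}
	return 0;
}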
*/ static int mlx5_args_check(const char *key, const char *val, void *opaque) @@ -378,11 +455,20 @@ mlx5_args_check(const char *key, const char *val, void *opaque) errno = 0; tmp = strtoul(val, NULL, 0); if (errno) { - WARN("%s: \"%s\" is not a valid integer", key, val); - return errno; + rte_errno = errno; + DRV_LOG(WARNING, "%s: \"%s\" is not a valid integer", key, val); + return -rte_errno; } if (strcmp(MLX5_RXQ_CQE_COMP_EN, key) == 0) { config->cqe_comp = !!tmp; + } else if (strcmp(MLX5_RX_MPRQ_EN, key) == 0) { + config->mprq.enabled = !!tmp; + } else if (strcmp(MLX5_RX_MPRQ_LOG_STRIDE_NUM, key) == 0) { + config->mprq.stride_num_n = tmp; + } else if (strcmp(MLX5_RX_MPRQ_MAX_MEMCPY_LEN, key) == 0) { + config->mprq.max_memcpy_len = tmp; + } else if (strcmp(MLX5_RXQS_MIN_MPRQ, key) == 0) { + config->mprq.min_rxqs_num = tmp; } else if (strcmp(MLX5_TXQ_INLINE, key) == 0) { config->txq_inline = tmp; } else if (strcmp(MLX5_TXQS_MIN_INLINE, key) == 0) { @@ -397,9 +483,14 @@ mlx5_args_check(const char *key, const char *val, void *opaque) config->tx_vec_en = !!tmp; } else if (strcmp(MLX5_RX_VEC_EN, key) == 0) { config->rx_vec_en = !!tmp; + } else if (strcmp(MLX5_L3_VXLAN_EN, key) == 0) { + config->l3_vxlan_en = !!tmp; + } else if (strcmp(MLX5_VF_NL_EN, key) == 0) { + config->vf_nl_en = !!tmp; } else { - WARN("%s: unknown parameter", key); - return -EINVAL; + DRV_LOG(WARNING, "%s: unknown parameter", key); + rte_errno = EINVAL; + return -rte_errno; } return 0; } @@ -413,13 +504,17 @@ mlx5_args_check(const char *key, const char *val, void *opaque) * Device arguments structure. * * @return - * 0 on success, errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs) { const char **params = (const char *[]){ MLX5_RXQ_CQE_COMP_EN, + MLX5_RX_MPRQ_EN, + MLX5_RX_MPRQ_LOG_STRIDE_NUM, + MLX5_RX_MPRQ_MAX_MEMCPY_LEN, + MLX5_RXQS_MIN_MPRQ, MLX5_TXQ_INLINE, MLX5_TXQS_MIN_INLINE, MLX5_TXQ_MPW_EN, @@ -427,6 +522,8 @@ mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs) MLX5_TXQ_MAX_INLINE_LEN, MLX5_TX_VEC_EN, MLX5_RX_VEC_EN, + MLX5_L3_VXLAN_EN, + MLX5_VF_NL_EN, NULL, }; struct rte_kvargs *kvlist; @@ -444,9 +541,10 @@ mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs) if (rte_kvargs_count(kvlist, params[i])) { ret = rte_kvargs_process(kvlist, params[i], mlx5_args_check, config); - if (ret != 0) { + if (ret) { + rte_errno = EINVAL; rte_kvargs_free(kvlist); - return ret; + return -rte_errno; } } } @@ -465,50 +563,60 @@ static struct rte_pci_driver mlx5_driver; */ static void *uar_base; +static int +find_lower_va_bound(const struct rte_memseg_list *msl __rte_unused, + const struct rte_memseg *ms, void *arg) +{ + void **addr = arg; + + if (*addr == NULL) + *addr = ms->addr; + else + *addr = RTE_MIN(*addr, ms->addr); + + return 0; +} + /** * Reserve UAR address space for primary process. * - * @param[in] priv - * Pointer to private structure. + * @param[in] dev + * Pointer to Ethernet device. * * @return - * 0 on success, errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int -priv_uar_init_primary(struct priv *priv) +mlx5_uar_init_primary(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; void *addr = (void *)0; - int i; - const struct rte_mem_config *mcfg; - int ret; if (uar_base) { /* UAR address space mapped. 
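find_lower_va_bound() exists because 18.05 dropped the flat mcfg->memseg[] array iterated by the old code; segments are now enumerated through callbacks such as rte_memseg_walk(), which visits each segment until the callback returns non-zero (the API is still experimental in this release, hence the ALLOW_EXPERIMENTAL_API flag added to the Makefiles). Another walker in the same style, here just totaling mapped segment memory:

#include <stddef.h>

#include <rte_common.h>
#include <rte_memory.h>

static int
sum_seg_len(const struct rte_memseg_list *msl __rte_unused,
	    const struct rte_memseg *ms, void *arg)
{
	size_t *total = arg;

	*total += ms->len;
	return 0; /* Zero keeps the walk going; non-zero stops it. */
}

size_t
total_mapped_mem(void)
{
	size_t total = 0;

	rte_memseg_walk(sum_seg_len, &total);
	return total;
}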
*/ priv->uar_base = uar_base; return 0; } /* find out lower bound of hugepage segments */ - mcfg = rte_eal_get_configuration()->mem_config; - for (i = 0; i < RTE_MAX_MEMSEG && mcfg->memseg[i].addr; i++) { - if (addr) - addr = RTE_MIN(addr, mcfg->memseg[i].addr); - else - addr = mcfg->memseg[i].addr; - } + rte_memseg_walk(find_lower_va_bound, &addr); + /* keep distance to hugepages to minimize potential conflicts. */ addr = RTE_PTR_SUB(addr, MLX5_UAR_OFFSET + MLX5_UAR_SIZE); /* anonymous mmap, no real memory consumption. */ addr = mmap(addr, MLX5_UAR_SIZE, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); if (addr == MAP_FAILED) { - ERROR("Failed to reserve UAR address space, please adjust " - "MLX5_UAR_SIZE or try --base-virtaddr"); - ret = ENOMEM; - return ret; + DRV_LOG(ERR, + "port %u failed to reserve UAR address space, please" + " adjust MLX5_UAR_SIZE or try --base-virtaddr", + dev->data->port_id); + rte_errno = ENOMEM; + return -rte_errno; } /* Accept either same addr or a new addr returned from mmap if target * range occupied. */ - INFO("Reserved UAR address space: %p", addr); + DRV_LOG(INFO, "port %u reserved UAR address space: %p", + dev->data->port_id, addr); priv->uar_base = addr; /* for primary and secondary UAR re-mmap. */ uar_base = addr; /* process local, don't reserve again. */ return 0; @@ -518,17 +626,17 @@ priv_uar_init_primary(struct priv *priv) * Reserve UAR address space for secondary process, align with * primary process. * - * @param[in] priv - * Pointer to private structure. + * @param[in] dev + * Pointer to Ethernet device. * * @return - * 0 on success, errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int -priv_uar_init_secondary(struct priv *priv) +mlx5_uar_init_secondary(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; void *addr; - int ret; assert(priv->uar_base); if (uar_base) { /* already reserved. */ @@ -539,20 +647,23 @@ priv_uar_init_secondary(struct priv *priv) addr = mmap(priv->uar_base, MLX5_UAR_SIZE, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); if (addr == MAP_FAILED) { - ERROR("UAR mmap failed: %p size: %llu", - priv->uar_base, MLX5_UAR_SIZE); - ret = ENXIO; - return ret; + DRV_LOG(ERR, "port %u UAR mmap failed: %p size: %llu", + dev->data->port_id, priv->uar_base, MLX5_UAR_SIZE); + rte_errno = ENXIO; + return -rte_errno; } if (priv->uar_base != addr) { - ERROR("UAR address %p size %llu occupied, please adjust " - "MLX5_UAR_OFFSET or try EAL parameter --base-virtaddr", - priv->uar_base, MLX5_UAR_SIZE); - ret = ENXIO; - return ret; + DRV_LOG(ERR, + "port %u UAR address %p size %llu occupied, please" + " adjust MLX5_UAR_OFFSET or try EAL parameter" + " --base-virtaddr", + dev->data->port_id, priv->uar_base, MLX5_UAR_SIZE); + rte_errno = ENXIO; + return -rte_errno; } uar_base = addr; /* process local, don't reserve again */ - INFO("Reserved UAR address space: %p", addr); + DRV_LOG(INFO, "port %u reserved UAR address space: %p", + dev->data->port_id, addr); return 0; } @@ -568,45 +679,57 @@ priv_uar_init_secondary(struct priv *priv) * PCI device information. * * @return - * 0 on success, negative errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
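The UAR setup above is nothing more than address-space reservation: an anonymous PROT_NONE mapping costs no physical memory but claims a VA range that primary and secondary processes can later re-map at the same location. Stripped to its essence (the size here is arbitrary, playing the role of MLX5_UAR_SIZE):

#include <stdio.h>
#include <sys/mman.h>

#define RESERVE_SIZE (1ULL << 32) /* Arbitrary demo size. */

int
main(void)
{
	/* No read/write access and no backing memory: just a VA-range claim. */
	void *addr = mmap(NULL, RESERVE_SIZE, PROT_NONE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (addr == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	printf("reserved %llu bytes at %p\n",
	       (unsigned long long)RESERVE_SIZE, addr);
	munmap(addr, RESERVE_SIZE);
	return 0;
}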
*/ static int -mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) +mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + struct rte_pci_device *pci_dev) { - struct ibv_device **list; + struct ibv_device **list = NULL; struct ibv_device *ibv_dev; int err = 0; struct ibv_context *attr_ctx = NULL; struct ibv_device_attr_ex device_attr; - unsigned int sriov; + unsigned int vf = 0; unsigned int mps; unsigned int cqe_comp; unsigned int tunnel_en = 0; + unsigned int mpls_en = 0; + unsigned int swp = 0; + unsigned int verb_priorities = 0; + unsigned int mprq = 0; + unsigned int mprq_min_stride_size_n = 0; + unsigned int mprq_max_stride_size_n = 0; + unsigned int mprq_min_stride_num_n = 0; + unsigned int mprq_max_stride_num_n = 0; int idx; int i; - struct mlx5dv_context attrs_out; + struct mlx5dv_context attrs_out = {0}; #ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT struct ibv_counter_set_description cs_desc; #endif - (void)pci_drv; + /* Prepare shared data between primary and secondary process. */ + mlx5_prepare_shared_data(); assert(pci_drv == &mlx5_driver); /* Get mlx5_dev[] index. */ idx = mlx5_dev_idx(&pci_dev->addr); if (idx == -1) { - ERROR("this driver cannot support any more adapters"); - return -ENOMEM; + DRV_LOG(ERR, "this driver cannot support any more adapters"); + err = ENOMEM; + goto error; } - DEBUG("using driver device index %d", idx); - + DRV_LOG(DEBUG, "using driver device index %d", idx); /* Save PCI address. */ mlx5_dev[idx].pci_addr = pci_dev->addr; list = mlx5_glue->get_device_list(&i); if (list == NULL) { assert(errno); + err = errno; if (errno == ENOSYS) - ERROR("cannot list devices, is ib_uverbs loaded?"); - return -errno; + DRV_LOG(ERR, + "cannot list devices, is ib_uverbs loaded?"); + goto error; } assert(i >= 0); /* @@ -617,7 +740,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) struct rte_pci_addr pci_addr; --i; - DEBUG("checking device \"%s\"", list[i]->name); + DRV_LOG(DEBUG, "checking device \"%s\"", list[i]->name); if (mlx5_ibv_device_to_pci_addr(list[i], &pci_addr)) continue; if ((pci_dev->addr.domain != pci_addr.domain) || @@ -625,7 +748,9 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) (pci_dev->addr.devid != pci_addr.devid) || (pci_dev->addr.function != pci_addr.function)) continue; - sriov = ((pci_dev->id.device_id == + DRV_LOG(INFO, "PCI information matches, using device \"%s\"", + list[i]->name); + vf = ((pci_dev->id.device_id == PCI_DEVICE_ID_MELLANOX_CONNECTX4VF) || (pci_dev->id.device_id == PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF) || @@ -633,70 +758,121 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) PCI_DEVICE_ID_MELLANOX_CONNECTX5VF) || (pci_dev->id.device_id == PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF)); - switch (pci_dev->id.device_id) { - case PCI_DEVICE_ID_MELLANOX_CONNECTX4: - tunnel_en = 1; - break; - case PCI_DEVICE_ID_MELLANOX_CONNECTX4LX: - case PCI_DEVICE_ID_MELLANOX_CONNECTX5: - case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF: - case PCI_DEVICE_ID_MELLANOX_CONNECTX5EX: - case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF: - tunnel_en = 1; - break; - default: - break; - } - INFO("PCI information matches, using device \"%s\"" - " (SR-IOV: %s)", - list[i]->name, - sriov ? 
"true" : "false"); attr_ctx = mlx5_glue->open_device(list[i]); - err = errno; + rte_errno = errno; + err = rte_errno; break; } if (attr_ctx == NULL) { - mlx5_glue->free_device_list(list); switch (err) { case 0: - ERROR("cannot access device, is mlx5_ib loaded?"); - return -ENODEV; + DRV_LOG(ERR, + "cannot access device, is mlx5_ib loaded?"); + err = ENODEV; + break; case EINVAL: - ERROR("cannot use device, are drivers up to date?"); - return -EINVAL; + DRV_LOG(ERR, + "cannot use device, are drivers up to date?"); + break; } - assert(err > 0); - return -err; + goto error; } ibv_dev = list[i]; - - DEBUG("device opened"); + DRV_LOG(DEBUG, "device opened"); +#ifdef HAVE_IBV_MLX5_MOD_SWP + attrs_out.comp_mask |= MLX5DV_CONTEXT_MASK_SWP; +#endif /* * Multi-packet send is supported by ConnectX-4 Lx PF as well * as all ConnectX-5 devices. */ +#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT + attrs_out.comp_mask |= MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS; +#endif +#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT + attrs_out.comp_mask |= MLX5DV_CONTEXT_MASK_STRIDING_RQ; +#endif mlx5_glue->dv_query_device(attr_ctx, &attrs_out); if (attrs_out.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) { if (attrs_out.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) { - DEBUG("Enhanced MPW is supported"); + DRV_LOG(DEBUG, "enhanced MPW is supported"); mps = MLX5_MPW_ENHANCED; } else { - DEBUG("MPW is supported"); + DRV_LOG(DEBUG, "MPW is supported"); mps = MLX5_MPW; } } else { - DEBUG("MPW isn't supported"); + DRV_LOG(DEBUG, "MPW isn't supported"); mps = MLX5_MPW_DISABLED; } +#ifdef HAVE_IBV_MLX5_MOD_SWP + if (attrs_out.comp_mask & MLX5DV_CONTEXT_MASK_SWP) + swp = attrs_out.sw_parsing_caps.sw_parsing_offloads; + DRV_LOG(DEBUG, "SWP support: %u", swp); +#endif +#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT + if (attrs_out.comp_mask & MLX5DV_CONTEXT_MASK_STRIDING_RQ) { + struct mlx5dv_striding_rq_caps mprq_caps = + attrs_out.striding_rq_caps; + + DRV_LOG(DEBUG, "\tmin_single_stride_log_num_of_bytes: %d", + mprq_caps.min_single_stride_log_num_of_bytes); + DRV_LOG(DEBUG, "\tmax_single_stride_log_num_of_bytes: %d", + mprq_caps.max_single_stride_log_num_of_bytes); + DRV_LOG(DEBUG, "\tmin_single_wqe_log_num_of_strides: %d", + mprq_caps.min_single_wqe_log_num_of_strides); + DRV_LOG(DEBUG, "\tmax_single_wqe_log_num_of_strides: %d", + mprq_caps.max_single_wqe_log_num_of_strides); + DRV_LOG(DEBUG, "\tsupported_qpts: %d", + mprq_caps.supported_qpts); + DRV_LOG(DEBUG, "device supports Multi-Packet RQ"); + mprq = 1; + mprq_min_stride_size_n = + mprq_caps.min_single_stride_log_num_of_bytes; + mprq_max_stride_size_n = + mprq_caps.max_single_stride_log_num_of_bytes; + mprq_min_stride_num_n = + mprq_caps.min_single_wqe_log_num_of_strides; + mprq_max_stride_num_n = + mprq_caps.max_single_wqe_log_num_of_strides; + } +#endif if (RTE_CACHE_LINE_SIZE == 128 && !(attrs_out.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP)) cqe_comp = 0; else cqe_comp = 1; - if (mlx5_glue->query_device_ex(attr_ctx, NULL, &device_attr)) +#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT + if (attrs_out.comp_mask & MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS) { + tunnel_en = ((attrs_out.tunnel_offloads_caps & + MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN) && + (attrs_out.tunnel_offloads_caps & + MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE)); + } + DRV_LOG(DEBUG, "tunnel offloading is %ssupported", + tunnel_en ? 
"" : "not "); +#else + DRV_LOG(WARNING, + "tunnel offloading disabled due to old OFED/rdma-core version"); +#endif +#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT + mpls_en = ((attrs_out.tunnel_offloads_caps & + MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_GRE) && + (attrs_out.tunnel_offloads_caps & + MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_UDP)); + DRV_LOG(DEBUG, "MPLS over GRE/UDP tunnel offloading is %ssupported", + mpls_en ? "" : "not "); +#else + DRV_LOG(WARNING, "MPLS over GRE/UDP tunnel offloading disabled due to" + " old OFED/rdma-core version or firmware configuration"); +#endif + err = mlx5_glue->query_device_ex(attr_ctx, NULL, &device_attr); + if (err) { + DEBUG("ibv_query_device_ex() failed"); goto error; - INFO("%u port(s) detected", device_attr.orig_attr.phys_port_cnt); - + } + DRV_LOG(INFO, "%u port(s) detected", + device_attr.orig_attr.phys_port_cnt); for (i = 0; i < device_attr.orig_attr.phys_port_cnt; i++) { char name[RTE_ETH_NAME_MAX_LEN]; int len; @@ -706,21 +882,29 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) struct ibv_port_attr port_attr; struct ibv_pd *pd = NULL; struct priv *priv = NULL; - struct rte_eth_dev *eth_dev; + struct rte_eth_dev *eth_dev = NULL; struct ibv_device_attr_ex device_attr_ex; struct ether_addr mac; - uint16_t num_vfs = 0; - struct ibv_device_attr_ex device_attr; struct mlx5_dev_config config = { .cqe_comp = cqe_comp, .mps = mps, .tunnel_en = tunnel_en, + .mpls_en = mpls_en, .tx_vec_en = 1, .rx_vec_en = 1, .mpw_hdr_dseg = 0, .txq_inline = MLX5_ARG_UNSET, .txqs_inline = MLX5_ARG_UNSET, .inline_max_packet_sz = MLX5_ARG_UNSET, + .vf_nl_en = 1, + .swp = !!swp, + .mprq = { + .enabled = 0, /* Disabled by default. */ + .stride_num_n = RTE_MAX(MLX5_MPRQ_STRIDE_NUM_N, + mprq_min_stride_num_n), + .max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN, + .min_rxqs_num = MLX5_MPRQ_MIN_RXQS, + }, }; len = snprintf(name, sizeof(name), PCI_PRI_FMT, @@ -728,94 +912,87 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) pci_dev->addr.devid, pci_dev->addr.function); if (device_attr.orig_attr.phys_port_cnt > 1) snprintf(name + len, sizeof(name), " port %u", i); - mlx5_dev[idx].ports |= test; - if (rte_eal_process_type() == RTE_PROC_SECONDARY) { eth_dev = rte_eth_dev_attach_secondary(name); if (eth_dev == NULL) { - ERROR("can not attach rte ethdev"); - err = ENOMEM; + DRV_LOG(ERR, "can not attach rte ethdev"); + rte_errno = ENOMEM; + err = rte_errno; goto error; } eth_dev->device = &pci_dev->device; eth_dev->dev_ops = &mlx5_dev_sec_ops; - priv = eth_dev->data->dev_private; - err = priv_uar_init_secondary(priv); - if (err < 0) { - err = -err; + err = mlx5_uar_init_secondary(eth_dev); + if (err) { + err = rte_errno; goto error; } /* Receive command fd from primary process */ - err = priv_socket_connect(priv); + err = mlx5_socket_connect(eth_dev); if (err < 0) { - err = -err; + err = rte_errno; goto error; } /* Remap UAR for Tx queues. */ - err = priv_tx_uar_remap(priv, err); - if (err) + err = mlx5_tx_uar_remap(eth_dev, err); + if (err) { + err = rte_errno; goto error; + } /* * Ethdev pointer is still required as input since * the primary device is not accessible from the * secondary process. 
*/ eth_dev->rx_pkt_burst = - priv_select_rx_function(priv, eth_dev); + mlx5_select_rx_function(eth_dev); eth_dev->tx_pkt_burst = - priv_select_tx_function(priv, eth_dev); + mlx5_select_tx_function(eth_dev); + rte_eth_dev_probing_finish(eth_dev); continue; } - - DEBUG("using port %u (%08" PRIx32 ")", port, test); - + DRV_LOG(DEBUG, "using port %u (%08" PRIx32 ")", port, test); ctx = mlx5_glue->open_device(ibv_dev); if (ctx == NULL) { err = ENODEV; goto port_error; } - - mlx5_glue->query_device_ex(ctx, NULL, &device_attr); /* Check port status. */ err = mlx5_glue->query_port(ctx, port, &port_attr); if (err) { - ERROR("port query failed: %s", strerror(err)); + DRV_LOG(ERR, "port query failed: %s", strerror(err)); goto port_error; } - if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) { - ERROR("port %d is not configured in Ethernet mode", - port); + DRV_LOG(ERR, + "port %d is not configured in Ethernet mode", + port); err = EINVAL; goto port_error; } - if (port_attr.state != IBV_PORT_ACTIVE) - DEBUG("port %d is not active: \"%s\" (%d)", - port, mlx5_glue->port_state_str(port_attr.state), - port_attr.state); - + DRV_LOG(DEBUG, "port %d is not active: \"%s\" (%d)", + port, + mlx5_glue->port_state_str(port_attr.state), + port_attr.state); /* Allocate protection domain. */ pd = mlx5_glue->alloc_pd(ctx); if (pd == NULL) { - ERROR("PD allocation failure"); + DRV_LOG(ERR, "PD allocation failure"); err = ENOMEM; goto port_error; } - mlx5_dev[idx].ports |= test; - /* from rte_ethdev.c */ priv = rte_zmalloc("ethdev private structure", sizeof(*priv), RTE_CACHE_LINE_SIZE); if (priv == NULL) { - ERROR("priv allocation failure"); + DRV_LOG(ERR, "priv allocation failure"); err = ENOMEM; goto port_error; } - priv->ctx = ctx; strncpy(priv->ibdev_path, priv->ctx->device->ibdev_path, sizeof(priv->ibdev_path)); @@ -825,34 +1002,27 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) priv->mtu = ETHER_MTU; err = mlx5_args(&config, pci_dev->device.devargs); if (err) { - ERROR("failed to process device arguments: %s", - strerror(err)); + DRV_LOG(ERR, "failed to process device arguments: %s", + strerror(err)); + err = rte_errno; goto port_error; } - if (mlx5_glue->query_device_ex(ctx, NULL, &device_attr_ex)) { - ERROR("ibv_query_device_ex() failed"); + err = mlx5_glue->query_device_ex(ctx, NULL, &device_attr_ex); + if (err) { + DRV_LOG(ERR, "ibv_query_device_ex() failed"); goto port_error; } - config.hw_csum = !!(device_attr_ex.device_cap_flags_ex & IBV_DEVICE_RAW_IP_CSUM); - DEBUG("checksum offloading is %ssupported", - (config.hw_csum ? "" : "not ")); - -#ifdef HAVE_IBV_DEVICE_VXLAN_SUPPORT - config.hw_csum_l2tun = - !!(exp_device_attr.exp_device_cap_flags & - IBV_DEVICE_VXLAN_SUPPORT); -#endif - DEBUG("Rx L2 tunnel checksum offloads are %ssupported", - (config.hw_csum_l2tun ? "" : "not ")); - + DRV_LOG(DEBUG, "checksum offloading is %ssupported", + (config.hw_csum ? 
"" : "not ")); #ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT config.flow_counter_en = !!(device_attr.max_counter_sets); mlx5_glue->describe_counter_set(ctx, 0, &cs_desc); - DEBUG("counter type = %d, num of cs = %ld, attributes = %d", - cs_desc.counter_type, cs_desc.num_of_cs, - cs_desc.attributes); + DRV_LOG(DEBUG, + "counter type = %d, num of cs = %ld, attributes = %d", + cs_desc.counter_type, cs_desc.num_of_cs, + cs_desc.attributes); #endif config.ind_table_max_size = device_attr_ex.rss_caps.max_rwq_indirection_table_size; @@ -861,26 +1031,25 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) if (config.ind_table_max_size > (unsigned int)ETH_RSS_RETA_SIZE_512) config.ind_table_max_size = ETH_RSS_RETA_SIZE_512; - DEBUG("maximum RX indirection table size is %u", - config.ind_table_max_size); + DRV_LOG(DEBUG, "maximum Rx indirection table size is %u", + config.ind_table_max_size); config.hw_vlan_strip = !!(device_attr_ex.raw_packet_caps & IBV_RAW_PACKET_CAP_CVLAN_STRIPPING); - DEBUG("VLAN stripping is %ssupported", - (config.hw_vlan_strip ? "" : "not ")); + DRV_LOG(DEBUG, "VLAN stripping is %ssupported", + (config.hw_vlan_strip ? "" : "not ")); config.hw_fcs_strip = !!(device_attr_ex.raw_packet_caps & IBV_RAW_PACKET_CAP_SCATTER_FCS); - DEBUG("FCS stripping configuration is %ssupported", - (config.hw_fcs_strip ? "" : "not ")); + DRV_LOG(DEBUG, "FCS stripping configuration is %ssupported", + (config.hw_fcs_strip ? "" : "not ")); #ifdef HAVE_IBV_WQ_FLAG_RX_END_PADDING config.hw_padding = !!device_attr_ex.rx_pad_end_addr_align; #endif - DEBUG("hardware RX end alignment padding is %ssupported", - (config.hw_padding ? "" : "not ")); - - priv_get_num_vfs(priv, &num_vfs); - config.sriov = (num_vfs || sriov); + DRV_LOG(DEBUG, + "hardware Rx end alignment padding is %ssupported", + (config.hw_padding ? "" : "not ")); + config.vf = vf; config.tso = ((device_attr_ex.tso_caps.max_tso > 0) && (device_attr_ex.tso_caps.supported_qpts & (1 << IBV_QPT_RAW_PACKET))); @@ -888,71 +1057,106 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) config.tso_max_payload_sz = device_attr_ex.tso_caps.max_tso; if (config.mps && !mps) { - ERROR("multi-packet send not supported on this device" - " (" MLX5_TXQ_MPW_EN ")"); + DRV_LOG(ERR, + "multi-packet send not supported on this device" + " (" MLX5_TXQ_MPW_EN ")"); err = ENOTSUP; goto port_error; } - INFO("%sMPS is %s", - config.mps == MLX5_MPW_ENHANCED ? "Enhanced " : "", - config.mps != MLX5_MPW_DISABLED ? "enabled" : "disabled"); + DRV_LOG(INFO, "%s MPS is %s", + config.mps == MLX5_MPW_ENHANCED ? "enhanced " : "", + config.mps != MLX5_MPW_DISABLED ? 
"enabled" : + "disabled"); if (config.cqe_comp && !cqe_comp) { - WARN("Rx CQE compression isn't supported"); + DRV_LOG(WARNING, "Rx CQE compression isn't supported"); config.cqe_comp = 0; } - err = priv_uar_init_primary(priv); - if (err) + config.mprq.enabled = config.mprq.enabled && mprq; + if (config.mprq.enabled) { + if (config.mprq.stride_num_n > mprq_max_stride_num_n || + config.mprq.stride_num_n < mprq_min_stride_num_n) { + config.mprq.stride_num_n = + RTE_MAX(MLX5_MPRQ_STRIDE_NUM_N, + mprq_min_stride_num_n); + DRV_LOG(WARNING, + "the number of strides" + " for Multi-Packet RQ is out of range," + " setting default value (%u)", + 1 << config.mprq.stride_num_n); + } + config.mprq.min_stride_size_n = mprq_min_stride_size_n; + config.mprq.max_stride_size_n = mprq_max_stride_size_n; + } + eth_dev = rte_eth_dev_allocate(name); + if (eth_dev == NULL) { + DRV_LOG(ERR, "can not allocate rte ethdev"); + err = ENOMEM; goto port_error; + } + eth_dev->data->dev_private = priv; + priv->dev_data = eth_dev->data; + eth_dev->data->mac_addrs = priv->mac; + eth_dev->device = &pci_dev->device; + rte_eth_copy_pci_info(eth_dev, pci_dev); + eth_dev->device->driver = &mlx5_driver.driver; + err = mlx5_uar_init_primary(eth_dev); + if (err) { + err = rte_errno; + goto port_error; + } /* Configure the first MAC address by default. */ - if (priv_get_mac(priv, &mac.addr_bytes)) { - ERROR("cannot get MAC address, is mlx5_en loaded?" - " (errno: %s)", strerror(errno)); + if (mlx5_get_mac(eth_dev, &mac.addr_bytes)) { + DRV_LOG(ERR, + "port %u cannot get MAC address, is mlx5_en" + " loaded? (errno: %s)", + eth_dev->data->port_id, strerror(errno)); err = ENODEV; goto port_error; } - INFO("port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x", - priv->port, - mac.addr_bytes[0], mac.addr_bytes[1], - mac.addr_bytes[2], mac.addr_bytes[3], - mac.addr_bytes[4], mac.addr_bytes[5]); + DRV_LOG(INFO, + "port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x", + eth_dev->data->port_id, + mac.addr_bytes[0], mac.addr_bytes[1], + mac.addr_bytes[2], mac.addr_bytes[3], + mac.addr_bytes[4], mac.addr_bytes[5]); #ifndef NDEBUG { char ifname[IF_NAMESIZE]; - if (priv_get_ifname(priv, &ifname) == 0) - DEBUG("port %u ifname is \"%s\"", - priv->port, ifname); + if (mlx5_get_ifname(eth_dev, &ifname) == 0) + DRV_LOG(DEBUG, "port %u ifname is \"%s\"", + eth_dev->data->port_id, ifname); else - DEBUG("port %u ifname is unknown", priv->port); + DRV_LOG(DEBUG, "port %u ifname is unknown", + eth_dev->data->port_id); } #endif /* Get actual MTU if possible. */ - priv_get_mtu(priv, &priv->mtu); - DEBUG("port %u MTU is %u", priv->port, priv->mtu); - - eth_dev = rte_eth_dev_allocate(name); - if (eth_dev == NULL) { - ERROR("can not allocate rte ethdev"); - err = ENOMEM; + err = mlx5_get_mtu(eth_dev, &priv->mtu); + if (err) { + err = rte_errno; goto port_error; } - eth_dev->data->dev_private = priv; - eth_dev->data->mac_addrs = priv->mac; - eth_dev->device = &pci_dev->device; - rte_eth_copy_pci_info(eth_dev, pci_dev); - eth_dev->device->driver = &mlx5_driver.driver; + DRV_LOG(DEBUG, "port %u MTU is %u", eth_dev->data->port_id, + priv->mtu); /* * Initialize burst functions to prevent crashes before link-up. */ eth_dev->rx_pkt_burst = removed_rx_burst; eth_dev->tx_pkt_burst = removed_tx_burst; - priv->dev = eth_dev; eth_dev->dev_ops = &mlx5_dev_ops; /* Register MAC address. 
*/ claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0)); + priv->nl_socket = -1; + priv->nl_sn = 0; + if (vf && config.vf_nl_en) { + priv->nl_socket = mlx5_nl_init(RTMGRP_LINK); + if (priv->nl_socket < 0) + priv->nl_socket = -1; + mlx5_nl_mac_addr_sync(eth_dev); + } TAILQ_INIT(&priv->flows); TAILQ_INIT(&priv->ctrl_flows); - /* Hint libmlx5 to use PMD allocator for data plane resources */ struct mlx5dv_ctx_allocators alctr = { .alloc = &mlx5_alloc_verbs_buf, @@ -962,14 +1166,55 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) mlx5_glue->dv_set_context_attr(ctx, MLX5DV_CTX_ATTR_BUF_ALLOCATORS, (void *)((uintptr_t)&alctr)); - /* Bring Ethernet device up. */ - DEBUG("forcing Ethernet interface up"); - priv_set_flags(priv, ~IFF_UP, IFF_UP); + DRV_LOG(DEBUG, "port %u forcing Ethernet interface up", + eth_dev->data->port_id); + mlx5_set_link_up(eth_dev); + /* + * Even though the interrupt handler is not installed yet, + * interrupts will still trigger on the asyn_fd from + * Verbs context returned by ibv_open_device(). + */ + mlx5_link_update(eth_dev, 0); /* Store device configuration on private structure. */ priv->config = config; + /* Create drop queue. */ + err = mlx5_flow_create_drop_queue(eth_dev); + if (err) { + DRV_LOG(ERR, "port %u drop queue allocation failed: %s", + eth_dev->data->port_id, strerror(rte_errno)); + err = rte_errno; + goto port_error; + } + /* Supported Verbs flow priority number detection. */ + if (verb_priorities == 0) + verb_priorities = mlx5_get_max_verbs_prio(eth_dev); + if (verb_priorities < MLX5_VERBS_FLOW_PRIO_8) { + DRV_LOG(ERR, "port %u wrong Verbs flow priorities: %u", + eth_dev->data->port_id, verb_priorities); + goto port_error; + } + priv->config.max_verbs_prio = verb_priorities; + /* + * Once the device is added to the list of memory event + * callback, its global MR cache table cannot be expanded + * on the fly because of deadlock. If it overflows, lookup + * should be done by searching MR list linearly, which is slow. + */ + err = mlx5_mr_btree_init(&priv->mr.cache, + MLX5_MR_BTREE_CACHE_N * 2, + eth_dev->device->numa_node); + if (err) { + err = rte_errno; + goto port_error; + } + /* Add device to memory callback list. */ + rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock); + LIST_INSERT_HEAD(&mlx5_shared_data->mem_event_cb_list, + priv, mem_event_cb); + rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock); + rte_eth_dev_probing_finish(eth_dev); continue; - port_error: if (priv) rte_free(priv); @@ -977,29 +1222,31 @@ port_error: claim_zero(mlx5_glue->dealloc_pd(pd)); if (ctx) claim_zero(mlx5_glue->close_device(ctx)); + if (eth_dev && rte_eal_process_type() == RTE_PROC_PRIMARY) + rte_eth_dev_release_port(eth_dev); break; } - /* * XXX if something went wrong in the loop above, there is a resource * leak (ctx, pd, priv, dpdk ethdev) but we can do nothing about it as * long as the dpdk does not provide a way to deallocate a ethdev and a * way to enumerate the registered ethdevs to free the previous ones. 
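/*
 * The MR hunk above registers the device on a process-wide list that
 * the memory event callback walks; the rwlock serializes insertions
 * against that walk, which is also why the comment warns the global
 * MR cache cannot be expanded from inside the callback. Minimal
 * sketch of the registration side, with illustrative entry/list names:
 */
#include <sys/queue.h>
#include <rte_rwlock.h>

struct mem_cb_entry {
        LIST_ENTRY(mem_cb_entry) next;
};

static LIST_HEAD(, mem_cb_entry) mem_cb_list =
        LIST_HEAD_INITIALIZER(mem_cb_list);
static rte_rwlock_t mem_cb_lock = RTE_RWLOCK_INITIALIZER;

static void
mem_cb_register(struct mem_cb_entry *e)
{
        rte_rwlock_write_lock(&mem_cb_lock);
        LIST_INSERT_HEAD(&mem_cb_list, e, next);
        rte_rwlock_write_unlock(&mem_cb_lock);
}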
*/ - /* no port found, complain */ if (!mlx5_dev[idx].ports) { - err = ENODEV; - goto error; + rte_errno = ENODEV; + err = rte_errno; } - error: if (attr_ctx) claim_zero(mlx5_glue->close_device(attr_ctx)); if (list) mlx5_glue->free_device_list(list); - assert(err >= 0); - return -err; + if (err) { + rte_errno = err; + return -rte_errno; + } + return 0; } static const struct rte_pci_id mlx5_pci_id_map[] = { @@ -1035,6 +1282,10 @@ static const struct rte_pci_id mlx5_pci_id_map[] = { RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF) }, + { + RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, + PCI_DEVICE_ID_MELLANOX_CONNECTX5BF) + }, { .vendor_id = 0 } @@ -1051,12 +1302,55 @@ static struct rte_pci_driver mlx5_driver = { #ifdef RTE_LIBRTE_MLX5_DLOPEN_DEPS +/** + * Suffix RTE_EAL_PMD_PATH with "-glue". + * + * This function performs a sanity check on RTE_EAL_PMD_PATH before + * suffixing its last component. + * + * @param buf[out] + * Output buffer, should be large enough otherwise NULL is returned. + * @param size + * Size of @p out. + * + * @return + * Pointer to @p buf or @p NULL in case suffix cannot be appended. + */ +static char * +mlx5_glue_path(char *buf, size_t size) +{ + static const char *const bad[] = { "/", ".", "..", NULL }; + const char *path = RTE_EAL_PMD_PATH; + size_t len = strlen(path); + size_t off; + int i; + + while (len && path[len - 1] == '/') + --len; + for (off = len; off && path[off - 1] != '/'; --off) + ; + for (i = 0; bad[i]; ++i) + if (!strncmp(path + off, bad[i], (int)(len - off))) + goto error; + i = snprintf(buf, size, "%.*s-glue", (int)len, path); + if (i == -1 || (size_t)i >= size) + goto error; + return buf; +error: + DRV_LOG(ERR, + "unable to append \"-glue\" to last component of" + " RTE_EAL_PMD_PATH (\"" RTE_EAL_PMD_PATH "\")," + " please re-configure DPDK"); + return NULL; +} + /** * Initialization routine for run-time dependency on rdma-core. */ static int mlx5_glue_init(void) { + char glue_path[sizeof(RTE_EAL_PMD_PATH) - 1 + sizeof("-glue")]; const char *path[] = { /* * A basic security check is necessary before trusting @@ -1064,7 +1358,13 @@ mlx5_glue_init(void) */ (geteuid() == getuid() && getegid() == getgid() ? getenv("MLX5_GLUE_PATH") : NULL), - RTE_EAL_PMD_PATH, + /* + * When RTE_EAL_PMD_PATH is set, use its glue-suffixed + * variant, otherwise let dlopen() look up libraries on its + * own. + */ + (*RTE_EAL_PMD_PATH ? 
+ mlx5_glue_path(glue_path, sizeof(glue_path)) : ""), }; unsigned int i = 0; void *handle = NULL; @@ -1095,7 +1395,8 @@ mlx5_glue_init(void) break; if (sizeof(name) != (size_t)ret + 1) continue; - DEBUG("looking for rdma-core glue as \"%s\"", name); + DRV_LOG(DEBUG, "looking for rdma-core glue as \"%s\"", + name); handle = dlopen(name, RTLD_LAZY); break; } while (1); @@ -1107,7 +1408,7 @@ mlx5_glue_init(void) rte_errno = EINVAL; dlmsg = dlerror(); if (dlmsg) - WARN("cannot load glue library: %s", dlmsg); + DRV_LOG(WARNING, "cannot load glue library: %s", dlmsg); goto glue_error; } sym = dlsym(handle, "mlx5_glue"); @@ -1115,7 +1416,7 @@ mlx5_glue_init(void) rte_errno = EINVAL; dlmsg = dlerror(); if (dlmsg) - ERROR("cannot resolve glue symbol: %s", dlmsg); + DRV_LOG(ERR, "cannot resolve glue symbol: %s", dlmsg); goto glue_error; } mlx5_glue = *sym; @@ -1123,9 +1424,9 @@ mlx5_glue_init(void) glue_error: if (handle) dlclose(handle); - WARN("cannot initialize PMD due to missing run-time" - " dependency on rdma-core libraries (libibverbs," - " libmlx5)"); + DRV_LOG(WARNING, + "cannot initialize PMD due to missing run-time dependency on" + " rdma-core libraries (libibverbs, libmlx5)"); return -rte_errno; } @@ -1138,8 +1439,10 @@ RTE_INIT(rte_mlx5_pmd_init); static void rte_mlx5_pmd_init(void) { - /* Build the static table for ptype conversion. */ + /* Build the static tables for Verbs conversion. */ mlx5_set_ptype_table(); + mlx5_set_cksum_table(); + mlx5_set_swp_types_table(); /* * RDMAV_HUGEPAGES_SAFE tells ibv_fork_init() we intend to use * huge pages. Calling ibv_fork_init() during init allows @@ -1165,8 +1468,9 @@ rte_mlx5_pmd_init(void) } #endif if (strcmp(mlx5_glue->version, MLX5_GLUE_VERSION)) { - ERROR("rdma-core glue \"%s\" mismatch: \"%s\" is required", - mlx5_glue->version, MLX5_GLUE_VERSION); + DRV_LOG(ERR, + "rdma-core glue \"%s\" mismatch: \"%s\" is required", + mlx5_glue->version, MLX5_GLUE_VERSION); return; } mlx5_glue->fork_init(); @@ -1176,3 +1480,11 @@ rte_mlx5_pmd_init(void) RTE_PMD_EXPORT_NAME(net_mlx5, __COUNTER__); RTE_PMD_REGISTER_PCI_TABLE(net_mlx5, mlx5_pci_id_map); RTE_PMD_REGISTER_KMOD_DEP(net_mlx5, "* ib_uverbs & mlx5_core & mlx5_ib"); + +/** Initialize driver log type. */ +RTE_INIT(vdev_netvsc_init_log) +{ + mlx5_logtype = rte_log_register("pmd.net.mlx5"); + if (mlx5_logtype >= 0) + rte_log_set_level(mlx5_logtype, RTE_LOG_NOTICE); +} diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h index 965c19f2..997b04a3 100644 --- a/drivers/net/mlx5/mlx5.h +++ b/drivers/net/mlx5/mlx5.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2015 6WIND S.A. - * Copyright 2015 Mellanox. + * Copyright 2015 Mellanox Technologies, Ltd */ #ifndef RTE_PMD_MLX5_H_ @@ -26,12 +26,13 @@ #include #include #include -#include +#include #include #include #include #include "mlx5_utils.h" +#include "mlx5_mr.h" #include "mlx5_rxtx.h" #include "mlx5_autoconf.h" #include "mlx5_defs.h" @@ -49,8 +50,19 @@ enum { PCI_DEVICE_ID_MELLANOX_CONNECTX5VF = 0x1018, PCI_DEVICE_ID_MELLANOX_CONNECTX5EX = 0x1019, PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF = 0x101a, + PCI_DEVICE_ID_MELLANOX_CONNECTX5BF = 0xa2d2, }; +LIST_HEAD(mlx5_dev_list, priv); + +/* Shared memory between primary and secondary processes. */ +struct mlx5_shared_data { + struct mlx5_dev_list mem_event_cb_list; + rte_rwlock_t mem_event_rwlock; +}; + +extern struct mlx5_shared_data *mlx5_shared_data; + struct mlx5_xstats_ctrl { /* Number of device stats. 
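/*
 * The RTE_INIT block above registers a driver-specific log type at
 * constructor time. The constructor name "vdev_netvsc_init_log" is a
 * copy/paste slip from the vdev_netvsc driver, though harmless, since
 * RTE_INIT only names a static constructor and any unique identifier
 * works. Generic sketch of the pattern with illustrative names:
 */
#include <rte_common.h>
#include <rte_log.h>

static int example_logtype;

RTE_INIT(example_init_log)
{
        example_logtype = rte_log_register("pmd.net.example");
        if (example_logtype >= 0)
                rte_log_set_level(example_logtype, RTE_LOG_NOTICE);
}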
*/ uint16_t stats_n; @@ -75,19 +87,34 @@ TAILQ_HEAD(mlx5_flows, rte_flow); */ struct mlx5_dev_config { unsigned int hw_csum:1; /* Checksum offload is supported. */ - unsigned int hw_csum_l2tun:1; /* Same for L2 tunnels. */ unsigned int hw_vlan_strip:1; /* VLAN stripping is supported. */ unsigned int hw_fcs_strip:1; /* FCS stripping is supported. */ unsigned int hw_padding:1; /* End alignment padding is supported. */ - unsigned int sriov:1; /* This is a VF or PF with VF devices. */ + unsigned int vf:1; /* This is a VF. */ unsigned int mps:2; /* Multi-packet send supported mode. */ - unsigned int tunnel_en:1; /* Whether tunnel is supported. */ + unsigned int tunnel_en:1; + /* Whether tunnel stateless offloads are supported. */ + unsigned int mpls_en:1; /* MPLS over GRE/UDP is enabled. */ unsigned int flow_counter_en:1; /* Whether flow counter is supported. */ unsigned int cqe_comp:1; /* CQE compression is enabled. */ unsigned int tso:1; /* Whether TSO is supported. */ unsigned int tx_vec_en:1; /* Tx vector is enabled. */ unsigned int rx_vec_en:1; /* Rx vector is enabled. */ unsigned int mpw_hdr_dseg:1; /* Enable DSEGs in the title WQEBB. */ + unsigned int l3_vxlan_en:1; /* Enable L3 VXLAN flow creation. */ + unsigned int vf_nl_en:1; /* Enable Netlink requests in VF mode. */ + unsigned int swp:1; /* Tx generic tunnel checksum and TSO offload. */ + struct { + unsigned int enabled:1; /* Whether MPRQ is enabled. */ + unsigned int stride_num_n; /* Number of strides. */ + unsigned int min_stride_size_n; /* Min size of a stride. */ + unsigned int max_stride_size_n; /* Max size of a stride. */ + unsigned int max_memcpy_len; + /* Maximum packet size to memcpy Rx packets. */ + unsigned int min_rxqs_num; + /* Rx queue count threshold to enable MPRQ. */ + } mprq; /* Configurations for Multi-Packet RQ. */ + unsigned int max_verbs_prio; /* Number of Verb flow priorities. */ unsigned int tso_max_payload_sz; /* Maximum TCP payload for TSO. */ unsigned int ind_table_max_size; /* Maximum indirection table size. */ int txq_inline; /* Maximum packet size for inlining. */ @@ -104,6 +131,9 @@ enum mlx5_verbs_alloc_type { MLX5_VERBS_ALLOC_TYPE_RX_QUEUE, }; +/* 8 Verbs priorities. */ +#define MLX5_VERBS_FLOW_PRIO_8 8 + /** * Verbs allocator needs a context to know in the callback which kind of * resources it is allocating. @@ -113,25 +143,30 @@ struct mlx5_verbs_alloc_ctx { const void *obj; /* Pointer to the DPDK object. */ }; +LIST_HEAD(mlx5_mr_list, mlx5_mr); + struct priv { - struct rte_eth_dev *dev; /* Ethernet device of master process. */ + LIST_ENTRY(priv) mem_event_cb; /* Called by memory event callback. */ + struct rte_eth_dev_data *dev_data; /* Pointer to device data. */ struct ibv_context *ctx; /* Verbs context. */ struct ibv_device_attr_ex device_attr; /* Device properties. */ struct ibv_pd *pd; /* Protection Domain. */ char ibdev_path[IBV_SYSFS_PATH_MAX]; /* IB device path for secondary */ struct ether_addr mac[MLX5_MAX_MAC_ADDRESSES]; /* MAC addresses. */ + BITFIELD_DECLARE(mac_own, uint64_t, MLX5_MAX_MAC_ADDRESSES); + /* Bit-field of MAC addresses owned by the PMD. */ uint16_t vlan_filter[MLX5_MAX_VLAN_IDS]; /* VLAN filters table. */ unsigned int vlan_filter_n; /* Number of configured VLAN filters. */ /* Device properties. */ uint16_t mtu; /* Configured MTU. */ uint8_t port; /* Physical port number. */ - unsigned int pending_alarm:1; /* An alarm is pending. */ unsigned int isolated:1; /* Whether isolated mode is enabled. */ /* RX/TX queues. */ unsigned int rxqs_n; /* RX queues array size. 
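/*
 * struct mlx5_shared_data above must be reachable from primary and
 * secondary processes alike; mlx5_prepare_shared_data(), called from
 * the probe hunk earlier, places it in a named memzone, the usual
 * DPDK vehicle for such state. Minimal sketch of that
 * reserve-or-lookup pattern; the zone name is illustrative:
 */
#include <stddef.h>
#include <rte_eal.h>
#include <rte_memzone.h>

static void *
shared_data_attach(size_t size)
{
        const struct rte_memzone *mz;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                mz = rte_memzone_reserve("example_shared_data", size,
                                         SOCKET_ID_ANY, 0);
        else
                mz = rte_memzone_lookup("example_shared_data");
        return mz == NULL ? NULL : mz->addr;
}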
*/ unsigned int txqs_n; /* TX queues array size. */ struct mlx5_rxq_data *(*rxqs)[]; /* RX queues. */ struct mlx5_txq_data *(*txqs)[]; /* TX queues. */ + struct rte_mempool *mprq_mp; /* Mempool for Multi-Packet RQ. */ struct rte_eth_rss_conf rss_conf; /* RSS configuration. */ struct rte_intr_handle intr_handle; /* Interrupt handler. */ unsigned int (*reta_idx)[]; /* RETA index table. */ @@ -139,7 +174,13 @@ struct priv { struct mlx5_hrxq_drop *flow_drop_queue; /* Flow drop queue. */ struct mlx5_flows flows; /* RTE Flow rules. */ struct mlx5_flows ctrl_flows; /* Control flow rules. */ - LIST_HEAD(mr, mlx5_mr) mr; /* Memory region. */ + struct { + uint32_t dev_gen; /* Generation number to flush local caches. */ + rte_rwlock_t rwlock; /* MR Lock. */ + struct mlx5_mr_btree cache; /* Global MR cache table. */ + struct mlx5_mr_list mr_list; /* Registered MR list. */ + struct mlx5_mr_list mr_free_list; /* Freed MR list. */ + } mr; LIST_HEAD(rxq, mlx5_rxq_ctrl) rxqsctrl; /* DPDK Rx queues. */ LIST_HEAD(rxqibv, mlx5_rxq_ibv) rxqsibv; /* Verbs Rx queues. */ LIST_HEAD(hrxq, mlx5_hrxq) hrxqs; /* Verbs Hash Rx queues. */ @@ -149,55 +190,18 @@ struct priv { LIST_HEAD(ind_tables, mlx5_ind_table_ibv) ind_tbls; uint32_t link_speed_capa; /* Link speed capabilities. */ struct mlx5_xstats_ctrl xstats_ctrl; /* Extended stats control. */ - rte_spinlock_t lock; /* Lock for control functions. */ int primary_socket; /* Unix socket for primary process. */ void *uar_base; /* Reserved address space for UAR mapping */ struct rte_intr_handle intr_handle_socket; /* Interrupt handler. */ struct mlx5_dev_config config; /* Device configuration. */ struct mlx5_verbs_alloc_ctx verbs_alloc_ctx; /* Context for Verbs allocator. */ + int nl_socket; /* Netlink socket. */ + uint32_t nl_sn; /* Netlink message sequence number. */ }; -/** - * Lock private structure to protect it from concurrent access in the - * control path. - * - * @param priv - * Pointer to private structure. - */ -static inline void -priv_lock(struct priv *priv) -{ - rte_spinlock_lock(&priv->lock); -} - -/** - * Try to lock private structure to protect it from concurrent access in the - * control path. - * - * @param priv - * Pointer to private structure. - * - * @return - * 1 if the lock is successfully taken; 0 otherwise. - */ -static inline int -priv_trylock(struct priv *priv) -{ - return rte_spinlock_trylock(&priv->lock); -} - -/** - * Unlock private structure. - * - * @param priv - * Pointer to private structure. 
- */ -static inline void -priv_unlock(struct priv *priv) -{ - rte_spinlock_unlock(&priv->lock); -} +#define PORT_ID(priv) ((priv)->dev_data->port_id) +#define ETH_DEV(priv) (&rte_eth_devices[PORT_ID(priv)]) /* mlx5.c */ @@ -205,131 +209,148 @@ int mlx5_getenv_int(const char *); /* mlx5_ethdev.c */ -struct priv *mlx5_get_priv(struct rte_eth_dev *dev); -int mlx5_is_secondary(void); -int priv_get_ifname(const struct priv *, char (*)[IF_NAMESIZE]); -int priv_ifreq(const struct priv *, int req, struct ifreq *); -int priv_is_ib_cntr(const char *); -int priv_get_cntr_sysfs(struct priv *, const char *, uint64_t *); -int priv_get_num_vfs(struct priv *, uint16_t *); -int priv_get_mtu(struct priv *, uint16_t *); -int priv_set_flags(struct priv *, unsigned int, unsigned int); -int mlx5_dev_configure(struct rte_eth_dev *); -void mlx5_dev_infos_get(struct rte_eth_dev *, struct rte_eth_dev_info *); +int mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[IF_NAMESIZE]); +int mlx5_ifindex(const struct rte_eth_dev *dev); +int mlx5_ifreq(const struct rte_eth_dev *dev, int req, struct ifreq *ifr); +int mlx5_get_mtu(struct rte_eth_dev *dev, uint16_t *mtu); +int mlx5_set_flags(struct rte_eth_dev *dev, unsigned int keep, + unsigned int flags); +int mlx5_dev_configure(struct rte_eth_dev *dev); +void mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info); const uint32_t *mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev); -int priv_link_update(struct priv *, int); -int priv_force_link_status_change(struct priv *, int); -int mlx5_link_update(struct rte_eth_dev *, int); -int mlx5_dev_set_mtu(struct rte_eth_dev *, uint16_t); -int mlx5_dev_get_flow_ctrl(struct rte_eth_dev *, struct rte_eth_fc_conf *); -int mlx5_dev_set_flow_ctrl(struct rte_eth_dev *, struct rte_eth_fc_conf *); -int mlx5_ibv_device_to_pci_addr(const struct ibv_device *, - struct rte_pci_addr *); -void mlx5_dev_link_status_handler(void *); -void mlx5_dev_interrupt_handler(void *); -void priv_dev_interrupt_handler_uninstall(struct priv *, struct rte_eth_dev *); -void priv_dev_interrupt_handler_install(struct priv *, struct rte_eth_dev *); +int mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete); +int mlx5_force_link_status_change(struct rte_eth_dev *dev, int status); +int mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu); +int mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, + struct rte_eth_fc_conf *fc_conf); +int mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, + struct rte_eth_fc_conf *fc_conf); +int mlx5_ibv_device_to_pci_addr(const struct ibv_device *device, + struct rte_pci_addr *pci_addr); +void mlx5_dev_link_status_handler(void *arg); +void mlx5_dev_interrupt_handler(void *arg); +void mlx5_dev_interrupt_handler_uninstall(struct rte_eth_dev *dev); +void mlx5_dev_interrupt_handler_install(struct rte_eth_dev *dev); int mlx5_set_link_down(struct rte_eth_dev *dev); int mlx5_set_link_up(struct rte_eth_dev *dev); int mlx5_is_removed(struct rte_eth_dev *dev); -eth_tx_burst_t priv_select_tx_function(struct priv *, struct rte_eth_dev *); -eth_rx_burst_t priv_select_rx_function(struct priv *, struct rte_eth_dev *); +eth_tx_burst_t mlx5_select_tx_function(struct rte_eth_dev *dev); +eth_rx_burst_t mlx5_select_rx_function(struct rte_eth_dev *dev); /* mlx5_mac.c */ -int priv_get_mac(struct priv *, uint8_t (*)[ETHER_ADDR_LEN]); -void mlx5_mac_addr_remove(struct rte_eth_dev *, uint32_t); -int mlx5_mac_addr_add(struct rte_eth_dev *, struct ether_addr *, uint32_t, - uint32_t); -void mlx5_mac_addr_set(struct 
rte_eth_dev *, struct ether_addr *); +int mlx5_get_mac(struct rte_eth_dev *dev, uint8_t (*mac)[ETHER_ADDR_LEN]); +void mlx5_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index); +int mlx5_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac, + uint32_t index, uint32_t vmdq); +int mlx5_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr); +int mlx5_set_mc_addr_list(struct rte_eth_dev *dev, + struct ether_addr *mc_addr_set, uint32_t nb_mc_addr); /* mlx5_rss.c */ -int mlx5_rss_hash_update(struct rte_eth_dev *, struct rte_eth_rss_conf *); -int mlx5_rss_hash_conf_get(struct rte_eth_dev *, struct rte_eth_rss_conf *); -int priv_rss_reta_index_resize(struct priv *, unsigned int); -int mlx5_dev_rss_reta_query(struct rte_eth_dev *, - struct rte_eth_rss_reta_entry64 *, uint16_t); -int mlx5_dev_rss_reta_update(struct rte_eth_dev *, - struct rte_eth_rss_reta_entry64 *, uint16_t); +int mlx5_rss_hash_update(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf); +int mlx5_rss_hash_conf_get(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf); +int mlx5_rss_reta_index_resize(struct rte_eth_dev *dev, unsigned int reta_size); +int mlx5_dev_rss_reta_query(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size); +int mlx5_dev_rss_reta_update(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size); /* mlx5_rxmode.c */ -void mlx5_promiscuous_enable(struct rte_eth_dev *); -void mlx5_promiscuous_disable(struct rte_eth_dev *); -void mlx5_allmulticast_enable(struct rte_eth_dev *); -void mlx5_allmulticast_disable(struct rte_eth_dev *); +void mlx5_promiscuous_enable(struct rte_eth_dev *dev); +void mlx5_promiscuous_disable(struct rte_eth_dev *dev); +void mlx5_allmulticast_enable(struct rte_eth_dev *dev); +void mlx5_allmulticast_disable(struct rte_eth_dev *dev); /* mlx5_stats.c */ -void priv_xstats_init(struct priv *); -int mlx5_stats_get(struct rte_eth_dev *, struct rte_eth_stats *); -void mlx5_stats_reset(struct rte_eth_dev *); -int mlx5_xstats_get(struct rte_eth_dev *, - struct rte_eth_xstat *, unsigned int); -void mlx5_xstats_reset(struct rte_eth_dev *); -int mlx5_xstats_get_names(struct rte_eth_dev *, - struct rte_eth_xstat_name *, unsigned int); +void mlx5_xstats_init(struct rte_eth_dev *dev); +int mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats); +void mlx5_stats_reset(struct rte_eth_dev *dev); +int mlx5_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats, + unsigned int n); +void mlx5_xstats_reset(struct rte_eth_dev *dev); +int mlx5_xstats_get_names(struct rte_eth_dev *dev __rte_unused, + struct rte_eth_xstat_name *xstats_names, + unsigned int n); /* mlx5_vlan.c */ -int mlx5_vlan_filter_set(struct rte_eth_dev *, uint16_t, int); -int mlx5_vlan_offload_set(struct rte_eth_dev *, int); -void mlx5_vlan_strip_queue_set(struct rte_eth_dev *, uint16_t, int); +int mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on); +void mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on); +int mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask); /* mlx5_trigger.c */ -int mlx5_dev_start(struct rte_eth_dev *); -void mlx5_dev_stop(struct rte_eth_dev *); -int priv_dev_traffic_enable(struct priv *, struct rte_eth_dev *); -int priv_dev_traffic_disable(struct priv *, struct rte_eth_dev *); -int priv_dev_traffic_restart(struct priv *, struct rte_eth_dev *); -int mlx5_traffic_restart(struct rte_eth_dev *); +int mlx5_dev_start(struct rte_eth_dev *dev); 
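/*
 * The prototype churn above follows one rule: each priv_*() helper
 * taking struct priv * becomes an mlx5_*() function taking struct
 * rte_eth_dev * and deriving its private data internally, which is
 * what let the priv_lock()/priv_unlock() wrappers disappear. Shape of
 * a converted function; my_priv is an illustrative stand-in for the
 * driver's private structure:
 */
#include <rte_ethdev_driver.h>

struct my_priv {
        unsigned int rxqs_n;
};

static unsigned int
mlx5_like_rxqs_n(struct rte_eth_dev *dev)
{
        struct my_priv *priv = dev->data->dev_private;

        return priv->rxqs_n;
}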
+void mlx5_dev_stop(struct rte_eth_dev *dev); +int mlx5_traffic_enable(struct rte_eth_dev *dev); +void mlx5_traffic_disable(struct rte_eth_dev *dev); +int mlx5_traffic_restart(struct rte_eth_dev *dev); /* mlx5_flow.c */ -int mlx5_dev_filter_ctrl(struct rte_eth_dev *, enum rte_filter_type, - enum rte_filter_op, void *); -int mlx5_flow_validate(struct rte_eth_dev *, const struct rte_flow_attr *, - const struct rte_flow_item [], - const struct rte_flow_action [], - struct rte_flow_error *); -struct rte_flow *mlx5_flow_create(struct rte_eth_dev *, - const struct rte_flow_attr *, - const struct rte_flow_item [], - const struct rte_flow_action [], - struct rte_flow_error *); -int mlx5_flow_destroy(struct rte_eth_dev *, struct rte_flow *, - struct rte_flow_error *); -void priv_flow_flush(struct priv *, struct mlx5_flows *); -int mlx5_flow_flush(struct rte_eth_dev *, struct rte_flow_error *); -int mlx5_flow_query(struct rte_eth_dev *, struct rte_flow *, - enum rte_flow_action_type, void *, - struct rte_flow_error *); -int mlx5_flow_isolate(struct rte_eth_dev *, int, struct rte_flow_error *); -int priv_flow_start(struct priv *, struct mlx5_flows *); -void priv_flow_stop(struct priv *, struct mlx5_flows *); -int priv_flow_verify(struct priv *); -int mlx5_ctrl_flow_vlan(struct rte_eth_dev *, struct rte_flow_item_eth *, - struct rte_flow_item_eth *, struct rte_flow_item_vlan *, - struct rte_flow_item_vlan *); -int mlx5_ctrl_flow(struct rte_eth_dev *, struct rte_flow_item_eth *, - struct rte_flow_item_eth *); -int priv_flow_create_drop_queue(struct priv *); -void priv_flow_delete_drop_queue(struct priv *); +unsigned int mlx5_get_max_verbs_prio(struct rte_eth_dev *dev); +int mlx5_flow_validate(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item items[], + const struct rte_flow_action actions[], + struct rte_flow_error *error); +struct rte_flow *mlx5_flow_create(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item items[], + const struct rte_flow_action actions[], + struct rte_flow_error *error); +int mlx5_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow, + struct rte_flow_error *error); +void mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list); +int mlx5_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error); +int mlx5_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow, + const struct rte_flow_action *action, void *data, + struct rte_flow_error *error); +int mlx5_flow_isolate(struct rte_eth_dev *dev, int enable, + struct rte_flow_error *error); +int mlx5_dev_filter_ctrl(struct rte_eth_dev *dev, + enum rte_filter_type filter_type, + enum rte_filter_op filter_op, + void *arg); +int mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list); +void mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list); +int mlx5_flow_verify(struct rte_eth_dev *dev); +int mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, + struct rte_flow_item_eth *eth_spec, + struct rte_flow_item_eth *eth_mask, + struct rte_flow_item_vlan *vlan_spec, + struct rte_flow_item_vlan *vlan_mask); +int mlx5_ctrl_flow(struct rte_eth_dev *dev, + struct rte_flow_item_eth *eth_spec, + struct rte_flow_item_eth *eth_mask); +int mlx5_flow_create_drop_queue(struct rte_eth_dev *dev); +void mlx5_flow_delete_drop_queue(struct rte_eth_dev *dev); /* mlx5_socket.c */ -int priv_socket_init(struct priv *priv); -int priv_socket_uninit(struct priv *priv); -void priv_socket_handle(struct priv *priv); -int priv_socket_connect(struct 
priv *priv); - -/* mlx5_mr.c */ - -struct mlx5_mr *priv_mr_new(struct priv *, struct rte_mempool *); -struct mlx5_mr *priv_mr_get(struct priv *, struct rte_mempool *); -int priv_mr_release(struct priv *, struct mlx5_mr *); -int priv_mr_verify(struct priv *); +int mlx5_socket_init(struct rte_eth_dev *priv); +void mlx5_socket_uninit(struct rte_eth_dev *priv); +void mlx5_socket_handle(struct rte_eth_dev *priv); +int mlx5_socket_connect(struct rte_eth_dev *priv); + +/* mlx5_nl.c */ + +int mlx5_nl_init(uint32_t nlgroups); +int mlx5_nl_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac, + uint32_t index); +int mlx5_nl_mac_addr_remove(struct rte_eth_dev *dev, struct ether_addr *mac, + uint32_t index); +void mlx5_nl_mac_addr_sync(struct rte_eth_dev *dev); +void mlx5_nl_mac_addr_flush(struct rte_eth_dev *dev); +int mlx5_nl_promisc(struct rte_eth_dev *dev, int enable); +int mlx5_nl_allmulti(struct rte_eth_dev *dev, int enable); #endif /* RTE_PMD_MLX5_H_ */ diff --git a/drivers/net/mlx5/mlx5_defs.h b/drivers/net/mlx5/mlx5_defs.h index c3334ca3..51124cdc 100644 --- a/drivers/net/mlx5/mlx5_defs.h +++ b/drivers/net/mlx5/mlx5_defs.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2015 6WIND S.A. - * Copyright 2015 Mellanox. + * Copyright 2015 Mellanox Technologies, Ltd */ #ifndef RTE_PMD_MLX5_DEFS_H_ @@ -13,8 +13,13 @@ /* Reported driver name. */ #define MLX5_DRIVER_NAME "net_mlx5" +/* Maximum number of simultaneous unicast MAC addresses. */ +#define MLX5_MAX_UC_MAC_ADDRESSES 128 +/* Maximum number of simultaneous Multicast MAC addresses. */ +#define MLX5_MAX_MC_MAC_ADDRESSES 128 /* Maximum number of simultaneous MAC addresses. */ -#define MLX5_MAX_MAC_ADDRESSES 128 +#define MLX5_MAX_MAC_ADDRESSES \ + (MLX5_MAX_UC_MAC_ADDRESSES + MLX5_MAX_MC_MAC_ADDRESSES) /* Maximum number of simultaneous VLAN filters. */ #define MLX5_MAX_VLAN_IDS 128 @@ -32,16 +37,11 @@ */ #define MLX5_TX_COMP_THRESH_INLINE_DIV (1 << 3) -/* - * Maximum number of cached Memory Pools (MPs) per TX queue. Each RTE MP - * from which buffers are to be transmitted will have to be mapped by this - * driver to their own Memory Region (MR). This is a slow operation. - * - * This value is always 1 for RX queues. - */ -#ifndef MLX5_PMD_TX_MP_CACHE -#define MLX5_PMD_TX_MP_CACHE 8 -#endif +/* Size of per-queue MR cache array for linear search. */ +#define MLX5_MR_CACHE_N 8 + +/* Size of MR cache table for binary search. */ +#define MLX5_MR_BTREE_CACHE_N 256 /* * If defined, only use software counters. The PMD will never ask the hardware @@ -58,7 +58,7 @@ #define MLX5_MAX_XSTATS 32 /* Maximum Packet headers size (L2+L3+L4) for TSO. */ -#define MLX5_MAX_TSO_HEADER 128 +#define MLX5_MAX_TSO_HEADER 192 /* Default minimum number of Tx queues for vectorized Tx. */ #define MLX5_VPMD_MIN_TXQS 4 @@ -82,8 +82,8 @@ /* Supported RSS */ #define MLX5_RSS_HF_MASK (~(ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP)) -/* Maximum number of attempts to query link status before giving up. */ -#define MLX5_MAX_LINK_QUERY_ATTEMPTS 5 +/* Timeout in seconds to get a valid link status. */ +#define MLX5_LINK_STATUS_TIMEOUT 10 /* Reserved address space for UAR mapping. */ #define MLX5_UAR_SIZE (1ULL << 32) @@ -95,4 +95,22 @@ */ #define MLX5_UAR_OFFSET (1ULL << 32) +/* Log 2 of the default number of strides per WQE for Multi-Packet RQ. */ +#define MLX5_MPRQ_STRIDE_NUM_N 4U + +/* Two-byte shift is disabled for Multi-Packet RQ. 
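/*
 * The Multi-Packet RQ values above with an _N/_n suffix are
 * log2-encoded: the actual stride count per WQE is
 * 1 << MLX5_MPRQ_STRIDE_NUM_N. Tiny illustration of decoding them,
 * with the constant mirrored locally:
 */
#include <stdio.h>

#define EXAMPLE_MPRQ_STRIDE_NUM_N 4U /* mirrors MLX5_MPRQ_STRIDE_NUM_N */

int
main(void)
{
        printf("strides per Multi-Packet RQ WQE: %u\n",
               1U << EXAMPLE_MPRQ_STRIDE_NUM_N); /* prints 16 */
        return 0;
}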
*/ +#define MLX5_MPRQ_TWO_BYTE_SHIFT 0 + +/* + * Minimum size of packet to be memcpy'd instead of being attached as an + * external buffer. + */ +#define MLX5_MPRQ_MEMCPY_DEFAULT_LEN 128 + +/* Minimum number Rx queues to enable Multi-Packet RQ. */ +#define MLX5_MPRQ_MIN_RXQS 12 + +/* Cache size of mempool for Multi-Packet RQ. */ +#define MLX5_MPRQ_MP_CACHE_SZ 32 + #endif /* RTE_PMD_MLX5_DEFS_H_ */ diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c index 66650769..90488af3 100644 --- a/drivers/net/mlx5/mlx5_ethdev.c +++ b/drivers/net/mlx5/mlx5_ethdev.c @@ -1,12 +1,13 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2015 6WIND S.A. - * Copyright 2015 Mellanox. + * Copyright 2015 Mellanox Technologies, Ltd */ #define _GNU_SOURCE #include #include +#include #include #include #include @@ -17,14 +18,13 @@ #include #include #include -#include #include #include #include -#include #include #include #include +#include #include #include @@ -32,8 +32,9 @@ #include #include #include -#include #include +#include +#include #include "mlx5.h" #include "mlx5_glue.h" @@ -94,17 +95,18 @@ struct ethtool_link_settings { /** * Get interface name from private structure. * - * @param[in] priv - * Pointer to private structure. + * @param[in] dev + * Pointer to Ethernet device. * @param[out] ifname * Interface name output buffer. * * @return - * 0 on success, -1 on failure and errno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int -priv_get_ifname(const struct priv *priv, char (*ifname)[IF_NAMESIZE]) +mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[IF_NAMESIZE]) { + struct priv *priv = dev->data->dev_private; DIR *dir; struct dirent *dent; unsigned int dev_type = 0; @@ -115,8 +117,10 @@ priv_get_ifname(const struct priv *priv, char (*ifname)[IF_NAMESIZE]) MKSTR(path, "%s/device/net", priv->ibdev_path); dir = opendir(path); - if (dir == NULL) - return -1; + if (dir == NULL) { + rte_errno = errno; + return -rte_errno; + } } while ((dent = readdir(dir)) != NULL) { char *name = dent->d_name; @@ -163,358 +167,161 @@ try_dev_id: goto try_dev_id; dev_port_prev = dev_port; if (dev_port == (priv->port - 1u)) - snprintf(match, sizeof(match), "%s", name); + strlcpy(match, name, sizeof(match)); } closedir(dir); - if (match[0] == '\0') - return -1; + if (match[0] == '\0') { + rte_errno = ENOENT; + return -rte_errno; + } strncpy(*ifname, match, sizeof(*ifname)); return 0; } /** - * Check if the counter is located on ib counters file. + * Get the interface index from device name. * - * @param[in] cntr - * Counter name. + * @param[in] dev + * Pointer to Ethernet device. * * @return - * 1 if counter is located on ib counters file , 0 otherwise. + * Interface index on success, a negative errno value otherwise and + * rte_errno is set. */ int -priv_is_ib_cntr(const char *cntr) -{ - if (!strcmp(cntr, "out_of_buffer")) - return 1; - return 0; -} - -/** - * Read from sysfs entry. - * - * @param[in] priv - * Pointer to private structure. - * @param[in] entry - * Entry name relative to sysfs path. - * @param[out] buf - * Data output buffer. - * @param size - * Buffer size. - * - * @return - * 0 on success, -1 on failure and errno is set. 
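/*
 * mlx5_get_ifname() above resolves the kernel netdev name by listing
 * <ibdev_path>/device/net in sysfs, and now reports failure through
 * rte_errno. A reduced standalone version of that directory scan; the
 * multi-port matching logic is omitted and the path handling is
 * simplified for illustration:
 */
#include <dirent.h>
#include <errno.h>
#include <stddef.h>
#include <string.h>
#include <rte_errno.h>

static int
first_netdev_name(const char *sysfs_net_dir, char *out, size_t out_sz)
{
        DIR *dir = opendir(sysfs_net_dir);
        struct dirent *dent;

        if (dir == NULL) {
                rte_errno = errno;
                return -rte_errno;
        }
        while ((dent = readdir(dir)) != NULL) {
                if (dent->d_name[0] == '.')
                        continue;
                strncpy(out, dent->d_name, out_sz - 1);
                out[out_sz - 1] = '\0';
                closedir(dir);
                return 0;
        }
        closedir(dir);
        rte_errno = ENOENT;
        return -rte_errno;
}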
- */ -static int -priv_sysfs_read(const struct priv *priv, const char *entry, - char *buf, size_t size) -{ - char ifname[IF_NAMESIZE]; - FILE *file; - int ret; - int err; - - if (priv_get_ifname(priv, &ifname)) - return -1; - - if (priv_is_ib_cntr(entry)) { - MKSTR(path, "%s/ports/1/hw_counters/%s", - priv->ibdev_path, entry); - file = fopen(path, "rb"); - } else { - MKSTR(path, "%s/device/net/%s/%s", - priv->ibdev_path, ifname, entry); - file = fopen(path, "rb"); - } - if (file == NULL) - return -1; - ret = fread(buf, 1, size, file); - err = errno; - if (((size_t)ret < size) && (ferror(file))) - ret = -1; - else - ret = size; - fclose(file); - errno = err; - return ret; -} - -/** - * Write to sysfs entry. - * - * @param[in] priv - * Pointer to private structure. - * @param[in] entry - * Entry name relative to sysfs path. - * @param[in] buf - * Data buffer. - * @param size - * Buffer size. - * - * @return - * 0 on success, -1 on failure and errno is set. - */ -static int -priv_sysfs_write(const struct priv *priv, const char *entry, - char *buf, size_t size) +mlx5_ifindex(const struct rte_eth_dev *dev) { char ifname[IF_NAMESIZE]; - FILE *file; int ret; - int err; - - if (priv_get_ifname(priv, &ifname)) - return -1; - MKSTR(path, "%s/device/net/%s/%s", priv->ibdev_path, ifname, entry); - - file = fopen(path, "wb"); - if (file == NULL) - return -1; - ret = fwrite(buf, 1, size, file); - err = errno; - if (((size_t)ret < size) || (ferror(file))) - ret = -1; - else - ret = size; - fclose(file); - errno = err; - return ret; -} - -/** - * Get unsigned long sysfs property. - * - * @param priv - * Pointer to private structure. - * @param[in] name - * Entry name relative to sysfs path. - * @param[out] value - * Value output buffer. - * - * @return - * 0 on success, -1 on failure and errno is set. - */ -static int -priv_get_sysfs_ulong(struct priv *priv, const char *name, unsigned long *value) -{ - int ret; - unsigned long value_ret; - char value_str[32]; - - ret = priv_sysfs_read(priv, name, value_str, (sizeof(value_str) - 1)); - if (ret == -1) { - DEBUG("cannot read %s value from sysfs: %s", - name, strerror(errno)); - return -1; - } - value_str[ret] = '\0'; - errno = 0; - value_ret = strtoul(value_str, NULL, 0); - if (errno) { - DEBUG("invalid %s value `%s': %s", name, value_str, - strerror(errno)); - return -1; - } - *value = value_ret; - return 0; -} - -/** - * Set unsigned long sysfs property. - * - * @param priv - * Pointer to private structure. - * @param[in] name - * Entry name relative to sysfs path. - * @param value - * Value to set. - * - * @return - * 0 on success, -1 on failure and errno is set. - */ -static int -priv_set_sysfs_ulong(struct priv *priv, const char *name, unsigned long value) -{ - int ret; - MKSTR(value_str, "%lu", value); - - ret = priv_sysfs_write(priv, name, value_str, (sizeof(value_str) - 1)); + ret = mlx5_get_ifname(dev, &ifname); + if (ret) + return ret; + ret = if_nametoindex(ifname); if (ret == -1) { - DEBUG("cannot write %s `%s' (%lu) to sysfs: %s", - name, value_str, value, strerror(errno)); - return -1; + rte_errno = errno; + return -rte_errno; } - return 0; + return ret; } /** * Perform ifreq ioctl() on associated Ethernet device. * - * @param[in] priv - * Pointer to private structure. + * @param[in] dev + * Pointer to Ethernet device. * @param req * Request number to pass to ioctl(). * @param[out] ifr * Interface request structure output buffer. * * @return - * 0 on success, -1 on failure and errno is set. 
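/*
 * mlx5_ifindex() above maps the netdev name to an interface index
 * with if_nametoindex(3). Note that if_nametoindex() reports failure
 * by returning 0, not -1, so the "ret == -1" test in the hunk can
 * never fire; a standalone version should test for 0:
 */
#include <errno.h>
#include <net/if.h>
#include <rte_errno.h>

static int
ifindex_of(const char *ifname)
{
        unsigned int idx = if_nametoindex(ifname);

        if (idx == 0) {
                rte_errno = errno;
                return -rte_errno;
        }
        return (int)idx;
}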
+ * 0 on success, a negative errno value otherwise and rte_errno is set. */ int -priv_ifreq(const struct priv *priv, int req, struct ifreq *ifr) +mlx5_ifreq(const struct rte_eth_dev *dev, int req, struct ifreq *ifr) { int sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP); - int ret = -1; + int ret = 0; - if (sock == -1) - return ret; - if (priv_get_ifname(priv, &ifr->ifr_name) == 0) - ret = ioctl(sock, req, ifr); + if (sock == -1) { + rte_errno = errno; + return -rte_errno; + } + ret = mlx5_get_ifname(dev, &ifr->ifr_name); + if (ret) + goto error; + ret = ioctl(sock, req, ifr); + if (ret == -1) { + rte_errno = errno; + goto error; + } close(sock); - return ret; -} - -/** - * Return the number of active VFs for the current device. - * - * @param[in] priv - * Pointer to private structure. - * @param[out] num_vfs - * Number of active VFs. - * - * @return - * 0 on success, -1 on failure and errno is set. - */ -int -priv_get_num_vfs(struct priv *priv, uint16_t *num_vfs) -{ - /* The sysfs entry name depends on the operating system. */ - const char **name = (const char *[]){ - "device/sriov_numvfs", - "device/mlx5_num_vfs", - NULL, - }; - int ret; - - do { - unsigned long ulong_num_vfs; - - ret = priv_get_sysfs_ulong(priv, *name, &ulong_num_vfs); - if (!ret) - *num_vfs = ulong_num_vfs; - } while (*(++name) && ret); - return ret; + return 0; +error: + close(sock); + return -rte_errno; } /** * Get device MTU. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param[out] mtu * MTU value output buffer. * * @return - * 0 on success, -1 on failure and errno is set. - */ -int -priv_get_mtu(struct priv *priv, uint16_t *mtu) -{ - unsigned long ulong_mtu; - - if (priv_get_sysfs_ulong(priv, "mtu", &ulong_mtu) == -1) - return -1; - *mtu = ulong_mtu; - return 0; -} - -/** - * Read device counter from sysfs. - * - * @param priv - * Pointer to private structure. - * @param name - * Counter name. - * @param[out] cntr - * Counter output buffer. - * - * @return - * 0 on success, -1 on failure and errno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int -priv_get_cntr_sysfs(struct priv *priv, const char *name, uint64_t *cntr) +mlx5_get_mtu(struct rte_eth_dev *dev, uint16_t *mtu) { - unsigned long ulong_ctr; + struct ifreq request; + int ret = mlx5_ifreq(dev, SIOCGIFMTU, &request); - if (priv_get_sysfs_ulong(priv, name, &ulong_ctr) == -1) - return -1; - *cntr = ulong_ctr; + if (ret) + return ret; + *mtu = request.ifr_mtu; return 0; } /** * Set device MTU. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param mtu * MTU value to set. * * @return - * 0 on success, -1 on failure and errno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int -priv_set_mtu(struct priv *priv, uint16_t mtu) +mlx5_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) { - uint16_t new_mtu; + struct ifreq request = { .ifr_mtu = mtu, }; - if (priv_set_sysfs_ulong(priv, "mtu", mtu) || - priv_get_mtu(priv, &new_mtu)) - return -1; - if (new_mtu == mtu) - return 0; - errno = EINVAL; - return -1; + return mlx5_ifreq(dev, SIOCSIFMTU, &request); } /** * Set device flags. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param keep * Bitmask for flags that must remain untouched. * @param flags * Bitmask for flags to modify. * * @return - * 0 on success, -1 on failure and errno is set. 
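/*
 * The sysfs read/write helpers removed above are replaced by
 * SIOCGIFMTU/SIOCSIFMTU and SIOCGIFFLAGS/SIOCSIFFLAGS ioctls on a
 * throwaway datagram socket. Self-contained sketch of the get-MTU
 * path, using plain negative errno returns for brevity:
 */
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <netinet/in.h>
#include <unistd.h>

static int
get_mtu(const char *ifname, uint16_t *mtu)
{
        struct ifreq ifr;
        int err;
        int sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP);

        if (sock == -1)
                return -errno;
        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name) - 1);
        if (ioctl(sock, SIOCGIFMTU, &ifr) == -1) {
                err = -errno;
                close(sock);
                return err;
        }
        close(sock);
        *mtu = ifr.ifr_mtu;
        return 0;
}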
+ * 0 on success, a negative errno value otherwise and rte_errno is set. */ int -priv_set_flags(struct priv *priv, unsigned int keep, unsigned int flags) +mlx5_set_flags(struct rte_eth_dev *dev, unsigned int keep, unsigned int flags) { - unsigned long tmp; + struct ifreq request; + int ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &request); - if (priv_get_sysfs_ulong(priv, "flags", &tmp) == -1) - return -1; - tmp &= keep; - tmp |= (flags & (~keep)); - return priv_set_sysfs_ulong(priv, "flags", tmp); + if (ret) + return ret; + request.ifr_flags &= keep; + request.ifr_flags |= flags & ~keep; + return mlx5_ifreq(dev, SIOCSIFFLAGS, &request); } /** - * Ethernet device configuration. - * - * Prepare the driver for a given number of TX and RX queues. + * DPDK callback for Ethernet device configuration. * * @param dev * Pointer to Ethernet device structure. * * @return - * 0 on success, errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ -static int -dev_configure(struct rte_eth_dev *dev) +int +mlx5_dev_configure(struct rte_eth_dev *dev) { struct priv *priv = dev->data->dev_private; unsigned int rxqs_n = dev->data->nb_rx_queues; @@ -524,37 +331,24 @@ dev_configure(struct rte_eth_dev *dev) unsigned int reta_idx_n; const uint8_t use_app_rss_key = !!dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key; - uint64_t supp_tx_offloads = mlx5_priv_get_tx_port_offloads(priv); - uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads; - uint64_t supp_rx_offloads = - (mlx5_priv_get_rx_port_offloads(priv) | - mlx5_priv_get_rx_queue_offloads(priv)); - uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads; - - if ((tx_offloads & supp_tx_offloads) != tx_offloads) { - ERROR("Some Tx offloads are not supported " - "requested 0x%" PRIx64 " supported 0x%" PRIx64, - tx_offloads, supp_tx_offloads); - return ENOTSUP; - } - if ((rx_offloads & supp_rx_offloads) != rx_offloads) { - ERROR("Some Rx offloads are not supported " - "requested 0x%" PRIx64 " supported 0x%" PRIx64, - rx_offloads, supp_rx_offloads); - return ENOTSUP; - } + int ret = 0; + if (use_app_rss_key && (dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len != rss_hash_default_key_len)) { - /* MLX5 RSS only support 40bytes key. */ - return EINVAL; + DRV_LOG(ERR, "port %u RSS key len must be %zu Bytes long", + dev->data->port_id, rss_hash_default_key_len); + rte_errno = EINVAL; + return -rte_errno; } priv->rss_conf.rss_key = rte_realloc(priv->rss_conf.rss_key, rss_hash_default_key_len, 0); if (!priv->rss_conf.rss_key) { - ERROR("cannot allocate RSS hash key memory (%u)", rxqs_n); - return ENOMEM; + DRV_LOG(ERR, "port %u cannot allocate RSS hash key memory (%u)", + dev->data->port_id, rxqs_n); + rte_errno = ENOMEM; + return -rte_errno; } memcpy(priv->rss_conf.rss_key, use_app_rss_key ? 
@@ -566,18 +360,20 @@ dev_configure(struct rte_eth_dev *dev) priv->rxqs = (void *)dev->data->rx_queues; priv->txqs = (void *)dev->data->tx_queues; if (txqs_n != priv->txqs_n) { - INFO("%p: TX queues number update: %u -> %u", - (void *)dev, priv->txqs_n, txqs_n); + DRV_LOG(INFO, "port %u Tx queues number update: %u -> %u", + dev->data->port_id, priv->txqs_n, txqs_n); priv->txqs_n = txqs_n; } if (rxqs_n > priv->config.ind_table_max_size) { - ERROR("cannot handle this many RX queues (%u)", rxqs_n); - return EINVAL; + DRV_LOG(ERR, "port %u cannot handle this many Rx queues (%u)", + dev->data->port_id, rxqs_n); + rte_errno = EINVAL; + return -rte_errno; } if (rxqs_n == priv->rxqs_n) return 0; - INFO("%p: RX queues number update: %u -> %u", - (void *)dev, priv->rxqs_n, rxqs_n); + DRV_LOG(INFO, "port %u Rx queues number update: %u -> %u", + dev->data->port_id, priv->rxqs_n, rxqs_n); priv->rxqs_n = rxqs_n; /* If the requested number of RX queues is not a power of two, use the * maximum indirection table size for better balancing. @@ -585,8 +381,9 @@ dev_configure(struct rte_eth_dev *dev) reta_idx_n = (1 << log2above((rxqs_n & (rxqs_n - 1)) ? priv->config.ind_table_max_size : rxqs_n)); - if (priv_rss_reta_index_resize(priv, reta_idx_n)) - return ENOMEM; + ret = mlx5_rss_reta_index_resize(dev, reta_idx_n); + if (ret) + return ret; /* When the number of RX queues is not a power of two, the remaining * table entries are padded with reused WQs and hashes are not spread * uniformly. */ @@ -599,25 +396,42 @@ dev_configure(struct rte_eth_dev *dev) } /** - * DPDK callback for Ethernet device configuration. + * Sets default tuning parameters. * * @param dev - * Pointer to Ethernet device structure. - * - * @return - * 0 on success, negative errno value on failure. + * Pointer to Ethernet device. + * @param[out] info + * Info structure output buffer. */ -int -mlx5_dev_configure(struct rte_eth_dev *dev) +static void +mlx5_set_default_params(struct rte_eth_dev *dev, struct rte_eth_dev_info *info) { struct priv *priv = dev->data->dev_private; - int ret; - priv_lock(priv); - ret = dev_configure(dev); - assert(ret >= 0); - priv_unlock(priv); - return -ret; + /* Minimum CPU utilization. */ + info->default_rxportconf.ring_size = 256; + info->default_txportconf.ring_size = 256; + info->default_rxportconf.burst_size = 64; + info->default_txportconf.burst_size = 64; + if (priv->link_speed_capa & ETH_LINK_SPEED_100G) { + info->default_rxportconf.nb_queues = 16; + info->default_txportconf.nb_queues = 16; + if (dev->data->nb_rx_queues > 2 || + dev->data->nb_tx_queues > 2) { + /* Max Throughput. */ + info->default_rxportconf.ring_size = 2048; + info->default_txportconf.ring_size = 2048; + } + } else { + info->default_rxportconf.nb_queues = 8; + info->default_txportconf.nb_queues = 8; + if (dev->data->nb_rx_queues > 2 || + dev->data->nb_tx_queues > 2) { + /* Max Throughput. */ + info->default_rxportconf.ring_size = 4096; + info->default_txportconf.ring_size = 4096; + } + } } /** @@ -636,9 +450,6 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info) unsigned int max; char ifname[IF_NAMESIZE]; - info->pci_dev = RTE_ETH_DEV_TO_PCI(dev); - - priv_lock(priv); /* FIXME: we should ask the device for these values. 
*/ info->min_rx_bufsize = 32; info->max_rx_pktlen = 65536; @@ -653,22 +464,30 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info) max = 65535; info->max_rx_queues = max; info->max_tx_queues = max; - info->max_mac_addrs = RTE_DIM(priv->mac); - info->rx_queue_offload_capa = - mlx5_priv_get_rx_queue_offloads(priv); - info->rx_offload_capa = (mlx5_priv_get_rx_port_offloads(priv) | + info->max_mac_addrs = MLX5_MAX_UC_MAC_ADDRESSES; + info->rx_queue_offload_capa = mlx5_get_rx_queue_offloads(dev); + info->rx_offload_capa = (mlx5_get_rx_port_offloads() | info->rx_queue_offload_capa); - info->tx_offload_capa = mlx5_priv_get_tx_port_offloads(priv); - if (priv_get_ifname(priv, &ifname) == 0) + info->tx_offload_capa = mlx5_get_tx_port_offloads(dev); + if (mlx5_get_ifname(dev, &ifname) == 0) info->if_index = if_nametoindex(ifname); info->reta_size = priv->reta_idx_n ? priv->reta_idx_n : config->ind_table_max_size; - info->hash_key_size = priv->rss_conf.rss_key_len; + info->hash_key_size = rss_hash_default_key_len; info->speed_capa = priv->link_speed_capa; info->flow_type_rss_offloads = ~MLX5_RSS_HF_MASK; - priv_unlock(priv); + mlx5_set_default_params(dev, info); } +/** + * Get supported packet types. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * A pointer to the supported Packet types array. + */ const uint32_t * mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev) { @@ -691,6 +510,7 @@ mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev) }; if (dev->rx_pkt_burst == mlx5_rx_burst || + dev->rx_pkt_burst == mlx5_rx_burst_mprq || dev->rx_pkt_burst == mlx5_rx_burst_vec) return ptypes; return NULL; @@ -701,11 +521,15 @@ mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev) * * @param dev * Pointer to Ethernet device structure. - * @param wait_to_complete - * Wait for request completion (ignored). + * @param[out] link + * Storage for current link status. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int -mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev, int wait_to_complete) +mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev, + struct rte_eth_link *link) { struct priv *priv = dev->data->dev_private; struct ethtool_cmd edata = { @@ -714,26 +538,28 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev, int wait_to_complete) struct ifreq ifr; struct rte_eth_link dev_link; int link_speed = 0; + int ret; - /* priv_lock() is not taken to allow concurrent calls. 
*/ - - (void)wait_to_complete; - if (priv_ifreq(priv, SIOCGIFFLAGS, &ifr)) { - WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(errno)); - return -1; + ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr); + if (ret) { + DRV_LOG(WARNING, "port %u ioctl(SIOCGIFFLAGS) failed: %s", + dev->data->port_id, strerror(rte_errno)); + return ret; } memset(&dev_link, 0, sizeof(dev_link)); dev_link.link_status = ((ifr.ifr_flags & IFF_UP) && (ifr.ifr_flags & IFF_RUNNING)); ifr.ifr_data = (void *)&edata; - if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) { - WARN("ioctl(SIOCETHTOOL, ETHTOOL_GSET) failed: %s", - strerror(errno)); - return -1; + ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); + if (ret) { + DRV_LOG(WARNING, + "port %u ioctl(SIOCETHTOOL, ETHTOOL_GSET) failed: %s", + dev->data->port_id, strerror(rte_errno)); + return ret; } link_speed = ethtool_cmd_speed(&edata); if (link_speed == -1) - dev_link.link_speed = 0; + dev_link.link_speed = ETH_SPEED_NUM_NONE; else dev_link.link_speed = link_speed; priv->link_speed_capa = 0; @@ -753,13 +579,13 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev, int wait_to_complete) ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX); dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED); - if (memcmp(&dev_link, &dev->data->dev_link, sizeof(dev_link))) { - /* Link status changed. */ - dev->data->dev_link = dev_link; - return 0; + if ((dev_link.link_speed && !dev_link.link_status) || + (!dev_link.link_speed && dev_link.link_status)) { + rte_errno = EAGAIN; + return -rte_errno; } - /* Link status is still the same. */ - return -1; + *link = dev_link; + return 0; } /** @@ -767,31 +593,41 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev, int wait_to_complete) * * @param dev * Pointer to Ethernet device structure. - * @param wait_to_complete - * Wait for request completion (ignored). + * @param[out] link + * Storage for current link status. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
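+ *
+ * When the ioctl succeeds but link speed and link status disagree, the
+ * function reports EAGAIN so the caller can poll again. A minimal,
+ * hypothetical caller sketch (illustrative only; the real retry loop is
+ * mlx5_link_update() below):
+ *
+ *   struct rte_eth_link link;
+ *   int ret = mlx5_link_update_unlocked_gs(dev, &link);
+ *   while (ret == -EAGAIN)
+ *       ret = mlx5_link_update_unlocked_gs(dev, &link);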
*/ static int -mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete) +mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, + struct rte_eth_link *link) + { struct priv *priv = dev->data->dev_private; struct ethtool_link_settings gcmd = { .cmd = ETHTOOL_GLINKSETTINGS }; struct ifreq ifr; struct rte_eth_link dev_link; uint64_t sc; + int ret; - (void)wait_to_complete; - if (priv_ifreq(priv, SIOCGIFFLAGS, &ifr)) { - WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(errno)); - return -1; + ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr); + if (ret) { + DRV_LOG(WARNING, "port %u ioctl(SIOCGIFFLAGS) failed: %s", + dev->data->port_id, strerror(rte_errno)); + return ret; } memset(&dev_link, 0, sizeof(dev_link)); dev_link.link_status = ((ifr.ifr_flags & IFF_UP) && (ifr.ifr_flags & IFF_RUNNING)); ifr.ifr_data = (void *)&gcmd; - if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) { - DEBUG("ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS) failed: %s", - strerror(errno)); - return -1; + ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); + if (ret) { + DRV_LOG(DEBUG, + "port %u ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS)" + " failed: %s", + dev->data->port_id, strerror(rte_errno)); + return ret; } gcmd.link_mode_masks_nwords = -gcmd.link_mode_masks_nwords; @@ -802,10 +638,13 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete) *ecmd = gcmd; ifr.ifr_data = (void *)ecmd; - if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) { - DEBUG("ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS) failed: %s", - strerror(errno)); - return -1; + ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); + if (ret) { + DRV_LOG(DEBUG, + "port %u ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS)" + " failed: %s", + dev->data->port_id, strerror(rte_errno)); + return ret; } dev_link.link_speed = ecmd->speed; sc = ecmd->link_mode_masks[0] | @@ -849,121 +688,13 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete) ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX); dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED); - if (memcmp(&dev_link, &dev->data->dev_link, sizeof(dev_link))) { - /* Link status changed. */ - dev->data->dev_link = dev_link; - return 0; - } - /* Link status is still the same. */ - return -1; -} - -/** - * Enable receiving and transmitting traffic. - * - * @param priv - * Pointer to private structure. - */ -static void -priv_link_start(struct priv *priv) -{ - struct rte_eth_dev *dev = priv->dev; - int err; - - dev->tx_pkt_burst = priv_select_tx_function(priv, dev); - dev->rx_pkt_burst = priv_select_rx_function(priv, dev); - err = priv_dev_traffic_enable(priv, dev); - if (err) - ERROR("%p: error occurred while configuring control flows: %s", - (void *)priv, strerror(err)); - err = priv_flow_start(priv, &priv->flows); - if (err) - ERROR("%p: error occurred while configuring flows: %s", - (void *)priv, strerror(err)); -} - -/** - * Disable receiving and transmitting traffic. - * - * @param priv - * Pointer to private structure. - */ -static void -priv_link_stop(struct priv *priv) -{ - struct rte_eth_dev *dev = priv->dev; - - priv_flow_stop(priv, &priv->flows); - priv_dev_traffic_disable(priv, dev); - dev->rx_pkt_burst = removed_rx_burst; - dev->tx_pkt_burst = removed_tx_burst; -} - -/** - * Retrieve physical link information and update rx/tx_pkt_burst callbacks - * accordingly. - * - * @param priv - * Pointer to private structure. - * @param wait_to_complete - * Wait for request completion (ignored). 
- */ -int -priv_link_update(struct priv *priv, int wait_to_complete) -{ - struct rte_eth_dev *dev = priv->dev; - struct utsname utsname; - int ver[3]; - int ret; - struct rte_eth_link dev_link = dev->data->dev_link; - - if (uname(&utsname) == -1 || - sscanf(utsname.release, "%d.%d.%d", - &ver[0], &ver[1], &ver[2]) != 3 || - KERNEL_VERSION(ver[0], ver[1], ver[2]) < KERNEL_VERSION(4, 9, 0)) - ret = mlx5_link_update_unlocked_gset(dev, wait_to_complete); - else - ret = mlx5_link_update_unlocked_gs(dev, wait_to_complete); - /* If lsc interrupt is disabled, should always be ready for traffic. */ - if (!dev->data->dev_conf.intr_conf.lsc) { - priv_link_start(priv); - return ret; + if ((dev_link.link_speed && !dev_link.link_status) || + (!dev_link.link_speed && dev_link.link_status)) { + rte_errno = EAGAIN; + return -rte_errno; } - /* Re-select burst callbacks only if link status has been changed. */ - if (!ret && dev_link.link_status != dev->data->dev_link.link_status) { - if (dev->data->dev_link.link_status == ETH_LINK_UP) - priv_link_start(priv); - else - priv_link_stop(priv); - } - return ret; -} - -/** - * Querying the link status till it changes to the desired state. - * Number of query attempts is bounded by MLX5_MAX_LINK_QUERY_ATTEMPTS. - * - * @param priv - * Pointer to private structure. - * @param status - * Link desired status. - * - * @return - * 0 on success, negative errno value on failure. - */ -int -priv_force_link_status_change(struct priv *priv, int status) -{ - int try = 0; - - while (try < MLX5_MAX_LINK_QUERY_ATTEMPTS) { - priv_link_update(priv, 0); - if (priv->dev->data->dev_link.link_status == status) - return 0; - try++; - sleep(1); - } - return -EAGAIN; + *link = dev_link; + return 0; } /** @@ -972,17 +703,42 @@ priv_force_link_status_change(struct priv *priv, int status) * @param dev * Pointer to Ethernet device structure. * @param wait_to_complete - * Wait for request completion (ignored). + * Wait for request completion. + * + * @return + * 0 if link status was not updated, positive if it was, a negative errno + * value otherwise and rte_errno is set. */ int mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete) { - struct priv *priv = dev->data->dev_private; int ret; + struct rte_eth_link dev_link; + time_t start_time = time(NULL); - priv_lock(priv); - ret = priv_link_update(priv, wait_to_complete); - priv_unlock(priv); + do { + ret = mlx5_link_update_unlocked_gs(dev, &dev_link); + if (ret) + ret = mlx5_link_update_unlocked_gset(dev, &dev_link); + if (ret == 0) + break; + /* Handle wait to complete situation. */ + if (wait_to_complete && ret == -EAGAIN) { + if (abs((int)difftime(time(NULL), start_time)) < + MLX5_LINK_STATUS_TIMEOUT) { + usleep(0); + continue; + } else { + rte_errno = EBUSY; + return -rte_errno; + } + } else if (ret < 0) { + return ret; + } + } while (wait_to_complete); + ret = !!memcmp(&dev->data->dev_link, &dev_link, + sizeof(struct rte_eth_link)); + dev->data->dev_link = dev_link; return ret; } @@ -995,39 +751,33 @@ mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete) * New MTU. * * @return - * 0 on success, negative errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
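+ *
+ * The body below pushes the new MTU to the kernel netdevice and reads it
+ * back before caching it in priv->mtu. A hedged application-side sketch
+ * through the public ethdev API (illustrative only, not part of this
+ * patch):
+ *
+ *   int ret = rte_eth_dev_set_mtu(port_id, 9000);
+ *   if (ret == -EAGAIN)
+ *       printf("kernel did not accept the new MTU\n");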
*/ int mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) { struct priv *priv = dev->data->dev_private; - uint16_t kern_mtu; - int ret = 0; + uint16_t kern_mtu = 0; + int ret; - priv_lock(priv); - ret = priv_get_mtu(priv, &kern_mtu); + ret = mlx5_get_mtu(dev, &kern_mtu); if (ret) - goto out; + return ret; /* Set kernel interface MTU first. */ - ret = priv_set_mtu(priv, mtu); + ret = mlx5_set_mtu(dev, mtu); if (ret) - goto out; - ret = priv_get_mtu(priv, &kern_mtu); + return ret; + ret = mlx5_get_mtu(dev, &kern_mtu); if (ret) - goto out; + return ret; if (kern_mtu == mtu) { priv->mtu = mtu; - DEBUG("adapter port %u MTU set to %u", priv->port, mtu); + DRV_LOG(DEBUG, "port %u adapter MTU set to %u", + dev->data->port_id, mtu); + return 0; } - priv_unlock(priv); - return 0; -out: - ret = errno; - WARN("cannot set port %u MTU to %u: %s", priv->port, mtu, - strerror(ret)); - priv_unlock(priv); - assert(ret >= 0); - return -ret; + rte_errno = EAGAIN; + return -rte_errno; } /** @@ -1039,12 +789,11 @@ out: * Flow control output buffer. * * @return - * 0 on success, negative errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) { - struct priv *priv = dev->data->dev_private; struct ifreq ifr; struct ethtool_pauseparam ethpause = { .cmd = ETHTOOL_GPAUSEPARAM @@ -1052,15 +801,14 @@ mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) int ret; ifr.ifr_data = (void *)ðpause; - priv_lock(priv); - if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) { - ret = errno; - WARN("ioctl(SIOCETHTOOL, ETHTOOL_GPAUSEPARAM)" - " failed: %s", - strerror(ret)); - goto out; + ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); + if (ret) { + DRV_LOG(WARNING, + "port %u ioctl(SIOCETHTOOL, ETHTOOL_GPAUSEPARAM) failed:" + " %s", + dev->data->port_id, strerror(rte_errno)); + return ret; } - fc_conf->autoneg = ethpause.autoneg; if (ethpause.rx_pause && ethpause.tx_pause) fc_conf->mode = RTE_FC_FULL; @@ -1070,12 +818,7 @@ mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) fc_conf->mode = RTE_FC_TX_PAUSE; else fc_conf->mode = RTE_FC_NONE; - ret = 0; - -out: - priv_unlock(priv); - assert(ret >= 0); - return -ret; + return 0; } /** @@ -1087,12 +830,11 @@ out: * Flow control parameters. * * @return - * 0 on success, negative errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) { - struct priv *priv = dev->data->dev_private; struct ifreq ifr; struct ethtool_pauseparam ethpause = { .cmd = ETHTOOL_SPAUSEPARAM @@ -1112,21 +854,15 @@ mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) ethpause.tx_pause = 1; else ethpause.tx_pause = 0; - - priv_lock(priv); - if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) { - ret = errno; - WARN("ioctl(SIOCETHTOOL, ETHTOOL_SPAUSEPARAM)" - " failed: %s", - strerror(ret)); - goto out; + ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); + if (ret) { + DRV_LOG(WARNING, + "port %u ioctl(SIOCETHTOOL, ETHTOOL_SPAUSEPARAM)" + " failed: %s", + dev->data->port_id, strerror(rte_errno)); + return ret; } - ret = 0; - -out: - priv_unlock(priv); - assert(ret >= 0); - return -ret; + return 0; } /** @@ -1138,7 +874,7 @@ out: * PCI bus address output buffer. * * @return - * 0 on success, -1 on failure and errno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
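+ *
+ * The helper scans the IB device sysfs "uevent" file; an illustrative
+ * line of the kind it parses (key and value assumed from the Linux sysfs
+ * convention, the parsing loop itself is outside this hunk):
+ *
+ *   PCI_SLOT_NAME=0000:84:00.0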
 */
 int
 mlx5_ibv_device_to_pci_addr(const struct ibv_device *device,
@@ -1149,8 +885,10 @@ mlx5_ibv_device_to_pci_addr(const struct ibv_device *device,
 	MKSTR(path, "%s/device/uevent", device->ibdev_path);
 
 	file = fopen(path, "rb");
-	if (file == NULL)
-		return -1;
+	if (file == NULL) {
+		rte_errno = errno;
+		return -rte_errno;
+	}
 	while (fgets(line, sizeof(line), file) == line) {
 		size_t len = strlen(line);
 		int ret;
@@ -1179,47 +917,11 @@ mlx5_ibv_device_to_pci_addr(const struct ibv_device *device,
 	return 0;
 }
 
-/**
- * Update the link status.
- *
- * @param priv
- *   Pointer to private structure.
- *
- * @return
- *   Zero if the callback process can be called immediately.
- */
-static int
-priv_link_status_update(struct priv *priv)
-{
-	struct rte_eth_link *link = &priv->dev->data->dev_link;
-
-	priv_link_update(priv, 0);
-	if (((link->link_speed == 0) && link->link_status) ||
-	    ((link->link_speed != 0) && !link->link_status)) {
-		/*
-		 * Inconsistent status. Event likely occurred before the
-		 * kernel netdevice exposes the new status.
-		 */
-		if (!priv->pending_alarm) {
-			priv->pending_alarm = 1;
-			rte_eal_alarm_set(MLX5_ALARM_TIMEOUT_US,
-					  mlx5_dev_link_status_handler,
-					  priv->dev);
-		}
-		return 1;
-	} else if (unlikely(priv->pending_alarm)) {
-		/* Link interrupt occurred while alarm is already scheduled. */
-		priv->pending_alarm = 0;
-		rte_eal_alarm_cancel(mlx5_dev_link_status_handler, priv->dev);
-	}
-	return 0;
-}
-
 /**
  * Device status handler.
  *
- * @param priv
- *   Pointer to private structure.
+ * @param dev
+ *   Pointer to Ethernet device.
 * @param events
 *   Pointer to event flags holder.
 *
@@ -1227,59 +929,36 @@ priv_link_status_update(struct priv *priv)
 *   Events bitmap of callback process which can be called immediately.
 */
 static uint32_t
-priv_dev_status_handler(struct priv *priv)
+mlx5_dev_status_handler(struct rte_eth_dev *dev)
 {
+	struct priv *priv = dev->data->dev_private;
 	struct ibv_async_event event;
 	uint32_t ret = 0;
 
+	if (mlx5_link_update(dev, 0) == -EAGAIN) {
+		usleep(0);
+		return 0;
+	}
 	/* Read all messages and acknowledge them. */
 	for (;;) {
 		if (mlx5_glue->get_async_event(priv->ctx, &event))
 			break;
 		if ((event.event_type == IBV_EVENT_PORT_ACTIVE ||
 		     event.event_type == IBV_EVENT_PORT_ERR) &&
-		    (priv->dev->data->dev_conf.intr_conf.lsc == 1))
+		    (dev->data->dev_conf.intr_conf.lsc == 1))
 			ret |= (1 << RTE_ETH_EVENT_INTR_LSC);
 		else if (event.event_type == IBV_EVENT_DEVICE_FATAL &&
-			 priv->dev->data->dev_conf.intr_conf.rmv == 1)
+			 dev->data->dev_conf.intr_conf.rmv == 1)
 			ret |= (1 << RTE_ETH_EVENT_INTR_RMV);
 		else
-			DEBUG("event type %d on port %d not handled",
-			      event.event_type, event.element.port_num);
+			DRV_LOG(DEBUG,
+				"port %u event type %d not handled",
+				dev->data->port_id, event.event_type);
 		mlx5_glue->ack_async_event(&event);
 	}
-	if (ret & (1 << RTE_ETH_EVENT_INTR_LSC))
-		if (priv_link_status_update(priv))
-			ret &= ~(1 << RTE_ETH_EVENT_INTR_LSC);
 	return ret;
 }
 
-/**
- * Handle delayed link status event.
- *
- * @param arg
- *   Registered argument.
- */
-void
-mlx5_dev_link_status_handler(void *arg)
-{
-	struct rte_eth_dev *dev = arg;
-	struct priv *priv = dev->data->dev_private;
-	int ret;
-
-	while (!priv_trylock(priv)) {
-		/* Alarm is being canceled. */
-		if (priv->pending_alarm == 0)
-			return;
-		rte_pause();
-	}
-	priv->pending_alarm = 0;
-	ret = priv_link_status_update(priv);
-	priv_unlock(priv);
-	if (!ret)
-		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
-}
-
 /**
 * Handle interrupts from the NIC.
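+ *
+ * Events folded into the bitmap by mlx5_dev_status_handler() above are
+ * forwarded to the application through _rte_eth_dev_callback_process().
+ * Hedged subscription sketch using the public ethdev API (hypothetical
+ * callback name, illustrative only):
+ *
+ *   static int
+ *   lsc_cb(uint16_t port_id, enum rte_eth_event_type type,
+ *          void *cb_arg, void *ret_param)
+ *   {
+ *       (void)type; (void)cb_arg; (void)ret_param;
+ *       printf("port %u link status changed\n", port_id);
+ *       return 0;
+ *   }
+ *
+ *   rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
+ *                                 lsc_cb, NULL);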
* @@ -1292,12 +971,9 @@ void mlx5_dev_interrupt_handler(void *cb_arg) { struct rte_eth_dev *dev = cb_arg; - struct priv *priv = dev->data->dev_private; uint32_t events; - priv_lock(priv); - events = priv_dev_status_handler(priv); - priv_unlock(priv); + events = mlx5_dev_status_handler(dev); if (events & (1 << RTE_ETH_EVENT_INTR_LSC)) _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL); if (events & (1 << RTE_ETH_EVENT_INTR_RMV)) @@ -1314,24 +990,21 @@ static void mlx5_dev_handler_socket(void *cb_arg) { struct rte_eth_dev *dev = cb_arg; - struct priv *priv = dev->data->dev_private; - priv_lock(priv); - priv_socket_handle(priv); - priv_unlock(priv); + mlx5_socket_handle(dev); } /** * Uninstall interrupt handler. * - * @param priv - * Pointer to private structure. * @param dev - * Pointer to the rte_eth_dev structure. + * Pointer to Ethernet device. */ void -priv_dev_interrupt_handler_uninstall(struct priv *priv, struct rte_eth_dev *dev) +mlx5_dev_interrupt_handler_uninstall(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; + if (dev->data->dev_conf.intr_conf.lsc || dev->data->dev_conf.intr_conf.rmv) rte_intr_callback_unregister(&priv->intr_handle, @@ -1339,10 +1012,6 @@ priv_dev_interrupt_handler_uninstall(struct priv *priv, struct rte_eth_dev *dev) if (priv->primary_socket) rte_intr_callback_unregister(&priv->intr_handle_socket, mlx5_dev_handler_socket, dev); - if (priv->pending_alarm) { - priv->pending_alarm = 0; - rte_eal_alarm_cancel(mlx5_dev_link_status_handler, dev); - } priv->intr_handle.fd = 0; priv->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN; priv->intr_handle_socket.fd = 0; @@ -1352,21 +1021,24 @@ priv_dev_interrupt_handler_uninstall(struct priv *priv, struct rte_eth_dev *dev) /** * Install interrupt handler. * - * @param priv - * Pointer to private structure. * @param dev - * Pointer to the rte_eth_dev structure. + * Pointer to Ethernet device. */ void -priv_dev_interrupt_handler_install(struct priv *priv, struct rte_eth_dev *dev) +mlx5_dev_interrupt_handler_install(struct rte_eth_dev *dev) { - int rc, flags; + struct priv *priv = dev->data->dev_private; + int ret; + int flags; assert(priv->ctx->async_fd > 0); flags = fcntl(priv->ctx->async_fd, F_GETFL); - rc = fcntl(priv->ctx->async_fd, F_SETFL, flags | O_NONBLOCK); - if (rc < 0) { - INFO("failed to change file descriptor async event queue"); + ret = fcntl(priv->ctx->async_fd, F_SETFL, flags | O_NONBLOCK); + if (ret) { + DRV_LOG(INFO, + "port %u failed to change file descriptor async event" + " queue", + dev->data->port_id); dev->data->dev_conf.intr_conf.lsc = 0; dev->data->dev_conf.intr_conf.rmv = 0; } @@ -1377,9 +1049,11 @@ priv_dev_interrupt_handler_install(struct priv *priv, struct rte_eth_dev *dev) rte_intr_callback_register(&priv->intr_handle, mlx5_dev_interrupt_handler, dev); } - - rc = priv_socket_init(priv); - if (!rc && priv->primary_socket) { + ret = mlx5_socket_init(dev); + if (ret) + DRV_LOG(ERR, "port %u cannot initialise socket: %s", + dev->data->port_id, strerror(rte_errno)); + else if (priv->primary_socket) { priv->intr_handle_socket.fd = priv->primary_socket; priv->intr_handle_socket.type = RTE_INTR_HANDLE_EXT; rte_intr_callback_register(&priv->intr_handle_socket, @@ -1387,23 +1061,6 @@ priv_dev_interrupt_handler_install(struct priv *priv, struct rte_eth_dev *dev) } } -/** - * Change the link state (UP / DOWN). - * - * @param priv - * Pointer to private data structure. - * @param up - * Nonzero for link up, otherwise link down. 
- *
- * @return
- *   0 on success, errno value on failure.
- */
-static int
-priv_dev_set_link(struct priv *priv, int up)
-{
-	return priv_set_flags(priv, ~IFF_UP, up ? IFF_UP : ~IFF_UP);
-}
-
 /**
 * DPDK callback to bring the link DOWN.
 *
@@ -1411,18 +1068,12 @@ priv_dev_set_link(struct priv *priv, int up)
 *   Pointer to Ethernet device structure.
 *
 * @return
- *   0 on success, errno value on failure.
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
 int
 mlx5_set_link_down(struct rte_eth_dev *dev)
 {
-	struct priv *priv = dev->data->dev_private;
-	int err;
-
-	priv_lock(priv);
-	err = priv_dev_set_link(priv, 0);
-	priv_unlock(priv);
-	return err;
+	return mlx5_set_flags(dev, ~IFF_UP, ~IFF_UP);
 }
 
 /**
@@ -1432,63 +1083,68 @@ mlx5_set_link_down(struct rte_eth_dev *dev)
 *   Pointer to Ethernet device structure.
 *
 * @return
- *   0 on success, errno value on failure.
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
 int
 mlx5_set_link_up(struct rte_eth_dev *dev)
 {
-	struct priv *priv = dev->data->dev_private;
-	int err;
-
-	priv_lock(priv);
-	err = priv_dev_set_link(priv, 1);
-	priv_unlock(priv);
-	return err;
+	return mlx5_set_flags(dev, ~IFF_UP, IFF_UP);
 }
 
 /**
 * Configure the TX function to use.
 *
- * @param priv
- *   Pointer to private data structure.
 * @param dev
- *   Pointer to rte_eth_dev structure.
+ *   Pointer to Ethernet device.
 *
 * @return
 *   Pointer to selected Tx burst function.
 */
 eth_tx_burst_t
-priv_select_tx_function(struct priv *priv, struct rte_eth_dev *dev)
+mlx5_select_tx_function(struct rte_eth_dev *dev)
 {
+	struct priv *priv = dev->data->dev_private;
 	eth_tx_burst_t tx_pkt_burst = mlx5_tx_burst;
 	struct mlx5_dev_config *config = &priv->config;
 	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
 	int tso = !!(tx_offloads & (DEV_TX_OFFLOAD_TCP_TSO |
 				    DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-				    DEV_TX_OFFLOAD_GRE_TNL_TSO));
+				    DEV_TX_OFFLOAD_GRE_TNL_TSO |
+				    DEV_TX_OFFLOAD_IP_TNL_TSO |
+				    DEV_TX_OFFLOAD_UDP_TNL_TSO));
+	int swp = !!(tx_offloads & (DEV_TX_OFFLOAD_IP_TNL_TSO |
+				    DEV_TX_OFFLOAD_UDP_TNL_TSO |
+				    DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM));
 	int vlan_insert = !!(tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT);
 
 	assert(priv != NULL);
 	/* Select appropriate TX function. */
-	if (vlan_insert || tso)
+	if (vlan_insert || tso || swp)
 		return tx_pkt_burst;
 	if (config->mps == MLX5_MPW_ENHANCED) {
-		if (priv_check_vec_tx_support(priv, dev) > 0) {
-			if (priv_check_raw_vec_tx_support(priv, dev) > 0)
+		if (mlx5_check_vec_tx_support(dev) > 0) {
+			if (mlx5_check_raw_vec_tx_support(dev) > 0)
 				tx_pkt_burst = mlx5_tx_burst_raw_vec;
 			else
 				tx_pkt_burst = mlx5_tx_burst_vec;
-			DEBUG("selected Enhanced MPW TX vectorized function");
+			DRV_LOG(DEBUG,
				"port %u selected enhanced MPW Tx vectorized"
				" function",
				dev->data->port_id);
 		} else {
 			tx_pkt_burst = mlx5_tx_burst_empw;
-			DEBUG("selected Enhanced MPW TX function");
+			DRV_LOG(DEBUG,
				"port %u selected enhanced MPW Tx function",
				dev->data->port_id);
 		}
 	} else if (config->mps && (config->txq_inline > 0)) {
 		tx_pkt_burst = mlx5_tx_burst_mpw_inline;
-		DEBUG("selected MPW inline TX function");
+		DRV_LOG(DEBUG, "port %u selected MPW inline Tx function",
			dev->data->port_id);
 	} else if (config->mps) {
 		tx_pkt_burst = mlx5_tx_burst_mpw;
-		DEBUG("selected MPW TX function");
+		DRV_LOG(DEBUG, "port %u selected MPW Tx function",
			dev->data->port_id);
 	}
 	return tx_pkt_burst;
 }
@@ -1496,23 +1152,24 @@ priv_select_tx_function(struct priv *priv, struct rte_eth_dev *dev)
 /**
 * Configure the RX function to use.
 *
- * @param priv
- *   Pointer to private data structure.
 * @param dev
- *   Pointer to rte_eth_dev structure.
+ *   Pointer to Ethernet device.
 *
 * @return
 *   Pointer to selected Rx burst function.
 */
 eth_rx_burst_t
-priv_select_rx_function(struct priv *priv, __rte_unused struct rte_eth_dev *dev)
+mlx5_select_rx_function(struct rte_eth_dev *dev)
 {
 	eth_rx_burst_t rx_pkt_burst = mlx5_rx_burst;
 
-	assert(priv != NULL);
-	if (priv_check_vec_rx_support(priv) > 0) {
+	assert(dev != NULL);
+	if (mlx5_check_vec_rx_support(dev) > 0) {
 		rx_pkt_burst = mlx5_rx_burst_vec;
-		DEBUG("selected RX vectorized function");
+		DRV_LOG(DEBUG, "port %u selected Rx vectorized function",
			dev->data->port_id);
+	} else if (mlx5_mprq_enabled(dev)) {
+		rx_pkt_burst = mlx5_rx_burst_mprq;
 	}
 	return rx_pkt_burst;
 }
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 26002c4b..994be05b 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -1,9 +1,10 @@
 /* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
- * Copyright 2016 Mellanox.
+ * Copyright 2016 Mellanox Technologies, Ltd
 */
 
 #include
+#include
 #include
 
 /* Verbs header. */
@@ -16,6 +17,9 @@
 #pragma GCC diagnostic error "-Wpedantic"
 #endif
 
+#include
+#include
+#include
 #include
 #include
 #include
@@ -27,12 +31,13 @@
 #include "mlx5_prm.h"
 #include "mlx5_glue.h"
 
-/* Define minimal priority for control plane flows. */
-#define MLX5_CTRL_FLOW_PRIORITY 4
+/* Flow priority for control plane flows. */
+#define MLX5_CTRL_FLOW_PRIORITY 1
 
 /* Internet Protocol versions. */
 #define MLX5_IPV4 4
 #define MLX5_IPV6 6
+#define MLX5_GRE 47
 
 #ifndef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
 struct ibv_flow_spec_counter_action {
@@ -44,40 +49,62 @@ struct ibv_flow_spec_counter_action {
 extern const struct eth_dev_ops mlx5_dev_ops;
 extern const struct eth_dev_ops mlx5_dev_ops_isolate;
 
+/** Structure given to the conversion functions. */
+struct mlx5_flow_data {
+	struct rte_eth_dev *dev; /** Ethernet device. */
+	struct mlx5_flow_parse *parser; /** Parser context. */
+	struct rte_flow_error *error; /** Error context.
*/ +}; + static int mlx5_flow_create_eth(const struct rte_flow_item *item, const void *default_mask, - void *data); + struct mlx5_flow_data *data); static int mlx5_flow_create_vlan(const struct rte_flow_item *item, const void *default_mask, - void *data); + struct mlx5_flow_data *data); static int mlx5_flow_create_ipv4(const struct rte_flow_item *item, const void *default_mask, - void *data); + struct mlx5_flow_data *data); static int mlx5_flow_create_ipv6(const struct rte_flow_item *item, const void *default_mask, - void *data); + struct mlx5_flow_data *data); static int mlx5_flow_create_udp(const struct rte_flow_item *item, const void *default_mask, - void *data); + struct mlx5_flow_data *data); static int mlx5_flow_create_tcp(const struct rte_flow_item *item, const void *default_mask, - void *data); + struct mlx5_flow_data *data); static int mlx5_flow_create_vxlan(const struct rte_flow_item *item, const void *default_mask, - void *data); + struct mlx5_flow_data *data); + +static int +mlx5_flow_create_vxlan_gpe(const struct rte_flow_item *item, + const void *default_mask, + struct mlx5_flow_data *data); + +static int +mlx5_flow_create_gre(const struct rte_flow_item *item, + const void *default_mask, + struct mlx5_flow_data *data); + +static int +mlx5_flow_create_mpls(const struct rte_flow_item *item, + const void *default_mask, + struct mlx5_flow_data *data); struct mlx5_flow_parse; @@ -89,7 +116,7 @@ static int mlx5_flow_create_flag_mark(struct mlx5_flow_parse *parser, uint32_t mark_id); static int -mlx5_flow_create_count(struct priv *priv, struct mlx5_flow_parse *parser); +mlx5_flow_create_count(struct rte_eth_dev *dev, struct mlx5_flow_parse *parser); /* Hash RX queue types. */ enum hash_rxq_type { @@ -100,6 +127,7 @@ enum hash_rxq_type { HASH_RXQ_UDPV6, HASH_RXQ_IPV6, HASH_RXQ_ETH, + HASH_RXQ_TUNNEL, }; /* Initialization data for hash RX queue. */ @@ -206,10 +234,10 @@ struct rte_flow { TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */ uint32_t mark:1; /**< Set if the flow is marked. */ uint32_t drop:1; /**< Drop queue. */ - uint16_t queues_n; /**< Number of entries in queue[]. */ + struct rte_flow_action_rss rss_conf; /**< RSS configuration */ uint16_t (*queues)[]; /**< Queues indexes to use. */ - struct rte_eth_rss_conf rss_conf; /**< RSS configuration */ uint8_t rss_key[40]; /**< copy of the RSS key. */ + uint32_t tunnel; /**< Tunnel type of RTE_PTYPE_TUNNEL_XXX. */ struct ibv_counter_set *cs; /**< Holds the counters for the rule. */ struct mlx5_flow_counter_stats counter_stats;/**> 12) + +const uint32_t ptype_ext[] = { + [PTYPE_IDX(RTE_PTYPE_TUNNEL_VXLAN)] = RTE_PTYPE_TUNNEL_VXLAN | + RTE_PTYPE_L4_UDP, + [PTYPE_IDX(RTE_PTYPE_TUNNEL_VXLAN_GPE)] = RTE_PTYPE_TUNNEL_VXLAN_GPE | + RTE_PTYPE_L4_UDP, + [PTYPE_IDX(RTE_PTYPE_TUNNEL_GRE)] = RTE_PTYPE_TUNNEL_GRE, + [PTYPE_IDX(RTE_PTYPE_TUNNEL_MPLS_IN_GRE)] = + RTE_PTYPE_TUNNEL_MPLS_IN_GRE, + [PTYPE_IDX(RTE_PTYPE_TUNNEL_MPLS_IN_UDP)] = + RTE_PTYPE_TUNNEL_MPLS_IN_GRE | RTE_PTYPE_L4_UDP, +}; + /** Structure to generate a simple graph of layers supported by the NIC. */ struct mlx5_flow_items { /** List of possible actions for these items. */ @@ -247,11 +302,12 @@ struct mlx5_flow_items { * Internal structure to store the conversion. * * @return - * 0 on success, negative value otherwise. + * 0 on success, a negative errno value otherwise and rte_errno is + * set. 
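+ *
+ * Hedged illustration of the hook shape (argument choice is
+ * illustrative; the actual call site appears later in this file):
+ *
+ *   ret = cur_item->convert(items, cur_item->default_mask, &data);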
*/ int (*convert)(const struct rte_flow_item *item, const void *default_mask, - void *data); + struct mlx5_flow_data *data); /** Size in bytes of the destination structure. */ const unsigned int dst_sz; /** List of possible following items. */ @@ -274,7 +330,9 @@ static const enum rte_flow_action_type valid_actions[] = { static const struct mlx5_flow_items mlx5_flow_items[] = { [RTE_FLOW_ITEM_TYPE_END] = { .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH, - RTE_FLOW_ITEM_TYPE_VXLAN), + RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_VXLAN_GPE, + RTE_FLOW_ITEM_TYPE_GRE), }, [RTE_FLOW_ITEM_TYPE_ETH] = { .items = ITEMS(RTE_FLOW_ITEM_TYPE_VLAN, @@ -297,6 +355,7 @@ static const struct mlx5_flow_items mlx5_flow_items[] = { .actions = valid_actions, .mask = &(const struct rte_flow_item_vlan){ .tci = -1, + .inner_type = -1, }, .default_mask = &rte_flow_item_vlan_mask, .mask_sz = sizeof(struct rte_flow_item_vlan), @@ -305,7 +364,8 @@ static const struct mlx5_flow_items mlx5_flow_items[] = { }, [RTE_FLOW_ITEM_TYPE_IPV4] = { .items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP, - RTE_FLOW_ITEM_TYPE_TCP), + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_GRE), .actions = valid_actions, .mask = &(const struct rte_flow_item_ipv4){ .hdr = { @@ -322,7 +382,8 @@ static const struct mlx5_flow_items mlx5_flow_items[] = { }, [RTE_FLOW_ITEM_TYPE_IPV6] = { .items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP, - RTE_FLOW_ITEM_TYPE_TCP), + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_GRE), .actions = valid_actions, .mask = &(const struct rte_flow_item_ipv6){ .hdr = { @@ -349,7 +410,9 @@ static const struct mlx5_flow_items mlx5_flow_items[] = { .dst_sz = sizeof(struct ibv_flow_spec_ipv6), }, [RTE_FLOW_ITEM_TYPE_UDP] = { - .items = ITEMS(RTE_FLOW_ITEM_TYPE_VXLAN), + .items = ITEMS(RTE_FLOW_ITEM_TYPE_VXLAN, + RTE_FLOW_ITEM_TYPE_VXLAN_GPE, + RTE_FLOW_ITEM_TYPE_MPLS), .actions = valid_actions, .mask = &(const struct rte_flow_item_udp){ .hdr = { @@ -375,8 +438,43 @@ static const struct mlx5_flow_items mlx5_flow_items[] = { .convert = mlx5_flow_create_tcp, .dst_sz = sizeof(struct ibv_flow_spec_tcp_udp), }, + [RTE_FLOW_ITEM_TYPE_GRE] = { + .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_MPLS), + .actions = valid_actions, + .mask = &(const struct rte_flow_item_gre){ + .protocol = -1, + }, + .default_mask = &rte_flow_item_gre_mask, + .mask_sz = sizeof(struct rte_flow_item_gre), + .convert = mlx5_flow_create_gre, +#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT + .dst_sz = sizeof(struct ibv_flow_spec_gre), +#else + .dst_sz = sizeof(struct ibv_flow_spec_tunnel), +#endif + }, + [RTE_FLOW_ITEM_TYPE_MPLS] = { + .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_IPV6), + .actions = valid_actions, + .mask = &(const struct rte_flow_item_mpls){ + .label_tc_s = "\xff\xff\xf0", + }, + .default_mask = &rte_flow_item_mpls_mask, + .mask_sz = sizeof(struct rte_flow_item_mpls), + .convert = mlx5_flow_create_mpls, +#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT + .dst_sz = sizeof(struct ibv_flow_spec_mpls), +#endif + }, [RTE_FLOW_ITEM_TYPE_VXLAN] = { - .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH), + .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, /* For L3 VXLAN. */ + RTE_FLOW_ITEM_TYPE_IPV6), /* For L3 VXLAN. 
*/ .actions = valid_actions, .mask = &(const struct rte_flow_item_vxlan){ .vni = "\xff\xff\xff", @@ -386,28 +484,43 @@ static const struct mlx5_flow_items mlx5_flow_items[] = { .convert = mlx5_flow_create_vxlan, .dst_sz = sizeof(struct ibv_flow_spec_tunnel), }, + [RTE_FLOW_ITEM_TYPE_VXLAN_GPE] = { + .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_IPV6), + .actions = valid_actions, + .mask = &(const struct rte_flow_item_vxlan_gpe){ + .vni = "\xff\xff\xff", + }, + .default_mask = &rte_flow_item_vxlan_gpe_mask, + .mask_sz = sizeof(struct rte_flow_item_vxlan_gpe), + .convert = mlx5_flow_create_vxlan_gpe, + .dst_sz = sizeof(struct ibv_flow_spec_tunnel), + }, }; /** Structure to pass to the conversion function. */ struct mlx5_flow_parse { - uint32_t inner; /**< Set once VXLAN is encountered. */ + uint32_t inner; /**< Verbs value, set once tunnel is encountered. */ uint32_t create:1; /**< Whether resources should remain after a validate. */ uint32_t drop:1; /**< Target is a drop queue. */ uint32_t mark:1; /**< Mark is present in the flow. */ uint32_t count:1; /**< Count is present in the flow. */ uint32_t mark_id; /**< Mark identifier. */ + struct rte_flow_action_rss rss_conf; /**< RSS configuration */ uint16_t queues[RTE_MAX_QUEUES_PER_PORT]; /**< Queues indexes to use. */ - uint16_t queues_n; /**< Number of entries in queue[]. */ - struct rte_eth_rss_conf rss_conf; /**< RSS configuration */ uint8_t rss_key[40]; /**< copy of the RSS key. */ enum hash_rxq_type layer; /**< Last pattern layer detected. */ + enum hash_rxq_type out_layer; /**< Last outer pattern layer detected. */ + uint32_t tunnel; /**< Tunnel type of RTE_PTYPE_TUNNEL_XXX. */ struct ibv_counter_set *cs; /**< Holds the counter set for the rule */ struct { struct ibv_flow_attr *ibv_attr; /**< Pointer to Verbs attributes. */ unsigned int offset; /**< Current position or total size of the attribute. */ + uint64_t hash_fields; /**< Verbs hash fields. */ } queue[RTE_DIM(hash_rxq_init)]; }; @@ -435,10 +548,18 @@ struct mlx5_fdir { struct rte_flow_item_ipv4 ipv4; struct rte_flow_item_ipv6 ipv6; } l3; + union { + struct rte_flow_item_ipv4 ipv4; + struct rte_flow_item_ipv6 ipv6; + } l3_mask; union { struct rte_flow_item_udp udp; struct rte_flow_item_tcp tcp; } l4; + union { + struct rte_flow_item_udp udp; + struct rte_flow_item_tcp tcp; + } l4_mask; struct rte_flow_action_queue queue; }; @@ -449,7 +570,7 @@ struct ibv_spec_header { }; /** - * Check support for a given item. + * Check item is fully supported by the NIC matching capability. * * @param item[in] * Item specification. @@ -460,121 +581,56 @@ struct ibv_spec_header { * Bit-Mask size in bytes. * * @return - * 0 on success. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
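+ *
+ * Worked example (illustrative values): with a NIC capability mask of
+ * 0x0fff, a user mask of 0xefff is rejected since it sets bits outside
+ * the capability ((0xefff | 0x0fff) != 0x0fff), and a spec/last pair is
+ * rejected whenever their masked bytes differ, as ranges are not
+ * supported.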
*/ static int mlx5_flow_item_validate(const struct rte_flow_item *item, const uint8_t *mask, unsigned int size) { - int ret = 0; - - if (!item->spec && (item->mask || item->last)) - return -1; - if (item->spec && !item->mask) { - unsigned int i; - const uint8_t *spec = item->spec; - - for (i = 0; i < size; ++i) - if ((spec[i] | mask[i]) != mask[i]) - return -1; - } - if (item->last && !item->mask) { - unsigned int i; - const uint8_t *spec = item->last; - - for (i = 0; i < size; ++i) - if ((spec[i] | mask[i]) != mask[i]) - return -1; - } - if (item->mask) { - unsigned int i; - const uint8_t *spec = item->spec; - - for (i = 0; i < size; ++i) - if ((spec[i] | mask[i]) != mask[i]) - return -1; - } - if (item->spec && item->last) { - uint8_t spec[size]; - uint8_t last[size]; - const uint8_t *apply = mask; - unsigned int i; - - if (item->mask) - apply = item->mask; - for (i = 0; i < size; ++i) { - spec[i] = ((const uint8_t *)item->spec)[i] & apply[i]; - last[i] = ((const uint8_t *)item->last)[i] & apply[i]; - } - ret = memcmp(spec, last, size); - } - return ret; -} + unsigned int i; + const uint8_t *spec = item->spec; + const uint8_t *last = item->last; + const uint8_t *m = item->mask ? item->mask : mask; -/** - * Copy the RSS configuration from the user ones, of the rss_conf is null, - * uses the driver one. - * - * @param priv - * Pointer to private structure. - * @param parser - * Internal parser structure. - * @param rss_conf - * User RSS configuration to save. - * - * @return - * 0 on success, errno value on failure. - */ -static int -priv_flow_convert_rss_conf(struct priv *priv, - struct mlx5_flow_parse *parser, - const struct rte_eth_rss_conf *rss_conf) -{ + if (!spec && (item->mask || last)) + goto error; + if (!spec) + return 0; /* - * This function is also called at the beginning of - * priv_flow_convert_actions() to initialize the parser with the - * device default RSS configuration. + * Single-pass check to make sure that: + * - item->mask is supported, no bits are set outside mask. + * - Both masked item->spec and item->last are equal (no range + * supported). */ - (void)priv; - if (rss_conf) { - if (rss_conf->rss_hf & MLX5_RSS_HF_MASK) - return EINVAL; - if (rss_conf->rss_key_len != 40) - return EINVAL; - if (rss_conf->rss_key_len && rss_conf->rss_key) { - parser->rss_conf.rss_key_len = rss_conf->rss_key_len; - memcpy(parser->rss_key, rss_conf->rss_key, - rss_conf->rss_key_len); - parser->rss_conf.rss_key = parser->rss_key; - } - parser->rss_conf.rss_hf = rss_conf->rss_hf; + for (i = 0; i < size; i++) { + if (!m[i]) + continue; + if ((m[i] | mask[i]) != mask[i]) + goto error; + if (last && ((spec[i] & m[i]) != (last[i] & m[i]))) + goto error; } return 0; +error: + rte_errno = ENOTSUP; + return -rte_errno; } /** * Extract attribute to the parser. * - * @param priv - * Pointer to private structure. * @param[in] attr * Flow rule attributes. * @param[out] error * Perform verbose error reporting if not NULL. - * @param[in, out] parser - * Internal parser structure. * * @return * 0 on success, a negative errno value otherwise and rte_errno is set. 
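+ *
+ * Only ingress rules in the default group are accepted; egress and
+ * transfer attributes are rejected below. Hedged sketch of an attribute
+ * block that passes this check (illustrative only):
+ *
+ *   struct rte_flow_attr attr = { .group = 0, .ingress = 1 };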
*/ static int -priv_flow_convert_attributes(struct priv *priv, - const struct rte_flow_attr *attr, - struct rte_flow_error *error, - struct mlx5_flow_parse *parser) +mlx5_flow_convert_attributes(const struct rte_flow_attr *attr, + struct rte_flow_error *error) { - (void)priv; - (void)parser; if (attr->group) { rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_GROUP, @@ -596,6 +652,13 @@ priv_flow_convert_attributes(struct priv *priv, "egress is not supported"); return -rte_errno; } + if (attr->transfer) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + NULL, + "transfer is not supported"); + return -rte_errno; + } if (!attr->ingress) { rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, @@ -609,8 +672,8 @@ priv_flow_convert_attributes(struct priv *priv, /** * Extract actions request to the parser. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param[in] actions * Associated actions (list terminated by the END action). * @param[out] error @@ -622,83 +685,115 @@ priv_flow_convert_attributes(struct priv *priv, * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int -priv_flow_convert_actions(struct priv *priv, +mlx5_flow_convert_actions(struct rte_eth_dev *dev, const struct rte_flow_action actions[], struct rte_flow_error *error, struct mlx5_flow_parse *parser) { - /* - * Add default RSS configuration necessary for Verbs to create QP even - * if no RSS is necessary. - */ - priv_flow_convert_rss_conf(priv, parser, - (const struct rte_eth_rss_conf *) - &priv->rss_conf); + enum { FATE = 1, MARK = 2, COUNT = 4, }; + uint32_t overlap = 0; + struct priv *priv = dev->data->dev_private; + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) { if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) { continue; } else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) { + if (overlap & FATE) + goto exit_action_overlap; + overlap |= FATE; parser->drop = 1; } else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) { const struct rte_flow_action_queue *queue = (const struct rte_flow_action_queue *) actions->conf; - uint16_t n; - uint16_t found = 0; + if (overlap & FATE) + goto exit_action_overlap; + overlap |= FATE; if (!queue || (queue->index > (priv->rxqs_n - 1))) goto exit_action_not_supported; - for (n = 0; n < parser->queues_n; ++n) { - if (parser->queues[n] == queue->index) { - found = 1; - break; - } - } - if (parser->queues_n > 1 && !found) { - rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ACTION, - actions, - "queue action not in RSS queues"); - return -rte_errno; - } - if (!found) { - parser->queues_n = 1; - parser->queues[0] = queue->index; - } + parser->queues[0] = queue->index; + parser->rss_conf = (struct rte_flow_action_rss){ + .queue_num = 1, + .queue = parser->queues, + }; } else if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) { const struct rte_flow_action_rss *rss = (const struct rte_flow_action_rss *) actions->conf; + const uint8_t *rss_key; + uint32_t rss_key_len; uint16_t n; - if (!rss || !rss->num) { + if (overlap & FATE) + goto exit_action_overlap; + overlap |= FATE; + if (rss->func && + rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + actions, + "the only supported RSS hash" + " function is Toeplitz"); + return -rte_errno; + } +#ifndef HAVE_IBV_DEVICE_TUNNEL_SUPPORT + if (parser->rss_conf.level > 1) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + 
actions, + "a nonzero RSS encapsulation" + " level is not supported"); + return -rte_errno; + } +#endif + if (parser->rss_conf.level > 2) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + actions, + "RSS encapsulation level" + " > 1 is not supported"); + return -rte_errno; + } + if (rss->types & MLX5_RSS_HF_MASK) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + actions, + "unsupported RSS type" + " requested"); + return -rte_errno; + } + if (rss->key_len) { + rss_key_len = rss->key_len; + rss_key = rss->key; + } else { + rss_key_len = rss_hash_default_key_len; + rss_key = rss_hash_default_key; + } + if (rss_key_len != RTE_DIM(parser->rss_key)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + actions, + "RSS hash key must be" + " exactly 40 bytes long"); + return -rte_errno; + } + if (!rss->queue_num) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, actions, "no valid queues"); return -rte_errno; } - if (parser->queues_n == 1) { - uint16_t found = 0; - - assert(parser->queues_n); - for (n = 0; n < rss->num; ++n) { - if (parser->queues[0] == - rss->queue[n]) { - found = 1; - break; - } - } - if (!found) { - rte_flow_error_set(error, ENOTSUP, + if (rss->queue_num > RTE_DIM(parser->queues)) { + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, actions, - "queue action not in RSS" - " queues"); - return -rte_errno; - } + "too many queues for RSS" + " context"); + return -rte_errno; } - for (n = 0; n < rss->num; ++n) { + for (n = 0; n < rss->queue_num; ++n) { if (rss->queue[n] >= priv->rxqs_n) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, @@ -708,22 +803,26 @@ priv_flow_convert_actions(struct priv *priv, return -rte_errno; } } - for (n = 0; n < rss->num; ++n) - parser->queues[n] = rss->queue[n]; - parser->queues_n = rss->num; - if (priv_flow_convert_rss_conf(priv, parser, - rss->rss_conf)) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION, - actions, - "wrong RSS configuration"); - return -rte_errno; - } + parser->rss_conf = (struct rte_flow_action_rss){ + .func = RTE_ETH_HASH_FUNCTION_DEFAULT, + .level = rss->level ? rss->level : 1, + .types = rss->types, + .key_len = rss_key_len, + .queue_num = rss->queue_num, + .key = memcpy(parser->rss_key, rss_key, + sizeof(*rss_key) * rss_key_len), + .queue = memcpy(parser->queues, rss->queue, + sizeof(*rss->queue) * + rss->queue_num), + }; } else if (actions->type == RTE_FLOW_ACTION_TYPE_MARK) { const struct rte_flow_action_mark *mark = (const struct rte_flow_action_mark *) actions->conf; + if (overlap & MARK) + goto exit_action_overlap; + overlap |= MARK; if (!mark) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, @@ -741,17 +840,26 @@ priv_flow_convert_actions(struct priv *priv, parser->mark = 1; parser->mark_id = mark->id; } else if (actions->type == RTE_FLOW_ACTION_TYPE_FLAG) { + if (overlap & MARK) + goto exit_action_overlap; + overlap |= MARK; parser->mark = 1; } else if (actions->type == RTE_FLOW_ACTION_TYPE_COUNT && priv->config.flow_counter_en) { + if (overlap & COUNT) + goto exit_action_overlap; + overlap |= COUNT; parser->count = 1; } else { goto exit_action_not_supported; } } + /* When fate is unknown, drop traffic. 
*/ + if (!(overlap & FATE)) + parser->drop = 1; if (parser->drop && parser->mark) parser->mark = 0; - if (!parser->queues_n && !parser->drop) { + if (!parser->rss_conf.queue_num && !parser->drop) { rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE, NULL, "no valid action"); return -rte_errno; @@ -761,13 +869,15 @@ exit_action_not_supported: rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions, "action not supported"); return -rte_errno; +exit_action_overlap: + rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, + actions, "overlapping actions are not supported"); + return -rte_errno; } /** * Validate items. * - * @param priv - * Pointer to private structure. * @param[in] items * Pattern specification (list terminated by the END pattern item). * @param[out] error @@ -779,25 +889,28 @@ exit_action_not_supported: * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int -priv_flow_convert_items_validate(struct priv *priv, +mlx5_flow_convert_items_validate(struct rte_eth_dev *dev, const struct rte_flow_item items[], struct rte_flow_error *error, struct mlx5_flow_parse *parser) { + struct priv *priv = dev->data->dev_private; const struct mlx5_flow_items *cur_item = mlx5_flow_items; unsigned int i; + unsigned int last_voids = 0; + int ret = 0; - (void)priv; /* Initialise the offsets to start after verbs attribute. */ for (i = 0; i != hash_rxq_init_n; ++i) parser->queue[i].offset = sizeof(struct ibv_flow_attr); for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) { const struct mlx5_flow_items *token = NULL; unsigned int n; - int err; - if (items->type == RTE_FLOW_ITEM_TYPE_VOID) + if (items->type == RTE_FLOW_ITEM_TYPE_VOID) { + last_voids++; continue; + } for (i = 0; cur_item->items && cur_item->items[i] != RTE_FLOW_ITEM_TYPE_END; @@ -807,24 +920,48 @@ priv_flow_convert_items_validate(struct priv *priv, break; } } - if (!token) + if (!token) { + ret = -ENOTSUP; goto exit_item_not_supported; + } cur_item = token; - err = mlx5_flow_item_validate(items, + ret = mlx5_flow_item_validate(items, (const uint8_t *)cur_item->mask, cur_item->mask_sz); - if (err) + if (ret) goto exit_item_not_supported; - if (items->type == RTE_FLOW_ITEM_TYPE_VXLAN) { - if (parser->inner) { + if (IS_TUNNEL(items->type)) { + if (parser->tunnel && + !((items - last_voids - 1)->type == + RTE_FLOW_ITEM_TYPE_GRE && items->type == + RTE_FLOW_ITEM_TYPE_MPLS)) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + items, + "Cannot recognize multiple" + " tunnel encapsulations."); + return -rte_errno; + } + if (items->type == RTE_FLOW_ITEM_TYPE_MPLS && + !priv->config.mpls_en) { rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, items, - "cannot recognize multiple" - " VXLAN encapsulations"); + "MPLS not supported or" + " disabled in firmware" + " configuration."); + return -rte_errno; + } + if (!priv->config.tunnel_en && + parser->rss_conf.level > 1) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + items, + "RSS on tunnel is not supported"); return -rte_errno; } parser->inner = IBV_FLOW_SPEC_INNER; + parser->tunnel = flow_ptype[items->type]; } if (parser->drop) { parser->queue[HASH_RXQ_ETH].offset += cur_item->dst_sz; @@ -832,6 +969,7 @@ priv_flow_convert_items_validate(struct priv *priv, for (n = 0; n != hash_rxq_init_n; ++n) parser->queue[n].offset += cur_item->dst_sz; } + last_voids = 0; } if (parser->drop) { parser->queue[HASH_RXQ_ETH].offset += @@ -850,102 +988,103 @@ priv_flow_convert_items_validate(struct priv 
*priv,
 	}
 	return 0;
 exit_item_not_supported:
-	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
-			   items, "item not supported");
-	return -rte_errno;
+	return rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_ITEM,
+				  items, "item not supported");
 }
 
 /**
 * Allocate memory space to store verbs flow attributes.
 *
- * @param priv
- *   Pointer to private structure.
- * @param[in] priority
- *   Flow priority.
 * @param[in] size
 *   Amount of bytes to allocate.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
- *   A verbs flow attribute on success, NULL otherwise.
+ *   A verbs flow attribute on success, NULL otherwise and rte_errno is set.
 */
-static struct ibv_flow_attr*
-priv_flow_convert_allocate(struct priv *priv,
-			   unsigned int priority,
-			   unsigned int size,
-			   struct rte_flow_error *error)
+static struct ibv_flow_attr *
+mlx5_flow_convert_allocate(unsigned int size, struct rte_flow_error *error)
 {
 	struct ibv_flow_attr *ibv_attr;
 
-	(void)priv;
 	ibv_attr = rte_calloc(__func__, 1, size, 0);
 	if (!ibv_attr) {
 		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
-				   "cannot allocate verbs spec attributes.");
+				   "cannot allocate verbs spec attributes");
 		return NULL;
 	}
-	ibv_attr->priority = priority;
 	return ibv_attr;
 }
 
 /**
- * Finalise verbs flow attributes.
+ * Give inner packet matching a higher priority than non-inner packet
+ * matching.
 *
- * @param priv
- *   Pointer to private structure.
+ * @param dev
+ *   Pointer to Ethernet device.
 * @param[in, out] parser
 *   Internal parser structure.
+ * @param attr
+ *   User flow attribute.
 */
 static void
-priv_flow_convert_finalise(struct priv *priv, struct mlx5_flow_parse *parser)
+mlx5_flow_update_priority(struct rte_eth_dev *dev,
+			  struct mlx5_flow_parse *parser,
+			  const struct rte_flow_attr *attr)
 {
-	const unsigned int ipv4 =
-		hash_rxq_init[parser->layer].ip_version == MLX5_IPV4;
-	const enum hash_rxq_type hmin = ipv4 ? HASH_RXQ_TCPV4 : HASH_RXQ_TCPV6;
-	const enum hash_rxq_type hmax = ipv4 ? HASH_RXQ_IPV4 : HASH_RXQ_IPV6;
-	const enum hash_rxq_type ohmin = ipv4 ? HASH_RXQ_TCPV6 : HASH_RXQ_TCPV4;
-	const enum hash_rxq_type ohmax = ipv4 ? HASH_RXQ_IPV6 : HASH_RXQ_IPV4;
-	const enum hash_rxq_type ip = ipv4 ? HASH_RXQ_IPV4 : HASH_RXQ_IPV6;
+	struct priv *priv = dev->data->dev_private;
 	unsigned int i;
+	uint16_t priority;
 
-	(void)priv;
-	if (parser->layer == HASH_RXQ_ETH) {
-		goto fill;
-	} else {
-		/*
-		 * This layer becomes useless as the pattern define under
-		 * layers.
-		 */
-		rte_free(parser->queue[HASH_RXQ_ETH].ibv_attr);
-		parser->queue[HASH_RXQ_ETH].ibv_attr = NULL;
+	/*                      8 priorities    >= 16 priorities
	 * Control flow:        4-7             8-15
	 * User normal flow:    1-3             4-7
	 * User tunnel flow:    0-2             0-3
	 */
+	priority = attr->priority * MLX5_VERBS_FLOW_PRIO_8;
+	if (priv->config.max_verbs_prio == MLX5_VERBS_FLOW_PRIO_8)
+		priority /= 2;
+	/*
	 * Lower the non-tunnel flow Verbs priority by 1 if only 8 Verbs
	 * priorities are supported, by 4 otherwise.
	 */
+	if (!parser->inner) {
+		if (priv->config.max_verbs_prio == MLX5_VERBS_FLOW_PRIO_8)
+			priority += 1;
+		else
+			priority += MLX5_VERBS_FLOW_PRIO_8 / 2;
+	}
+	if (parser->drop) {
+		parser->queue[HASH_RXQ_ETH].ibv_attr->priority = priority +
				hash_rxq_init[HASH_RXQ_ETH].flow_priority;
+		return;
 	}
-	/* Remove opposite kind of layer e.g. IPv6 if the pattern is IPv4.
*/ - for (i = ohmin; i != (ohmax + 1); ++i) { + for (i = 0; i != hash_rxq_init_n; ++i) { if (!parser->queue[i].ibv_attr) continue; - rte_free(parser->queue[i].ibv_attr); - parser->queue[i].ibv_attr = NULL; + parser->queue[i].ibv_attr->priority = priority + + hash_rxq_init[i].flow_priority; } - /* Remove impossible flow according to the RSS configuration. */ - if (hash_rxq_init[parser->layer].dpdk_rss_hf & - parser->rss_conf.rss_hf) { - /* Remove any other flow. */ - for (i = hmin; i != (hmax + 1); ++i) { - if ((i == parser->layer) || - (!parser->queue[i].ibv_attr)) - continue; - rte_free(parser->queue[i].ibv_attr); - parser->queue[i].ibv_attr = NULL; - } - } else if (!parser->queue[ip].ibv_attr) { - /* no RSS possible with the current configuration. */ - parser->queues_n = 1; +} + +/** + * Finalise verbs flow attributes. + * + * @param[in, out] parser + * Internal parser structure. + */ +static void +mlx5_flow_convert_finalise(struct mlx5_flow_parse *parser) +{ + unsigned int i; + uint32_t inner = parser->inner; + + /* Don't create extra flows for outer RSS. */ + if (parser->tunnel && parser->rss_conf.level < 2) return; - } -fill: /* * Fill missing layers in verbs specifications, or compute the correct * offset to allocate the memory space for the attributes and @@ -956,23 +1095,25 @@ fill: struct ibv_flow_spec_ipv4_ext ipv4; struct ibv_flow_spec_ipv6 ipv6; struct ibv_flow_spec_tcp_udp udp_tcp; + struct ibv_flow_spec_eth eth; } specs; void *dst; uint16_t size; if (i == parser->layer) continue; - if (parser->layer == HASH_RXQ_ETH) { + if (parser->layer == HASH_RXQ_ETH || + parser->layer == HASH_RXQ_TUNNEL) { if (hash_rxq_init[i].ip_version == MLX5_IPV4) { size = sizeof(struct ibv_flow_spec_ipv4_ext); specs.ipv4 = (struct ibv_flow_spec_ipv4_ext){ - .type = IBV_FLOW_SPEC_IPV4_EXT, + .type = inner | IBV_FLOW_SPEC_IPV4_EXT, .size = size, }; } else { size = sizeof(struct ibv_flow_spec_ipv6); specs.ipv6 = (struct ibv_flow_spec_ipv6){ - .type = IBV_FLOW_SPEC_IPV6, + .type = inner | IBV_FLOW_SPEC_IPV6, .size = size, }; } @@ -989,7 +1130,7 @@ fill: (i == HASH_RXQ_UDPV6) || (i == HASH_RXQ_TCPV6)) { size = sizeof(struct ibv_flow_spec_tcp_udp); specs.udp_tcp = (struct ibv_flow_spec_tcp_udp) { - .type = ((i == HASH_RXQ_UDPV4 || + .type = inner | ((i == HASH_RXQ_UDPV4 || i == HASH_RXQ_UDPV6) ? IBV_FLOW_SPEC_UDP : IBV_FLOW_SPEC_TCP), @@ -1007,11 +1148,111 @@ fill: } } +/** + * Update flows according to pattern and RSS hash fields. + * + * @param[in, out] parser + * Internal parser structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx5_flow_convert_rss(struct mlx5_flow_parse *parser) +{ + unsigned int i; + enum hash_rxq_type start; + enum hash_rxq_type layer; + int outer = parser->tunnel && parser->rss_conf.level < 2; + uint64_t rss = parser->rss_conf.types; + + layer = outer ? parser->out_layer : parser->layer; + if (layer == HASH_RXQ_TUNNEL) + layer = HASH_RXQ_ETH; + if (outer) { + /* Only one hash type for outer RSS. */ + if (rss && layer == HASH_RXQ_ETH) { + start = HASH_RXQ_TCPV4; + } else if (rss && layer != HASH_RXQ_ETH && + !(rss & hash_rxq_init[layer].dpdk_rss_hf)) { + /* If RSS does not match the L4 pattern, try L3 RSS. */ + if (layer < HASH_RXQ_IPV4) + layer = HASH_RXQ_IPV4; + else if (layer > HASH_RXQ_IPV4 && layer < HASH_RXQ_IPV6) + layer = HASH_RXQ_IPV6; + start = layer; + } else { + start = layer; + } + /* Scan first valid hash type. 
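E.g. an outer Ethernet pattern starts the scan at HASH_RXQ_TCPV4; the first type whose dpdk_rss_hf intersects the requested hash types provides the hash fields, and outer RSS keeps that single hash type only. 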
*/ + for (i = start; rss && i <= layer; ++i) { + if (!parser->queue[i].ibv_attr) + continue; + if (hash_rxq_init[i].dpdk_rss_hf & rss) + break; + } + if (rss && i <= layer) + parser->queue[layer].hash_fields = + hash_rxq_init[i].hash_fields; + /* Trim unused hash types. */ + for (i = 0; i != hash_rxq_init_n; ++i) { + if (parser->queue[i].ibv_attr && i != layer) { + rte_free(parser->queue[i].ibv_attr); + parser->queue[i].ibv_attr = NULL; + } + } + } else { + /* Expand for inner or normal RSS. */ + if (rss && (layer == HASH_RXQ_ETH || layer == HASH_RXQ_IPV4)) + start = HASH_RXQ_TCPV4; + else if (rss && layer == HASH_RXQ_IPV6) + start = HASH_RXQ_TCPV6; + else + start = layer; + /* For L4 pattern, try L3 RSS if no L4 RSS. */ + /* Trim unused hash types. */ + for (i = 0; i != hash_rxq_init_n; ++i) { + if (!parser->queue[i].ibv_attr) + continue; + if (i < start || i > layer) { + rte_free(parser->queue[i].ibv_attr); + parser->queue[i].ibv_attr = NULL; + continue; + } + if (!rss) + continue; + if (hash_rxq_init[i].dpdk_rss_hf & rss) { + parser->queue[i].hash_fields = + hash_rxq_init[i].hash_fields; + } else if (i != layer) { + /* Remove unused RSS expansion. */ + rte_free(parser->queue[i].ibv_attr); + parser->queue[i].ibv_attr = NULL; + } else if (layer < HASH_RXQ_IPV4 && + (hash_rxq_init[HASH_RXQ_IPV4].dpdk_rss_hf & + rss)) { + /* Allow IPv4 RSS on L4 pattern. */ + parser->queue[i].hash_fields = + hash_rxq_init[HASH_RXQ_IPV4] + .hash_fields; + } else if (i > HASH_RXQ_IPV4 && i < HASH_RXQ_IPV6 && + (hash_rxq_init[HASH_RXQ_IPV6].dpdk_rss_hf & + rss)) { + /* Allow IPv6 RSS on L4 pattern. */ + parser->queue[i].hash_fields = + hash_rxq_init[HASH_RXQ_IPV6] + .hash_fields; + } + } + } + return 0; +} + /** * Validate and convert a flow supported by the NIC. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param[in] attr * Flow rule attributes. * @param[in] pattern @@ -1027,7 +1268,7 @@ fill: * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int -priv_flow_convert(struct priv *priv, +mlx5_flow_convert(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, const struct rte_flow_item items[], const struct rte_flow_action actions[], @@ -1044,48 +1285,36 @@ priv_flow_convert(struct priv *priv, .layer = HASH_RXQ_ETH, .mark_id = MLX5_FLOW_MARK_DEFAULT, }; - ret = priv_flow_convert_attributes(priv, attr, error, parser); + ret = mlx5_flow_convert_attributes(attr, error); if (ret) return ret; - ret = priv_flow_convert_actions(priv, actions, error, parser); + ret = mlx5_flow_convert_actions(dev, actions, error, parser); if (ret) return ret; - ret = priv_flow_convert_items_validate(priv, items, error, parser); + ret = mlx5_flow_convert_items_validate(dev, items, error, parser); if (ret) return ret; - priv_flow_convert_finalise(priv, parser); + mlx5_flow_convert_finalise(parser); /* * Second step. * Allocate the memory space to store verbs specifications. 
*/ if (parser->drop) { - unsigned int priority = - attr->priority + - hash_rxq_init[HASH_RXQ_ETH].flow_priority; unsigned int offset = parser->queue[HASH_RXQ_ETH].offset; parser->queue[HASH_RXQ_ETH].ibv_attr = - priv_flow_convert_allocate(priv, priority, - offset, error); + mlx5_flow_convert_allocate(offset, error); if (!parser->queue[HASH_RXQ_ETH].ibv_attr) - return ENOMEM; + goto exit_enomem; parser->queue[HASH_RXQ_ETH].offset = sizeof(struct ibv_flow_attr); } else { for (i = 0; i != hash_rxq_init_n; ++i) { - unsigned int priority = - attr->priority + - hash_rxq_init[i].flow_priority; unsigned int offset; - if (!(parser->rss_conf.rss_hf & - hash_rxq_init[i].dpdk_rss_hf) && - (i != HASH_RXQ_ETH)) - continue; offset = parser->queue[i].offset; parser->queue[i].ibv_attr = - priv_flow_convert_allocate(priv, priority, - offset, error); + mlx5_flow_convert_allocate(offset, error); if (!parser->queue[i].ibv_attr) goto exit_enomem; parser->queue[i].offset = sizeof(struct ibv_flow_attr); @@ -1093,7 +1322,15 @@ priv_flow_convert(struct priv *priv, } /* Third step. Conversion parse, fill the specifications. */ parser->inner = 0; + parser->tunnel = 0; + parser->layer = HASH_RXQ_ETH; for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) { + struct mlx5_flow_data data = { + .dev = dev, + .parser = parser, + .error = error, + }; + if (items->type == RTE_FLOW_ITEM_TYPE_VOID) continue; cur_item = &mlx5_flow_items[items->type]; @@ -1101,32 +1338,26 @@ priv_flow_convert(struct priv *priv, (cur_item->default_mask ? cur_item->default_mask : cur_item->mask), - parser); - if (ret) { - rte_flow_error_set(error, ret, - RTE_FLOW_ERROR_TYPE_ITEM, - items, "item not supported"); + &data); + if (ret) goto exit_free; - } } + if (!parser->drop) { + /* RSS check, remove unused hash types. */ + ret = mlx5_flow_convert_rss(parser); + if (ret) + goto exit_free; + /* Complete missing specification. */ + mlx5_flow_convert_finalise(parser); + } + mlx5_flow_update_priority(dev, parser, attr); if (parser->mark) mlx5_flow_create_flag_mark(parser, parser->mark_id); if (parser->count && parser->create) { - mlx5_flow_create_count(priv, parser); + mlx5_flow_create_count(dev, parser); if (!parser->cs) goto exit_count_error; } - /* - * Last step. Complete missing specification to reach the RSS - * configuration. - */ - if (!parser->drop) { - priv_flow_convert_finalise(priv, parser); - } else { - parser->queue[HASH_RXQ_ETH].ibv_attr->priority = - attr->priority + - hash_rxq_init[parser->layer].flow_priority; - } exit_free: /* Only verification is expected, all resources should be released. */ if (!parser->create) { @@ -1145,13 +1376,13 @@ exit_enomem: parser->queue[i].ibv_attr = NULL; } } - rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, "cannot allocate verbs spec attributes."); - return ret; + rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "cannot allocate verbs spec attributes"); + return -rte_errno; exit_count_error: rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, "cannot create counter."); - return rte_errno; + NULL, "cannot create counter"); + return -rte_errno; } /** @@ -1174,17 +1405,11 @@ mlx5_flow_create_copy(struct mlx5_flow_parse *parser, void *src, for (i = 0; i != hash_rxq_init_n; ++i) { if (!parser->queue[i].ibv_attr) continue; - /* Specification must be the same l3 type or none. 
*/ - if (parser->layer == HASH_RXQ_ETH || - (hash_rxq_init[parser->layer].ip_version == - hash_rxq_init[i].ip_version) || - (hash_rxq_init[i].ip_version == 0)) { - dst = (void *)((uintptr_t)parser->queue[i].ibv_attr + - parser->queue[i].offset); - memcpy(dst, src, size); - ++parser->queue[i].ibv_attr->num_of_specs; - parser->queue[i].offset += size; - } + dst = (void *)((uintptr_t)parser->queue[i].ibv_attr + + parser->queue[i].offset); + memcpy(dst, src, size); + ++parser->queue[i].ibv_attr->num_of_specs; + parser->queue[i].offset += size; } } @@ -1197,24 +1422,25 @@ mlx5_flow_create_copy(struct mlx5_flow_parse *parser, void *src, * Default bit-masks to use when item->mask is not provided. * @param data[in, out] * User structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int mlx5_flow_create_eth(const struct rte_flow_item *item, const void *default_mask, - void *data) + struct mlx5_flow_data *data) { const struct rte_flow_item_eth *spec = item->spec; const struct rte_flow_item_eth *mask = item->mask; - struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data; + struct mlx5_flow_parse *parser = data->parser; const unsigned int eth_size = sizeof(struct ibv_flow_spec_eth); struct ibv_flow_spec_eth eth = { .type = parser->inner | IBV_FLOW_SPEC_ETH, .size = eth_size, }; - /* Don't update layer for the inner pattern. */ - if (!parser->inner) - parser->layer = HASH_RXQ_ETH; + parser->layer = HASH_RXQ_ETH; if (spec) { unsigned int i; @@ -1246,17 +1472,21 @@ mlx5_flow_create_eth(const struct rte_flow_item *item, * Default bit-masks to use when item->mask is not provided. * @param data[in, out] * User structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int mlx5_flow_create_vlan(const struct rte_flow_item *item, const void *default_mask, - void *data) + struct mlx5_flow_data *data) { const struct rte_flow_item_vlan *spec = item->spec; const struct rte_flow_item_vlan *mask = item->mask; - struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data; + struct mlx5_flow_parse *parser = data->parser; struct ibv_flow_spec_eth *eth; const unsigned int eth_size = sizeof(struct ibv_flow_spec_eth); + const char *msg = "VLAN cannot be empty"; if (spec) { unsigned int i; @@ -1272,9 +1502,26 @@ mlx5_flow_create_vlan(const struct rte_flow_item *item, eth->val.vlan_tag = spec->tci; eth->mask.vlan_tag = mask->tci; eth->val.vlan_tag &= eth->mask.vlan_tag; + /* + * From verbs perspective an empty VLAN is equivalent + * to a packet without VLAN layer. + */ + if (!eth->mask.vlan_tag) + goto error; + /* Outer TPID cannot be matched. */ + if (eth->mask.ether_type) { + msg = "VLAN TPID matching is not supported"; + goto error; + } + eth->val.ether_type = spec->inner_type; + eth->mask.ether_type = mask->inner_type; + eth->val.ether_type &= eth->mask.ether_type; } + return 0; } - return 0; +error: + return rte_flow_error_set(data->error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, + item, msg); } /** @@ -1286,24 +1533,35 @@ mlx5_flow_create_vlan(const struct rte_flow_item *item, * Default bit-masks to use when item->mask is not provided. * @param data[in, out] * User structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ static int mlx5_flow_create_ipv4(const struct rte_flow_item *item, const void *default_mask, - void *data) + struct mlx5_flow_data *data) { + struct priv *priv = data->dev->data->dev_private; const struct rte_flow_item_ipv4 *spec = item->spec; const struct rte_flow_item_ipv4 *mask = item->mask; - struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data; + struct mlx5_flow_parse *parser = data->parser; unsigned int ipv4_size = sizeof(struct ibv_flow_spec_ipv4_ext); struct ibv_flow_spec_ipv4_ext ipv4 = { .type = parser->inner | IBV_FLOW_SPEC_IPV4_EXT, .size = ipv4_size, }; - /* Don't update layer for the inner pattern. */ - if (!parser->inner) - parser->layer = HASH_RXQ_IPV4; + if (parser->layer == HASH_RXQ_TUNNEL && + parser->tunnel == ptype_ext[PTYPE_IDX(RTE_PTYPE_TUNNEL_VXLAN)] && + !priv->config.l3_vxlan_en) + return rte_flow_error_set(data->error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "L3 VXLAN not enabled by device" + " parameter and/or not configured" + " in firmware"); + parser->layer = HASH_RXQ_IPV4; if (spec) { if (!mask) mask = default_mask; @@ -1338,24 +1596,35 @@ mlx5_flow_create_ipv4(const struct rte_flow_item *item, * Default bit-masks to use when item->mask is not provided. * @param data[in, out] * User structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int mlx5_flow_create_ipv6(const struct rte_flow_item *item, const void *default_mask, - void *data) + struct mlx5_flow_data *data) { + struct priv *priv = data->dev->data->dev_private; const struct rte_flow_item_ipv6 *spec = item->spec; const struct rte_flow_item_ipv6 *mask = item->mask; - struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data; + struct mlx5_flow_parse *parser = data->parser; unsigned int ipv6_size = sizeof(struct ibv_flow_spec_ipv6); struct ibv_flow_spec_ipv6 ipv6 = { .type = parser->inner | IBV_FLOW_SPEC_IPV6, .size = ipv6_size, }; - /* Don't update layer for the inner pattern. */ - if (!parser->inner) - parser->layer = HASH_RXQ_IPV6; + if (parser->layer == HASH_RXQ_TUNNEL && + parser->tunnel == ptype_ext[PTYPE_IDX(RTE_PTYPE_TUNNEL_VXLAN)] && + !priv->config.l3_vxlan_en) + return rte_flow_error_set(data->error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "L3 VXLAN not enabled by device" + " parameter and/or not configured" + " in firmware"); + parser->layer = HASH_RXQ_IPV6; if (spec) { unsigned int i; uint32_t vtc_flow_val; @@ -1410,28 +1679,28 @@ mlx5_flow_create_ipv6(const struct rte_flow_item *item, * Default bit-masks to use when item->mask is not provided. * @param data[in, out] * User structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int mlx5_flow_create_udp(const struct rte_flow_item *item, const void *default_mask, - void *data) + struct mlx5_flow_data *data) { const struct rte_flow_item_udp *spec = item->spec; const struct rte_flow_item_udp *mask = item->mask; - struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data; + struct mlx5_flow_parse *parser = data->parser; unsigned int udp_size = sizeof(struct ibv_flow_spec_tcp_udp); struct ibv_flow_spec_tcp_udp udp = { .type = parser->inner | IBV_FLOW_SPEC_UDP, .size = udp_size, }; - /* Don't update layer for the inner pattern. 
*/ - if (!parser->inner) { - if (parser->layer == HASH_RXQ_IPV4) - parser->layer = HASH_RXQ_UDPV4; - else - parser->layer = HASH_RXQ_UDPV6; - } + if (parser->layer == HASH_RXQ_IPV4) + parser->layer = HASH_RXQ_UDPV4; + else + parser->layer = HASH_RXQ_UDPV6; if (spec) { if (!mask) mask = default_mask; @@ -1456,28 +1725,28 @@ mlx5_flow_create_udp(const struct rte_flow_item *item, * Default bit-masks to use when item->mask is not provided. * @param data[in, out] * User structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int mlx5_flow_create_tcp(const struct rte_flow_item *item, const void *default_mask, - void *data) + struct mlx5_flow_data *data) { const struct rte_flow_item_tcp *spec = item->spec; const struct rte_flow_item_tcp *mask = item->mask; - struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data; + struct mlx5_flow_parse *parser = data->parser; unsigned int tcp_size = sizeof(struct ibv_flow_spec_tcp_udp); struct ibv_flow_spec_tcp_udp tcp = { .type = parser->inner | IBV_FLOW_SPEC_TCP, .size = tcp_size, }; - /* Don't update layer for the inner pattern. */ - if (!parser->inner) { - if (parser->layer == HASH_RXQ_IPV4) - parser->layer = HASH_RXQ_TCPV4; - else - parser->layer = HASH_RXQ_TCPV6; - } + if (parser->layer == HASH_RXQ_IPV4) + parser->layer = HASH_RXQ_TCPV4; + else + parser->layer = HASH_RXQ_TCPV6; if (spec) { if (!mask) mask = default_mask; @@ -1502,15 +1771,18 @@ mlx5_flow_create_tcp(const struct rte_flow_item *item, * Default bit-masks to use when item->mask is not provided. * @param data[in, out] * User structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int mlx5_flow_create_vxlan(const struct rte_flow_item *item, const void *default_mask, - void *data) + struct mlx5_flow_data *data) { const struct rte_flow_item_vxlan *spec = item->spec; const struct rte_flow_item_vxlan *mask = item->mask; - struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data; + struct mlx5_flow_parse *parser = data->parser; unsigned int size = sizeof(struct ibv_flow_spec_tunnel); struct ibv_flow_spec_tunnel vxlan = { .type = parser->inner | IBV_FLOW_SPEC_VXLAN_TUNNEL, @@ -1523,6 +1795,9 @@ mlx5_flow_create_vxlan(const struct rte_flow_item *item, id.vni[0] = 0; parser->inner = IBV_FLOW_SPEC_INNER; + parser->tunnel = ptype_ext[PTYPE_IDX(RTE_PTYPE_TUNNEL_VXLAN)]; + parser->out_layer = parser->layer; + parser->layer = HASH_RXQ_TUNNEL; if (spec) { if (!mask) mask = default_mask; @@ -1541,12 +1816,250 @@ mlx5_flow_create_vxlan(const struct rte_flow_item *item, * before will also match this rule. * To avoid such situation, VNI 0 is currently refused. */ - if (!vxlan.val.tunnel_id) - return EINVAL; + /* Only allow tunnel w/o tunnel id pattern after proper outer spec. */ + if (parser->out_layer == HASH_RXQ_ETH && !vxlan.val.tunnel_id) + return rte_flow_error_set(data->error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "VxLAN vni cannot be 0"); mlx5_flow_create_copy(parser, &vxlan, size); return 0; } +/** + * Convert VXLAN-GPE item to Verbs specification. + * + * @param item[in] + * Item specification. + * @param default_mask[in] + * Default bit-masks to use when item->mask is not provided. + * @param data[in, out] + * User structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
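+ * + * Matching is performed on the VNI only: the item requires the l3_vxlan_en device parameter and a pattern with a non-zero protocol field is rejected below.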
+ */ +static int +mlx5_flow_create_vxlan_gpe(const struct rte_flow_item *item, + const void *default_mask, + struct mlx5_flow_data *data) +{ + struct priv *priv = data->dev->data->dev_private; + const struct rte_flow_item_vxlan_gpe *spec = item->spec; + const struct rte_flow_item_vxlan_gpe *mask = item->mask; + struct mlx5_flow_parse *parser = data->parser; + unsigned int size = sizeof(struct ibv_flow_spec_tunnel); + struct ibv_flow_spec_tunnel vxlan = { + .type = parser->inner | IBV_FLOW_SPEC_VXLAN_TUNNEL, + .size = size, + }; + union vni { + uint32_t vlan_id; + uint8_t vni[4]; + } id; + + if (!priv->config.l3_vxlan_en) + return rte_flow_error_set(data->error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "L3 VXLAN not enabled by device" + " parameter and/or not configured" + " in firmware"); + id.vni[0] = 0; + parser->inner = IBV_FLOW_SPEC_INNER; + parser->tunnel = ptype_ext[PTYPE_IDX(RTE_PTYPE_TUNNEL_VXLAN_GPE)]; + parser->out_layer = parser->layer; + parser->layer = HASH_RXQ_TUNNEL; + if (spec) { + if (!mask) + mask = default_mask; + memcpy(&id.vni[1], spec->vni, 3); + vxlan.val.tunnel_id = id.vlan_id; + memcpy(&id.vni[1], mask->vni, 3); + vxlan.mask.tunnel_id = id.vlan_id; + if (spec->protocol) + return rte_flow_error_set(data->error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "VxLAN-GPE protocol not" + " supported"); + /* Remove unwanted bits from values. */ + vxlan.val.tunnel_id &= vxlan.mask.tunnel_id; + } + /* + * Tunnel id 0 is equivalent to not adding a VXLAN layer: if only this + * layer is defined in the Verbs specification, it is interpreted as a + * wildcard and all packets will match this rule; if it follows a full + * stack layer (e.g. eth / ipv4 / udp), all packets matching the layers + * before will also match this rule. + * To avoid such a situation, VNI 0 is currently refused. + */ + /* Only allow tunnel w/o tunnel id pattern after proper outer spec. */ + if (parser->out_layer == HASH_RXQ_ETH && !vxlan.val.tunnel_id) + return rte_flow_error_set(data->error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "VxLAN-GPE vni cannot be 0"); + mlx5_flow_create_copy(parser, &vxlan, size); + return 0; +} + +/** + * Convert GRE item to Verbs specification. + * + * @param item[in] + * Item specification. + * @param default_mask[in] + * Default bit-masks to use when item->mask is not provided. + * @param data[in, out] + * User structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
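+ * + * When the Verbs library lacks MPLS support (HAVE_IBV_DEVICE_MPLS_SUPPORT undefined) the item degrades to a generic tunnel specification; in both cases the outer IP protocol is forced to 47 (MLX5_GRE) below.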
+ */ +static int +mlx5_flow_create_gre(const struct rte_flow_item *item, + const void *default_mask, + struct mlx5_flow_data *data) +{ + struct mlx5_flow_parse *parser = data->parser; +#ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT + (void)default_mask; + unsigned int size = sizeof(struct ibv_flow_spec_tunnel); + struct ibv_flow_spec_tunnel tunnel = { + .type = parser->inner | IBV_FLOW_SPEC_VXLAN_TUNNEL, + .size = size, + }; +#else + const struct rte_flow_item_gre *spec = item->spec; + const struct rte_flow_item_gre *mask = item->mask; + unsigned int size = sizeof(struct ibv_flow_spec_gre); + struct ibv_flow_spec_gre tunnel = { + .type = parser->inner | IBV_FLOW_SPEC_GRE, + .size = size, + }; +#endif + struct ibv_flow_spec_ipv4_ext *ipv4; + struct ibv_flow_spec_ipv6 *ipv6; + unsigned int i; + + parser->inner = IBV_FLOW_SPEC_INNER; + parser->tunnel = ptype_ext[PTYPE_IDX(RTE_PTYPE_TUNNEL_GRE)]; + parser->out_layer = parser->layer; + parser->layer = HASH_RXQ_TUNNEL; +#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT + if (spec) { + if (!mask) + mask = default_mask; + tunnel.val.c_ks_res0_ver = spec->c_rsvd0_ver; + tunnel.val.protocol = spec->protocol; + tunnel.mask.c_ks_res0_ver = mask->c_rsvd0_ver; + tunnel.mask.protocol = mask->protocol; + /* Remove unwanted bits from values. */ + tunnel.val.c_ks_res0_ver &= tunnel.mask.c_ks_res0_ver; + tunnel.val.protocol &= tunnel.mask.protocol; + tunnel.val.key &= tunnel.mask.key; + } +#endif + /* Update encapsulation IP layer protocol. */ + for (i = 0; i != hash_rxq_init_n; ++i) { + if (!parser->queue[i].ibv_attr) + continue; + if (parser->out_layer == HASH_RXQ_IPV4) { + ipv4 = (void *)((uintptr_t)parser->queue[i].ibv_attr + + parser->queue[i].offset - + sizeof(struct ibv_flow_spec_ipv4_ext)); + if (ipv4->mask.proto && ipv4->val.proto != MLX5_GRE) + break; + ipv4->val.proto = MLX5_GRE; + ipv4->mask.proto = 0xff; + } else if (parser->out_layer == HASH_RXQ_IPV6) { + ipv6 = (void *)((uintptr_t)parser->queue[i].ibv_attr + + parser->queue[i].offset - + sizeof(struct ibv_flow_spec_ipv6)); + if (ipv6->mask.next_hdr && + ipv6->val.next_hdr != MLX5_GRE) + break; + ipv6->val.next_hdr = MLX5_GRE; + ipv6->mask.next_hdr = 0xff; + } + } + if (i != hash_rxq_init_n) + return rte_flow_error_set(data->error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "IP protocol of GRE must be 47"); + mlx5_flow_create_copy(parser, &tunnel, size); + return 0; +} + +/** + * Convert MPLS item to Verbs specification. + * MPLS tunnel types currently supported are MPLS-in-GRE and MPLS-in-UDP. + * + * @param item[in] + * Item specification. + * @param default_mask[in] + * Default bit-masks to use when item->mask is not provided. + * @param data[in, out] + * User structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
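+ * + * The tunnel type is deduced from the preceding layer: MPLS-in-UDP when the item follows an outer UDP item, MPLS-in-GRE otherwise.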
+ */ +static int +mlx5_flow_create_mpls(const struct rte_flow_item *item, + const void *default_mask, + struct mlx5_flow_data *data) +{ +#ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT + (void)default_mask; + return rte_flow_error_set(data->error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "MPLS is not supported by the driver"); +#else + const struct rte_flow_item_mpls *spec = item->spec; + const struct rte_flow_item_mpls *mask = item->mask; + struct mlx5_flow_parse *parser = data->parser; + unsigned int size = sizeof(struct ibv_flow_spec_mpls); + struct ibv_flow_spec_mpls mpls = { + .type = IBV_FLOW_SPEC_MPLS, + .size = size, + }; + + parser->inner = IBV_FLOW_SPEC_INNER; + if (parser->layer == HASH_RXQ_UDPV4 || + parser->layer == HASH_RXQ_UDPV6) { + parser->tunnel = + ptype_ext[PTYPE_IDX(RTE_PTYPE_TUNNEL_MPLS_IN_UDP)]; + parser->out_layer = parser->layer; + } else { + parser->tunnel = + ptype_ext[PTYPE_IDX(RTE_PTYPE_TUNNEL_MPLS_IN_GRE)]; + /* parser->out_layer keeps the value set by the preceding GRE item. */ + } + parser->layer = HASH_RXQ_TUNNEL; + if (spec) { + if (!mask) + mask = default_mask; + /* + * The verbs label field includes the entire MPLS header: + * bits 0:19 - label value field. + * bits 20:22 - traffic class field. + * bit 23 - bottom of stack bit. + * bits 24:31 - ttl field. + */ + mpls.val.label = *(const uint32_t *)spec; + mpls.mask.label = *(const uint32_t *)mask; + /* Remove unwanted bits from values. */ + mpls.val.label &= mpls.mask.label; + } + mlx5_flow_create_copy(parser, &mpls, size); + return 0; +#endif +} + /** * Convert mark/flag action to Verbs specification. * @@ -1554,6 +2067,9 @@ mlx5_flow_create_vxlan(const struct rte_flow_item *item, * Internal parser structure. * @param mark_id * Mark identifier. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int mlx5_flow_create_flag_mark(struct mlx5_flow_parse *parser, uint32_t mark_id) @@ -1573,19 +2089,20 @@ mlx5_flow_create_flag_mark(struct mlx5_flow_parse *parser, uint32_t mark_id) /** * Convert count action to Verbs specification. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param parser * Pointer to MLX5 flow parser structure. * * @return - * 0 on success, errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int -mlx5_flow_create_count(struct priv *priv __rte_unused, +mlx5_flow_create_count(struct rte_eth_dev *dev __rte_unused, struct mlx5_flow_parse *parser __rte_unused) { #ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT + struct priv *priv = dev->data->dev_private; unsigned int size = sizeof(struct ibv_flow_spec_counter_action); struct ibv_counter_set_init_attr init_attr = {0}; struct ibv_flow_spec_counter_action counter = { @@ -1596,8 +2113,10 @@ mlx5_flow_create_count(struct priv *priv __rte_unused, init_attr.counter_set_id = 0; parser->cs = mlx5_glue->create_counter_set(priv->ctx, &init_attr); - if (!parser->cs) - return EINVAL; + if (!parser->cs) { + rte_errno = EINVAL; + return -rte_errno; + } counter.counter_set_handle = parser->cs->handle; mlx5_flow_create_copy(parser, &counter, size); #endif @@ -1607,8 +2126,8 @@ mlx5_flow_create_count(struct priv *priv __rte_unused, /** * Complete flow rule creation with a drop queue. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param parser * Internal parser structure. 
* @param flow @@ -1617,17 +2136,17 @@ mlx5_flow_create_count(struct priv *priv __rte_unused, * Perform verbose error reporting if not NULL. * * @return - * 0 on success, errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int -priv_flow_create_action_queue_drop(struct priv *priv, +mlx5_flow_create_action_queue_drop(struct rte_eth_dev *dev, struct mlx5_flow_parse *parser, struct rte_flow *flow, struct rte_flow_error *error) { + struct priv *priv = dev->data->dev_private; struct ibv_flow_spec_action_drop *drop; unsigned int size = sizeof(struct ibv_flow_spec_action_drop); - int err = 0; assert(priv->pd); assert(priv->ctx); @@ -1644,7 +2163,7 @@ priv_flow_create_action_queue_drop(struct priv *priv, parser->queue[HASH_RXQ_ETH].ibv_attr; if (parser->count) flow->cs = parser->cs; - if (!priv->dev->data->dev_started) + if (!dev->data->dev_started) return 0; parser->queue[HASH_RXQ_ETH].ibv_attr = NULL; flow->frxq[HASH_RXQ_ETH].ibv_flow = @@ -1653,7 +2172,6 @@ priv_flow_create_action_queue_drop(struct priv *priv, if (!flow->frxq[HASH_RXQ_ETH].ibv_flow) { rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, NULL, "flow rule creation failure"); - err = ENOMEM; goto error; } return 0; @@ -1673,14 +2191,14 @@ error: flow->cs = NULL; parser->cs = NULL; } - return err; + return -rte_errno; } /** * Create hash Rx queues when RSS is enabled. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param parser * Internal parser structure. * @param flow @@ -1689,10 +2207,10 @@ error: * Perform verbose error reporting if not NULL. * * @return - * 0 on success, a errno value otherwise and rte_errno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
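+ * + * An existing hash Rx queue is reused through mlx5_hrxq_get() whenever possible; mlx5_hrxq_new() is called only when the lookup fails.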
*/ static int -priv_flow_create_action_queue_rss(struct priv *priv, +mlx5_flow_create_action_queue_rss(struct rte_eth_dev *dev, struct mlx5_flow_parse *parser, struct rte_flow *flow, struct rte_flow_error *error) @@ -1700,46 +2218,144 @@ priv_flow_create_action_queue_rss(struct priv *priv, unsigned int i; for (i = 0; i != hash_rxq_init_n; ++i) { - uint64_t hash_fields; - if (!parser->queue[i].ibv_attr) continue; flow->frxq[i].ibv_attr = parser->queue[i].ibv_attr; parser->queue[i].ibv_attr = NULL; - hash_fields = hash_rxq_init[i].hash_fields; - if (!priv->dev->data->dev_started) + flow->frxq[i].hash_fields = parser->queue[i].hash_fields; + if (!dev->data->dev_started) continue; flow->frxq[i].hrxq = - mlx5_priv_hrxq_get(priv, - parser->rss_conf.rss_key, - parser->rss_conf.rss_key_len, - hash_fields, - parser->queues, - parser->queues_n); + mlx5_hrxq_get(dev, + parser->rss_conf.key, + parser->rss_conf.key_len, + flow->frxq[i].hash_fields, + parser->rss_conf.queue, + parser->rss_conf.queue_num, + parser->tunnel, + parser->rss_conf.level); if (flow->frxq[i].hrxq) continue; flow->frxq[i].hrxq = - mlx5_priv_hrxq_new(priv, - parser->rss_conf.rss_key, - parser->rss_conf.rss_key_len, - hash_fields, - parser->queues, - parser->queues_n); + mlx5_hrxq_new(dev, + parser->rss_conf.key, + parser->rss_conf.key_len, + flow->frxq[i].hash_fields, + parser->rss_conf.queue, + parser->rss_conf.queue_num, + parser->tunnel, + parser->rss_conf.level); if (!flow->frxq[i].hrxq) { - rte_flow_error_set(error, ENOMEM, - RTE_FLOW_ERROR_TYPE_HANDLE, - NULL, "cannot create hash rxq"); - return ENOMEM; + return rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, + "cannot create hash rxq"); } } return 0; } +/** + * RXQ update after flow rule creation. + * + * @param dev + * Pointer to Ethernet device. + * @param flow + * Pointer to the flow rule. + */ +static void +mlx5_flow_create_update_rxqs(struct rte_eth_dev *dev, struct rte_flow *flow) +{ + struct priv *priv = dev->data->dev_private; + unsigned int i; + unsigned int j; + + if (!dev->data->dev_started) + return; + for (i = 0; i != flow->rss_conf.queue_num; ++i) { + struct mlx5_rxq_data *rxq_data = (*priv->rxqs) + [(*flow->queues)[i]]; + struct mlx5_rxq_ctrl *rxq_ctrl = + container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); + uint8_t tunnel = PTYPE_IDX(flow->tunnel); + + rxq_data->mark |= flow->mark; + if (!tunnel) + continue; + rxq_ctrl->tunnel_types[tunnel] += 1; + /* Clear tunnel type if more than one tunnel types set. */ + for (j = 0; j != RTE_DIM(rxq_ctrl->tunnel_types); ++j) { + if (j == tunnel) + continue; + if (rxq_ctrl->tunnel_types[j] > 0) { + rxq_data->tunnel = 0; + break; + } + } + if (j == RTE_DIM(rxq_ctrl->tunnel_types)) + rxq_data->tunnel = flow->tunnel; + } +} + +/** + * Dump flow hash RX queue detail. + * + * @param dev + * Pointer to Ethernet device. + * @param flow + * Pointer to the rte_flow. + * @param hrxq_idx + * Hash RX queue index. 
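+ * + * Debug helper, compiled to a no-op when NDEBUG is defined.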
+ */ +static void +mlx5_flow_dump(struct rte_eth_dev *dev __rte_unused, + struct rte_flow *flow __rte_unused, + unsigned int hrxq_idx __rte_unused) +{ +#ifndef NDEBUG + uintptr_t spec_ptr; + uint16_t j; + char buf[256]; + uint8_t off; + uint64_t extra_hash_fields = 0; + +#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT + if (flow->tunnel && flow->rss_conf.level > 1) + extra_hash_fields = (uint32_t)IBV_RX_HASH_INNER; +#endif + spec_ptr = (uintptr_t)(flow->frxq[hrxq_idx].ibv_attr + 1); + for (j = 0, off = 0; j < flow->frxq[hrxq_idx].ibv_attr->num_of_specs; + j++) { + struct ibv_flow_spec *spec = (void *)spec_ptr; + off += sprintf(buf + off, " %x(%hu)", spec->hdr.type, + spec->hdr.size); + spec_ptr += spec->hdr.size; + } + DRV_LOG(DEBUG, + "port %u Verbs flow %p type %u: hrxq:%p qp:%p ind:%p," + " hash:%" PRIx64 "/%u specs:%hhu(%hu), priority:%hu, type:%d," + " flags:%x, comp_mask:%x specs:%s", + dev->data->port_id, (void *)flow, hrxq_idx, + (void *)flow->frxq[hrxq_idx].hrxq, + (void *)flow->frxq[hrxq_idx].hrxq->qp, + (void *)flow->frxq[hrxq_idx].hrxq->ind_table, + (flow->frxq[hrxq_idx].hash_fields | extra_hash_fields), + flow->rss_conf.queue_num, + flow->frxq[hrxq_idx].ibv_attr->num_of_specs, + flow->frxq[hrxq_idx].ibv_attr->size, + flow->frxq[hrxq_idx].ibv_attr->priority, + flow->frxq[hrxq_idx].ibv_attr->type, + flow->frxq[hrxq_idx].ibv_attr->flags, + flow->frxq[hrxq_idx].ibv_attr->comp_mask, + buf); +#endif +} + /** * Complete flow rule creation. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param parser * Internal parser structure. * @param flow @@ -1748,26 +2364,28 @@ priv_flow_create_action_queue_rss(struct priv *priv, * Perform verbose error reporting if not NULL. * * @return - * 0 on success, a errno value otherwise and rte_errno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int -priv_flow_create_action_queue(struct priv *priv, +mlx5_flow_create_action_queue(struct rte_eth_dev *dev, struct mlx5_flow_parse *parser, struct rte_flow *flow, struct rte_flow_error *error) { - int err = 0; + struct priv *priv __rte_unused = dev->data->dev_private; + int ret; unsigned int i; + unsigned int flows_n = 0; assert(priv->pd); assert(priv->ctx); assert(!parser->drop); - err = priv_flow_create_action_queue_rss(priv, parser, flow, error); - if (err) + ret = mlx5_flow_create_action_queue_rss(dev, parser, flow, error); + if (ret) goto error; if (parser->count) flow->cs = parser->cs; - if (!priv->dev->data->dev_started) + if (!dev->data->dev_started) return 0; for (i = 0; i != hash_rxq_init_n; ++i) { if (!flow->frxq[i].hrxq) @@ -1775,26 +2393,24 @@ priv_flow_create_action_queue(struct priv *priv, flow->frxq[i].ibv_flow = mlx5_glue->create_flow(flow->frxq[i].hrxq->qp, flow->frxq[i].ibv_attr); + mlx5_flow_dump(dev, flow, i); if (!flow->frxq[i].ibv_flow) { rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, NULL, "flow rule creation failure"); - err = ENOMEM; goto error; } - DEBUG("%p type %d QP %p ibv_flow %p", - (void *)flow, i, - (void *)flow->frxq[i].hrxq, - (void *)flow->frxq[i].ibv_flow); + ++flows_n; } - for (i = 0; i != parser->queues_n; ++i) { - struct mlx5_rxq_data *q = - (*priv->rxqs)[parser->queues[i]]; - - q->mark |= parser->mark; + if (!flows_n) { + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, "internal error in flow creation"); + goto error; } + mlx5_flow_create_update_rxqs(dev, flow); return 0; error: + ret = rte_errno; /* Save rte_errno before cleanup. 
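The release calls below may overwrite it. 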
*/ assert(flow); for (i = 0; i != hash_rxq_init_n; ++i) { if (flow->frxq[i].ibv_flow) { @@ -1803,7 +2419,7 @@ error: claim_zero(mlx5_glue->destroy_flow(ibv_flow)); } if (flow->frxq[i].hrxq) - mlx5_priv_hrxq_release(priv, flow->frxq[i].hrxq); + mlx5_hrxq_release(dev, flow->frxq[i].hrxq); if (flow->frxq[i].ibv_attr) rte_free(flow->frxq[i].ibv_attr); } @@ -1812,14 +2428,15 @@ error: flow->cs = NULL; parser->cs = NULL; } - return err; + rte_errno = ret; /* Restore rte_errno. */ + return -rte_errno; } /** * Convert a flow. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param list * Pointer to a TAILQ flow list. * @param[in] attr @@ -1832,26 +2449,27 @@ error: * Perform verbose error reporting if not NULL. * * @return - * A flow on success, NULL otherwise. + * A flow on success, NULL otherwise and rte_errno is set. */ static struct rte_flow * -priv_flow_create(struct priv *priv, - struct mlx5_flows *list, - const struct rte_flow_attr *attr, - const struct rte_flow_item items[], - const struct rte_flow_action actions[], - struct rte_flow_error *error) +mlx5_flow_list_create(struct rte_eth_dev *dev, + struct mlx5_flows *list, + const struct rte_flow_attr *attr, + const struct rte_flow_item items[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) { struct mlx5_flow_parse parser = { .create = 1, }; struct rte_flow *flow = NULL; unsigned int i; - int err; + int ret; - err = priv_flow_convert(priv, attr, items, actions, error, &parser); - if (err) + ret = mlx5_flow_convert(dev, attr, items, actions, error, &parser); + if (ret) goto exit; flow = rte_calloc(__func__, 1, - sizeof(*flow) + parser.queues_n * sizeof(uint16_t), + sizeof(*flow) + + parser.rss_conf.queue_num * sizeof(uint16_t), 0); if (!flow) { rte_flow_error_set(error, ENOMEM, @@ -1860,28 +2478,38 @@ priv_flow_create(struct priv *priv, "cannot allocate flow memory"); return NULL; } - /* Copy queues configuration. */ + /* Copy configuration. */ flow->queues = (uint16_t (*)[])(flow + 1); - memcpy(flow->queues, parser.queues, parser.queues_n * sizeof(uint16_t)); - flow->queues_n = parser.queues_n; + flow->tunnel = parser.tunnel; + flow->rss_conf = (struct rte_flow_action_rss){ + .func = RTE_ETH_HASH_FUNCTION_DEFAULT, + .level = parser.rss_conf.level, + .types = parser.rss_conf.types, + .key_len = parser.rss_conf.key_len, + .queue_num = parser.rss_conf.queue_num, + .key = memcpy(flow->rss_key, parser.rss_conf.key, + sizeof(*parser.rss_conf.key) * + parser.rss_conf.key_len), + .queue = memcpy(flow->queues, parser.rss_conf.queue, + sizeof(*parser.rss_conf.queue) * + parser.rss_conf.queue_num), + }; flow->mark = parser.mark; - /* Copy RSS configuration. */ - flow->rss_conf = parser.rss_conf; - flow->rss_conf.rss_key = flow->rss_key; - memcpy(flow->rss_key, parser.rss_key, parser.rss_conf.rss_key_len); /* finalise the flow. 
*/ if (parser.drop) - err = priv_flow_create_action_queue_drop(priv, &parser, flow, + ret = mlx5_flow_create_action_queue_drop(dev, &parser, flow, error); else - err = priv_flow_create_action_queue(priv, &parser, flow, error); - if (err) + ret = mlx5_flow_create_action_queue(dev, &parser, flow, error); + if (ret) goto exit; TAILQ_INSERT_TAIL(list, flow, next); - DEBUG("Flow created %p", (void *)flow); + DRV_LOG(DEBUG, "port %u flow created %p", dev->data->port_id, + (void *)flow); return flow; exit: - ERROR("flow creation error: %s", error->message); + DRV_LOG(ERR, "port %u flow creation error: %s", dev->data->port_id, + error->message); for (i = 0; i != hash_rxq_init_n; ++i) { if (parser.queue[i].ibv_attr) rte_free(parser.queue[i].ibv_attr); @@ -1903,14 +2531,9 @@ mlx5_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_action actions[], struct rte_flow_error *error) { - struct priv *priv = dev->data->dev_private; - int ret; struct mlx5_flow_parse parser = { .create = 0, }; - priv_lock(priv); - ret = priv_flow_convert(priv, attr, items, actions, error, &parser); - priv_unlock(priv); - return ret; + return mlx5_flow_convert(dev, attr, items, actions, error, &parser); } /** @@ -1927,35 +2550,60 @@ mlx5_flow_create(struct rte_eth_dev *dev, struct rte_flow_error *error) { struct priv *priv = dev->data->dev_private; - struct rte_flow *flow; - priv_lock(priv); - flow = priv_flow_create(priv, &priv->flows, attr, items, actions, - error); - priv_unlock(priv); - return flow; + return mlx5_flow_list_create(dev, &priv->flows, attr, items, actions, + error); } /** - * Destroy a flow. + * Destroy a flow in a list. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param list * Pointer to a TAILQ flow list. * @param[in] flow * Flow to destroy. */ static void -priv_flow_destroy(struct priv *priv, - struct mlx5_flows *list, - struct rte_flow *flow) +mlx5_flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list, + struct rte_flow *flow) { + struct priv *priv = dev->data->dev_private; unsigned int i; - if (flow->drop || !flow->mark) + if (flow->drop || !dev->data->dev_started) goto free; - for (i = 0; i != flow->queues_n; ++i) { + for (i = 0; flow->tunnel && i != flow->rss_conf.queue_num; ++i) { + /* Update queue tunnel type. */ + struct mlx5_rxq_data *rxq_data = (*priv->rxqs) + [(*flow->queues)[i]]; + struct mlx5_rxq_ctrl *rxq_ctrl = + container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); + uint8_t tunnel = PTYPE_IDX(flow->tunnel); + + assert(rxq_ctrl->tunnel_types[tunnel] > 0); + rxq_ctrl->tunnel_types[tunnel] -= 1; + if (!rxq_ctrl->tunnel_types[tunnel]) { + /* Update tunnel type. */ + uint8_t j; + uint8_t types = 0; + uint8_t last; + + for (j = 0; j < RTE_DIM(rxq_ctrl->tunnel_types); j++) + if (rxq_ctrl->tunnel_types[j]) { + types += 1; + last = j; + } + /* Keep same if more than one tunnel types left. */ + if (types == 1) + rxq_data->tunnel = ptype_ext[last]; + else if (types == 0) + /* No tunnel type left. 
*/ + rxq_data->tunnel = 0; + } + } + for (i = 0; flow->mark && i != flow->rss_conf.queue_num; ++i) { struct rte_flow *tmp; int mark = 0; @@ -1998,7 +2646,7 @@ free: claim_zero(mlx5_glue->destroy_flow (frxq->ibv_flow)); if (frxq->hrxq) - mlx5_priv_hrxq_release(priv, frxq->hrxq); + mlx5_hrxq_release(dev, frxq->hrxq); if (frxq->ibv_attr) rte_free(frxq->ibv_attr); } @@ -2008,53 +2656,60 @@ free: flow->cs = NULL; } TAILQ_REMOVE(list, flow, next); - DEBUG("Flow destroyed %p", (void *)flow); + DRV_LOG(DEBUG, "port %u flow destroyed %p", dev->data->port_id, + (void *)flow); rte_free(flow); } /** * Destroy all flows. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param list * Pointer to a TAILQ flow list. */ void -priv_flow_flush(struct priv *priv, struct mlx5_flows *list) +mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list) { while (!TAILQ_EMPTY(list)) { struct rte_flow *flow; flow = TAILQ_FIRST(list); - priv_flow_destroy(priv, list, flow); + mlx5_flow_list_destroy(dev, list, flow); } } /** * Create drop queue. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * * @return - * 0 on success. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int -priv_flow_create_drop_queue(struct priv *priv) +mlx5_flow_create_drop_queue(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; struct mlx5_hrxq_drop *fdq = NULL; assert(priv->pd); assert(priv->ctx); fdq = rte_calloc(__func__, 1, sizeof(*fdq), 0); if (!fdq) { - WARN("cannot allocate memory for drop queue"); - goto error; + DRV_LOG(WARNING, + "port %u cannot allocate memory for drop queue", + dev->data->port_id); + rte_errno = ENOMEM; + return -rte_errno; } fdq->cq = mlx5_glue->create_cq(priv->ctx, 1, NULL, NULL, 0); if (!fdq->cq) { - WARN("cannot allocate CQ for drop queue"); + DRV_LOG(WARNING, "port %u cannot allocate CQ for drop queue", + dev->data->port_id); + rte_errno = errno; goto error; } fdq->wq = mlx5_glue->create_wq @@ -2067,7 +2722,9 @@ priv_flow_create_drop_queue(struct priv *priv) .cq = fdq->cq, }); if (!fdq->wq) { - WARN("cannot allocate WQ for drop queue"); + DRV_LOG(WARNING, "port %u cannot allocate WQ for drop queue", + dev->data->port_id); + rte_errno = errno; goto error; } fdq->ind_table = mlx5_glue->create_rwq_ind_table @@ -2078,7 +2735,11 @@ priv_flow_create_drop_queue(struct priv *priv) .comp_mask = 0, }); if (!fdq->ind_table) { - WARN("cannot allocate indirection table for drop queue"); + DRV_LOG(WARNING, + "port %u cannot allocate indirection table for drop" + " queue", + dev->data->port_id); + rte_errno = errno; goto error; } fdq->qp = mlx5_glue->create_qp_ex @@ -2100,7 +2761,9 @@ priv_flow_create_drop_queue(struct priv *priv) .pd = priv->pd }); if (!fdq->qp) { - WARN("cannot allocate QP for drop queue"); + DRV_LOG(WARNING, "port %u cannot allocate QP for drop queue", + dev->data->port_id); + rte_errno = errno; goto error; } priv->flow_drop_queue = fdq; @@ -2117,18 +2780,19 @@ error: if (fdq) rte_free(fdq); priv->flow_drop_queue = NULL; - return -1; + return -rte_errno; } /** * Delete drop queue. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. 
*/ void -priv_flow_delete_drop_queue(struct priv *priv) +mlx5_flow_delete_drop_queue(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; struct mlx5_hrxq_drop *fdq = priv->flow_drop_queue; if (!fdq) @@ -2148,18 +2812,19 @@ priv_flow_delete_drop_queue(struct priv *priv) /** * Remove all flows. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param list * Pointer to a TAILQ flow list. */ void -priv_flow_stop(struct priv *priv, struct mlx5_flows *list) +mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list) { + struct priv *priv = dev->data->dev_private; struct rte_flow *flow; + unsigned int i; TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next) { - unsigned int i; struct mlx5_ind_table_ibv *ind_tbl = NULL; if (flow->drop) { @@ -2168,7 +2833,8 @@ priv_flow_stop(struct priv *priv, struct mlx5_flows *list) claim_zero(mlx5_glue->destroy_flow (flow->frxq[HASH_RXQ_ETH].ibv_flow)); flow->frxq[HASH_RXQ_ETH].ibv_flow = NULL; - DEBUG("Flow %p removed", (void *)flow); + DRV_LOG(DEBUG, "port %u flow %p removed", + dev->data->port_id, (void *)flow); /* Next flow. */ continue; } @@ -2198,27 +2864,41 @@ priv_flow_stop(struct priv *priv, struct mlx5_flows *list) claim_zero(mlx5_glue->destroy_flow (flow->frxq[i].ibv_flow)); flow->frxq[i].ibv_flow = NULL; - mlx5_priv_hrxq_release(priv, flow->frxq[i].hrxq); + mlx5_hrxq_release(dev, flow->frxq[i].hrxq); flow->frxq[i].hrxq = NULL; } - DEBUG("Flow %p removed", (void *)flow); + DRV_LOG(DEBUG, "port %u flow %p removed", dev->data->port_id, + (void *)flow); + } + /* Cleanup Rx queue tunnel info. */ + for (i = 0; i != priv->rxqs_n; ++i) { + struct mlx5_rxq_data *q = (*priv->rxqs)[i]; + struct mlx5_rxq_ctrl *rxq_ctrl = + container_of(q, struct mlx5_rxq_ctrl, rxq); + + if (!q) + continue; + memset((void *)rxq_ctrl->tunnel_types, 0, + sizeof(rxq_ctrl->tunnel_types)); + q->tunnel = 0; } } /** * Add all flows. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param list * Pointer to a TAILQ flow list. * * @return - * 0 on success, a errno value otherwise and rte_errno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int -priv_flow_start(struct priv *priv, struct mlx5_flows *list) +mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list) { + struct priv *priv = dev->data->dev_private; struct rte_flow *flow; TAILQ_FOREACH(flow, list, next) { @@ -2230,12 +2910,14 @@ priv_flow_start(struct priv *priv, struct mlx5_flows *list) (priv->flow_drop_queue->qp, flow->frxq[HASH_RXQ_ETH].ibv_attr); if (!flow->frxq[HASH_RXQ_ETH].ibv_flow) { - DEBUG("Flow %p cannot be applied", - (void *)flow); + DRV_LOG(DEBUG, + "port %u flow %p cannot be applied", + dev->data->port_id, (void *)flow); rte_errno = EINVAL; - return rte_errno; + return -rte_errno; } - DEBUG("Flow %p applied", (void *)flow); + DRV_LOG(DEBUG, "port %u flow %p applied", + dev->data->port_id, (void *)flow); /* Next flow. 
*/ continue; } @@ -2243,41 +2925,46 @@ priv_flow_start(struct priv *priv, struct mlx5_flows *list) if (!flow->frxq[i].ibv_attr) continue; flow->frxq[i].hrxq = - mlx5_priv_hrxq_get(priv, flow->rss_conf.rss_key, - flow->rss_conf.rss_key_len, - hash_rxq_init[i].hash_fields, - (*flow->queues), - flow->queues_n); + mlx5_hrxq_get(dev, flow->rss_conf.key, + flow->rss_conf.key_len, + flow->frxq[i].hash_fields, + flow->rss_conf.queue, + flow->rss_conf.queue_num, + flow->tunnel, + flow->rss_conf.level); if (flow->frxq[i].hrxq) goto flow_create; flow->frxq[i].hrxq = - mlx5_priv_hrxq_new(priv, flow->rss_conf.rss_key, - flow->rss_conf.rss_key_len, - hash_rxq_init[i].hash_fields, - (*flow->queues), - flow->queues_n); + mlx5_hrxq_new(dev, flow->rss_conf.key, + flow->rss_conf.key_len, + flow->frxq[i].hash_fields, + flow->rss_conf.queue, + flow->rss_conf.queue_num, + flow->tunnel, + flow->rss_conf.level); if (!flow->frxq[i].hrxq) { - DEBUG("Flow %p cannot be applied", - (void *)flow); + DRV_LOG(DEBUG, + "port %u flow %p cannot create hash" + " rxq", + dev->data->port_id, (void *)flow); rte_errno = EINVAL; - return rte_errno; + return -rte_errno; } flow_create: + mlx5_flow_dump(dev, flow, i); flow->frxq[i].ibv_flow = mlx5_glue->create_flow(flow->frxq[i].hrxq->qp, flow->frxq[i].ibv_attr); if (!flow->frxq[i].ibv_flow) { - DEBUG("Flow %p cannot be applied", - (void *)flow); + DRV_LOG(DEBUG, + "port %u flow %p type %u cannot be" + " applied", + dev->data->port_id, (void *)flow, i); rte_errno = EINVAL; - return rte_errno; + return -rte_errno; } - DEBUG("Flow %p applied", (void *)flow); } - if (!flow->mark) - continue; - for (i = 0; i != flow->queues_n; ++i) - (*priv->rxqs)[(*flow->queues)[i]]->mark = 1; + mlx5_flow_create_update_rxqs(dev, flow); } return 0; } @@ -2285,20 +2972,21 @@ flow_create: /** * Verify the flow list is empty * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * * @return the number of flows not released. */ int -priv_flow_verify(struct priv *priv) +mlx5_flow_verify(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; struct rte_flow *flow; int ret = 0; TAILQ_FOREACH(flow, &priv->flows, next) { - DEBUG("%p: flow %p still referenced", (void *)priv, - (void *)flow); + DRV_LOG(DEBUG, "port %u flow %p still referenced", + dev->data->port_id, (void *)flow); ++ret; } return ret; @@ -2319,7 +3007,7 @@ priv_flow_verify(struct priv *priv) * A VLAN flow mask to apply. * * @return - * 0 on success. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
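+ * + * The flow is created with an RSS action spreading traffic over every queue currently present in the indirection table (see the action_rss initializer below).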
*/ int mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, @@ -2351,9 +3039,20 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, .type = RTE_FLOW_ITEM_TYPE_END, }, }; + uint16_t queue[priv->reta_idx_n]; + struct rte_flow_action_rss action_rss = { + .func = RTE_ETH_HASH_FUNCTION_DEFAULT, + .level = 0, + .types = priv->rss_conf.rss_hf, + .key_len = priv->rss_conf.rss_key_len, + .queue_num = priv->reta_idx_n, + .key = priv->rss_conf.rss_key, + .queue = queue, + }; struct rte_flow_action actions[] = { { .type = RTE_FLOW_ACTION_TYPE_RSS, + .conf = &action_rss, }, { .type = RTE_FLOW_ACTION_TYPE_END, @@ -2362,26 +3061,17 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, struct rte_flow *flow; struct rte_flow_error error; unsigned int i; - union { - struct rte_flow_action_rss rss; - struct { - const struct rte_eth_rss_conf *rss_conf; - uint16_t num; - uint16_t queue[RTE_MAX_QUEUES_PER_PORT]; - } local; - } action_rss; - - if (!priv->reta_idx_n) - return EINVAL; + + if (!priv->reta_idx_n) { + rte_errno = EINVAL; + return -rte_errno; + } for (i = 0; i != priv->reta_idx_n; ++i) - action_rss.local.queue[i] = (*priv->reta_idx)[i]; - action_rss.local.rss_conf = &priv->rss_conf; - action_rss.local.num = priv->reta_idx_n; - actions[0].conf = (const void *)&action_rss.rss; - flow = priv_flow_create(priv, &priv->ctrl_flows, &attr, items, actions, - &error); + queue[i] = (*priv->reta_idx)[i]; + flow = mlx5_flow_list_create(dev, &priv->ctrl_flows, &attr, items, + actions, &error); if (!flow) - return rte_errno; + return -rte_errno; return 0; } @@ -2396,7 +3086,7 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, * An Ethernet flow mask to apply. * * @return - * 0 on success. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_ctrl_flow(struct rte_eth_dev *dev, @@ -2415,14 +3105,11 @@ mlx5_ctrl_flow(struct rte_eth_dev *dev, int mlx5_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow, - struct rte_flow_error *error) + struct rte_flow_error *error __rte_unused) { struct priv *priv = dev->data->dev_private; - (void)error; - priv_lock(priv); - priv_flow_destroy(priv, &priv->flows, flow); - priv_unlock(priv); + mlx5_flow_list_destroy(dev, &priv->flows, flow); return 0; } @@ -2434,14 +3121,11 @@ mlx5_flow_destroy(struct rte_eth_dev *dev, */ int mlx5_flow_flush(struct rte_eth_dev *dev, - struct rte_flow_error *error) + struct rte_flow_error *error __rte_unused) { struct priv *priv = dev->data->dev_private; - (void)error; - priv_lock(priv); - priv_flow_flush(priv, &priv->flows); - priv_unlock(priv); + mlx5_flow_list_flush(dev, &priv->flows); return 0; } @@ -2455,10 +3139,10 @@ mlx5_flow_flush(struct rte_eth_dev *dev, * returned data from the counter. * * @return - * 0 on success, a errno value otherwise and rte_errno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
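+ * + * Verbs counters are cumulative, so the snapshot kept in counter_stats is subtracted (e.g. hits = counters[0] - counter_stats->hits) to report the delta since the last reset.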
*/ static int -priv_flow_query_count(struct ibv_counter_set *cs, +mlx5_flow_query_count(struct ibv_counter_set *cs, struct mlx5_flow_counter_stats *counter_stats, struct rte_flow_query_count *query_count, struct rte_flow_error *error) @@ -2472,15 +3156,13 @@ priv_flow_query_count(struct ibv_counter_set *cs, .out = counters, .outlen = 2 * sizeof(uint64_t), }; - int res = mlx5_glue->query_counter_set(&query_cs_attr, &query_out); + int err = mlx5_glue->query_counter_set(&query_cs_attr, &query_out); - if (res) { - rte_flow_error_set(error, -res, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, - "cannot read counter"); - return -res; - } + if (err) + return rte_flow_error_set(error, err, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "cannot read counter"); query_count->hits_set = 1; query_count->bytes_set = 1; query_count->hits = counters[0] - counter_stats->hits; @@ -2499,29 +3181,28 @@ priv_flow_query_count(struct ibv_counter_set *cs, * @see rte_flow_ops */ int -mlx5_flow_query(struct rte_eth_dev *dev, +mlx5_flow_query(struct rte_eth_dev *dev __rte_unused, struct rte_flow *flow, - enum rte_flow_action_type action __rte_unused, + const struct rte_flow_action *action __rte_unused, void *data, struct rte_flow_error *error) { - struct priv *priv = dev->data->dev_private; - int res = EINVAL; - - priv_lock(priv); if (flow->cs) { - res = priv_flow_query_count(flow->cs, - &flow->counter_stats, - (struct rte_flow_query_count *)data, - error); + int ret; + + ret = mlx5_flow_query_count(flow->cs, + &flow->counter_stats, + (struct rte_flow_query_count *)data, + error); + if (ret) + return ret; } else { - rte_flow_error_set(error, res, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, - "no counter found for flow"); + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "no counter found for flow"); } - priv_unlock(priv); - return -res; + return 0; } #endif @@ -2538,48 +3219,50 @@ mlx5_flow_isolate(struct rte_eth_dev *dev, { struct priv *priv = dev->data->dev_private; - priv_lock(priv); if (dev->data->dev_started) { rte_flow_error_set(error, EBUSY, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "port must be stopped first"); - priv_unlock(priv); return -rte_errno; } priv->isolated = !!enable; if (enable) - priv->dev->dev_ops = &mlx5_dev_ops_isolate; + dev->dev_ops = &mlx5_dev_ops_isolate; else - priv->dev->dev_ops = &mlx5_dev_ops; - priv_unlock(priv); + dev->dev_ops = &mlx5_dev_ops; return 0; } /** * Convert a flow director filter to a generic flow. * - * @param priv - * Private structure. + * @param dev + * Pointer to Ethernet device. * @param fdir_filter * Flow director filter to add. * @param attributes * Generic flow parameters structure. * * @return - * 0 on success, errno value on error. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int -priv_fdir_filter_convert(struct priv *priv, +mlx5_fdir_filter_convert(struct rte_eth_dev *dev, const struct rte_eth_fdir_filter *fdir_filter, struct mlx5_fdir *attributes) { + struct priv *priv = dev->data->dev_private; const struct rte_eth_fdir_input *input = &fdir_filter->input; + const struct rte_eth_fdir_masks *mask = + &dev->data->dev_conf.fdir_conf.mask; /* Validate queue number. 
*/ if (fdir_filter->action.rx_queue >= priv->rxqs_n) { - ERROR("invalid queue number %d", fdir_filter->action.rx_queue); - return EINVAL; + DRV_LOG(ERR, "port %u invalid queue number %d", + dev->data->port_id, fdir_filter->action.rx_queue); + rte_errno = EINVAL; + return -rte_errno; } attributes->attr.ingress = 1; attributes->items[0] = (struct rte_flow_item) { @@ -2600,144 +3283,140 @@ priv_fdir_filter_convert(struct priv *priv, }; break; default: - ERROR("invalid behavior %d", fdir_filter->action.behavior); - return ENOTSUP; + DRV_LOG(ERR, "port %u invalid behavior %d", + dev->data->port_id, + fdir_filter->action.behavior); + rte_errno = ENOTSUP; + return -rte_errno; } attributes->queue.index = fdir_filter->action.rx_queue; + /* Handle L3. */ switch (fdir_filter->input.flow_type) { case RTE_ETH_FLOW_NONFRAG_IPV4_UDP: + case RTE_ETH_FLOW_NONFRAG_IPV4_TCP: + case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER: attributes->l3.ipv4.hdr = (struct ipv4_hdr){ - .src_addr = input->flow.udp4_flow.ip.src_ip, - .dst_addr = input->flow.udp4_flow.ip.dst_ip, - .time_to_live = input->flow.udp4_flow.ip.ttl, - .type_of_service = input->flow.udp4_flow.ip.tos, - .next_proto_id = input->flow.udp4_flow.ip.proto, + .src_addr = input->flow.ip4_flow.src_ip, + .dst_addr = input->flow.ip4_flow.dst_ip, + .time_to_live = input->flow.ip4_flow.ttl, + .type_of_service = input->flow.ip4_flow.tos, + .next_proto_id = input->flow.ip4_flow.proto, }; - attributes->l4.udp.hdr = (struct udp_hdr){ - .src_port = input->flow.udp4_flow.src_port, - .dst_port = input->flow.udp4_flow.dst_port, + attributes->l3_mask.ipv4.hdr = (struct ipv4_hdr){ + .src_addr = mask->ipv4_mask.src_ip, + .dst_addr = mask->ipv4_mask.dst_ip, + .time_to_live = mask->ipv4_mask.ttl, + .type_of_service = mask->ipv4_mask.tos, + .next_proto_id = mask->ipv4_mask.proto, }; attributes->items[1] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &attributes->l3, - .mask = &attributes->l3, + .mask = &attributes->l3_mask, + }; + break; + case RTE_ETH_FLOW_NONFRAG_IPV6_UDP: + case RTE_ETH_FLOW_NONFRAG_IPV6_TCP: + case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER: + attributes->l3.ipv6.hdr = (struct ipv6_hdr){ + .hop_limits = input->flow.ipv6_flow.hop_limits, + .proto = input->flow.ipv6_flow.proto, + }; + + memcpy(attributes->l3.ipv6.hdr.src_addr, + input->flow.ipv6_flow.src_ip, + RTE_DIM(attributes->l3.ipv6.hdr.src_addr)); + memcpy(attributes->l3.ipv6.hdr.dst_addr, + input->flow.ipv6_flow.dst_ip, + RTE_DIM(attributes->l3.ipv6.hdr.src_addr)); + memcpy(attributes->l3_mask.ipv6.hdr.src_addr, + mask->ipv6_mask.src_ip, + RTE_DIM(attributes->l3_mask.ipv6.hdr.src_addr)); + memcpy(attributes->l3_mask.ipv6.hdr.dst_addr, + mask->ipv6_mask.dst_ip, + RTE_DIM(attributes->l3_mask.ipv6.hdr.src_addr)); + attributes->items[1] = (struct rte_flow_item){ + .type = RTE_FLOW_ITEM_TYPE_IPV6, + .spec = &attributes->l3, + .mask = &attributes->l3_mask, + }; + break; + default: + DRV_LOG(ERR, "port %u invalid flow type %d", + dev->data->port_id, fdir_filter->input.flow_type); + rte_errno = ENOTSUP; + return -rte_errno; + } + /* Handle L4.
*/ + switch (fdir_filter->input.flow_type) { + case RTE_ETH_FLOW_NONFRAG_IPV4_UDP: + attributes->l4.udp.hdr = (struct udp_hdr){ + .src_port = input->flow.udp4_flow.src_port, + .dst_port = input->flow.udp4_flow.dst_port, + }; + attributes->l4_mask.udp.hdr = (struct udp_hdr){ + .src_port = mask->src_port_mask, + .dst_port = mask->dst_port_mask, }; attributes->items[2] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &attributes->l4, - .mask = &attributes->l4, + .mask = &attributes->l4_mask, }; break; case RTE_ETH_FLOW_NONFRAG_IPV4_TCP: - attributes->l3.ipv4.hdr = (struct ipv4_hdr){ - .src_addr = input->flow.tcp4_flow.ip.src_ip, - .dst_addr = input->flow.tcp4_flow.ip.dst_ip, - .time_to_live = input->flow.tcp4_flow.ip.ttl, - .type_of_service = input->flow.tcp4_flow.ip.tos, - .next_proto_id = input->flow.tcp4_flow.ip.proto, - }; attributes->l4.tcp.hdr = (struct tcp_hdr){ .src_port = input->flow.tcp4_flow.src_port, .dst_port = input->flow.tcp4_flow.dst_port, }; - attributes->items[1] = (struct rte_flow_item){ - .type = RTE_FLOW_ITEM_TYPE_IPV4, - .spec = &attributes->l3, - .mask = &attributes->l3, + attributes->l4_mask.tcp.hdr = (struct tcp_hdr){ + .src_port = mask->src_port_mask, + .dst_port = mask->dst_port_mask, }; attributes->items[2] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_TCP, .spec = &attributes->l4, - .mask = &attributes->l4, - }; - break; - case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER: - attributes->l3.ipv4.hdr = (struct ipv4_hdr){ - .src_addr = input->flow.ip4_flow.src_ip, - .dst_addr = input->flow.ip4_flow.dst_ip, - .time_to_live = input->flow.ip4_flow.ttl, - .type_of_service = input->flow.ip4_flow.tos, - .next_proto_id = input->flow.ip4_flow.proto, - }; - attributes->items[1] = (struct rte_flow_item){ - .type = RTE_FLOW_ITEM_TYPE_IPV4, - .spec = &attributes->l3, - .mask = &attributes->l3, + .mask = &attributes->l4_mask, }; break; case RTE_ETH_FLOW_NONFRAG_IPV6_UDP: - attributes->l3.ipv6.hdr = (struct ipv6_hdr){ - .hop_limits = input->flow.udp6_flow.ip.hop_limits, - .proto = input->flow.udp6_flow.ip.proto, - }; - memcpy(attributes->l3.ipv6.hdr.src_addr, - input->flow.udp6_flow.ip.src_ip, - RTE_DIM(attributes->l3.ipv6.hdr.src_addr)); - memcpy(attributes->l3.ipv6.hdr.dst_addr, - input->flow.udp6_flow.ip.dst_ip, - RTE_DIM(attributes->l3.ipv6.hdr.src_addr)); attributes->l4.udp.hdr = (struct udp_hdr){ .src_port = input->flow.udp6_flow.src_port, .dst_port = input->flow.udp6_flow.dst_port, }; - attributes->items[1] = (struct rte_flow_item){ - .type = RTE_FLOW_ITEM_TYPE_IPV6, - .spec = &attributes->l3, - .mask = &attributes->l3, + attributes->l4_mask.udp.hdr = (struct udp_hdr){ + .src_port = mask->src_port_mask, + .dst_port = mask->dst_port_mask, }; attributes->items[2] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &attributes->l4, - .mask = &attributes->l4, + .mask = &attributes->l4_mask, }; break; case RTE_ETH_FLOW_NONFRAG_IPV6_TCP: - attributes->l3.ipv6.hdr = (struct ipv6_hdr){ - .hop_limits = input->flow.tcp6_flow.ip.hop_limits, - .proto = input->flow.tcp6_flow.ip.proto, - }; - memcpy(attributes->l3.ipv6.hdr.src_addr, - input->flow.tcp6_flow.ip.src_ip, - RTE_DIM(attributes->l3.ipv6.hdr.src_addr)); - memcpy(attributes->l3.ipv6.hdr.dst_addr, - input->flow.tcp6_flow.ip.dst_ip, - RTE_DIM(attributes->l3.ipv6.hdr.src_addr)); attributes->l4.tcp.hdr = (struct tcp_hdr){ .src_port = input->flow.tcp6_flow.src_port, .dst_port = input->flow.tcp6_flow.dst_port, }; - attributes->items[1] = (struct rte_flow_item){ - .type = RTE_FLOW_ITEM_TYPE_IPV6, - .spec = 
&attributes->l3, - .mask = &attributes->l3, + attributes->l4_mask.tcp.hdr = (struct tcp_hdr){ + .src_port = mask->src_port_mask, + .dst_port = mask->dst_port_mask, }; attributes->items[2] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_TCP, .spec = &attributes->l4, - .mask = &attributes->l4, + .mask = &attributes->l4_mask, }; break; + case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER: case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER: - attributes->l3.ipv6.hdr = (struct ipv6_hdr){ - .hop_limits = input->flow.ipv6_flow.hop_limits, - .proto = input->flow.ipv6_flow.proto, - }; - memcpy(attributes->l3.ipv6.hdr.src_addr, - input->flow.ipv6_flow.src_ip, - RTE_DIM(attributes->l3.ipv6.hdr.src_addr)); - memcpy(attributes->l3.ipv6.hdr.dst_addr, - input->flow.ipv6_flow.dst_ip, - RTE_DIM(attributes->l3.ipv6.hdr.src_addr)); - attributes->items[1] = (struct rte_flow_item){ - .type = RTE_FLOW_ITEM_TYPE_IPV6, - .spec = &attributes->l3, - .mask = &attributes->l3, - }; break; default: - ERROR("invalid flow type%d", - fdir_filter->input.flow_type); - return ENOTSUP; + DRV_LOG(ERR, "port %u invalid flow type %d", + dev->data->port_id, fdir_filter->input.flow_type); + rte_errno = ENOTSUP; + return -rte_errno; } return 0; } @@ -2745,18 +3424,19 @@ priv_fdir_filter_convert(struct priv *priv, /** * Add new flow director filter and store it in list. * - * @param priv - * Private structure. + * @param dev + * Pointer to Ethernet device. * @param fdir_filter * Flow director filter to add. * * @return - * 0 on success, errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int -priv_fdir_filter_add(struct priv *priv, +mlx5_fdir_filter_add(struct rte_eth_dev *dev, const struct rte_eth_fdir_filter *fdir_filter) { + struct priv *priv = dev->data->dev_private; struct mlx5_fdir attributes = { .attr.group = 0, .l2_mask = { @@ -2772,41 +3452,40 @@ priv_fdir_filter_add(struct priv *priv, struct rte_flow *flow; int ret; - ret = priv_fdir_filter_convert(priv, fdir_filter, &attributes); + ret = mlx5_fdir_filter_convert(dev, fdir_filter, &attributes); if (ret) - return -ret; - ret = priv_flow_convert(priv, &attributes.attr, attributes.items, + return ret; + ret = mlx5_flow_convert(dev, &attributes.attr, attributes.items, attributes.actions, &error, &parser); if (ret) - return -ret; - flow = priv_flow_create(priv, - &priv->flows, - &attributes.attr, - attributes.items, - attributes.actions, - &error); + return ret; + flow = mlx5_flow_list_create(dev, &priv->flows, &attributes.attr, + attributes.items, attributes.actions, + &error); if (flow) { - DEBUG("FDIR created %p", (void *)flow); + DRV_LOG(DEBUG, "port %u FDIR created %p", dev->data->port_id, + (void *)flow); return 0; } - return ENOTSUP; + return -rte_errno; } /** * Delete specific filter. * - * @param priv - * Private structure. + * @param dev - * Pointer to Ethernet device. Wait.
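/*
 * All converted entry points above share one return convention; a minimal
 * sketch of it (hypothetical function, requires rte_errno.h and errno.h):
 */
static int
example_check(const void *arg)
{
	if (arg == NULL) {
		rte_errno = EINVAL;	/* Positive error code kept here... */
		return -rte_errno;	/* ...negative value returned. */
	}
	return 0;
}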
*/ static int -priv_fdir_filter_delete(struct priv *priv, +mlx5_fdir_filter_delete(struct rte_eth_dev *dev, const struct rte_eth_fdir_filter *fdir_filter) { + struct priv *priv = dev->data->dev_private; struct mlx5_fdir attributes = { .attr.group = 0, }; @@ -2819,10 +3498,10 @@ priv_fdir_filter_delete(struct priv *priv, unsigned int i; int ret; - ret = priv_fdir_filter_convert(priv, fdir_filter, &attributes); + ret = mlx5_fdir_filter_convert(dev, fdir_filter, &attributes); if (ret) - return -ret; - ret = priv_flow_convert(priv, &attributes.attr, attributes.items, + return ret; + ret = mlx5_flow_convert(dev, &attributes.attr, attributes.items, attributes.actions, &error, &parser); if (ret) goto exit; @@ -2850,11 +3529,14 @@ priv_fdir_filter_delete(struct priv *priv, struct ibv_spec_header *flow_h; void *flow_spec; unsigned int specs_n; + unsigned int queue_id = parser.drop ? HASH_RXQ_ETH : + parser.layer; - attr = parser.queue[HASH_RXQ_ETH].ibv_attr; - flow_attr = flow->frxq[HASH_RXQ_ETH].ibv_attr; + attr = parser.queue[queue_id].ibv_attr; + flow_attr = flow->frxq[queue_id].ibv_attr; /* Compare first the attributes. */ - if (memcmp(attr, flow_attr, sizeof(struct ibv_flow_attr))) + if (!flow_attr || + memcmp(attr, flow_attr, sizeof(struct ibv_flow_attr))) continue; if (attr->num_of_specs == 0) continue; @@ -2879,67 +3561,70 @@ wrong_flow: /* The flow does not match. */ continue; } + ret = rte_errno; /* Save rte_errno before cleanup. */ if (flow) - priv_flow_destroy(priv, &priv->flows, flow); + mlx5_flow_list_destroy(dev, &priv->flows, flow); exit: for (i = 0; i != hash_rxq_init_n; ++i) { if (parser.queue[i].ibv_attr) rte_free(parser.queue[i].ibv_attr); } - return -ret; + rte_errno = ret; /* Restore rte_errno. */ + return -rte_errno; } /** * Update queue for specific filter. * - * @param priv - * Private structure. + * @param dev + * Pointer to Ethernet device. * @param fdir_filter * Filter to be updated. * * @return - * 0 on success, errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int -priv_fdir_filter_update(struct priv *priv, +mlx5_fdir_filter_update(struct rte_eth_dev *dev, const struct rte_eth_fdir_filter *fdir_filter) { int ret; - ret = priv_fdir_filter_delete(priv, fdir_filter); + ret = mlx5_fdir_filter_delete(dev, fdir_filter); if (ret) return ret; - ret = priv_fdir_filter_add(priv, fdir_filter); - return ret; + return mlx5_fdir_filter_add(dev, fdir_filter); } /** * Flush all filters. * - * @param priv - * Private structure. + * @param dev + * Pointer to Ethernet device. */ static void -priv_fdir_filter_flush(struct priv *priv) +mlx5_fdir_filter_flush(struct rte_eth_dev *dev) { - priv_flow_flush(priv, &priv->flows); + struct priv *priv = dev->data->dev_private; + + mlx5_flow_list_flush(dev, &priv->flows); } /** * Get flow director information. * - * @param priv - * Private structure. + * @param dev + * Pointer to Ethernet device. * @param[out] fdir_info * Resulting flow director information. 
*/ static void -priv_fdir_info_get(struct priv *priv, struct rte_eth_fdir_info *fdir_info) +mlx5_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info) { struct rte_eth_fdir_masks *mask = - &priv->dev->data->dev_conf.fdir_conf.mask; + &dev->data->dev_conf.fdir_conf.mask; - fdir_info->mode = priv->dev->data->dev_conf.fdir_conf.mode; + fdir_info->mode = dev->data->dev_conf.fdir_conf.mode; fdir_info->guarant_spc = 0; rte_memcpy(&fdir_info->mask, mask, sizeof(fdir_info->mask)); fdir_info->max_flexpayload = 0; @@ -2953,54 +3638,52 @@ priv_fdir_info_get(struct priv *priv, struct rte_eth_fdir_info *fdir_info) /** * Deal with flow director operations. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param filter_op * Operation to perform. * @param arg * Pointer to operation-specific structure. * * @return - * 0 on success, errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int -priv_fdir_ctrl_func(struct priv *priv, enum rte_filter_op filter_op, void *arg) +mlx5_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op, + void *arg) { enum rte_fdir_mode fdir_mode = - priv->dev->data->dev_conf.fdir_conf.mode; - int ret = 0; + dev->data->dev_conf.fdir_conf.mode; if (filter_op == RTE_ETH_FILTER_NOP) return 0; if (fdir_mode != RTE_FDIR_MODE_PERFECT && fdir_mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) { - ERROR("%p: flow director mode %d not supported", - (void *)priv, fdir_mode); - return EINVAL; + DRV_LOG(ERR, "port %u flow director mode %d not supported", + dev->data->port_id, fdir_mode); + rte_errno = EINVAL; + return -rte_errno; } switch (filter_op) { case RTE_ETH_FILTER_ADD: - ret = priv_fdir_filter_add(priv, arg); - break; + return mlx5_fdir_filter_add(dev, arg); case RTE_ETH_FILTER_UPDATE: - ret = priv_fdir_filter_update(priv, arg); - break; + return mlx5_fdir_filter_update(dev, arg); case RTE_ETH_FILTER_DELETE: - ret = priv_fdir_filter_delete(priv, arg); - break; + return mlx5_fdir_filter_delete(dev, arg); case RTE_ETH_FILTER_FLUSH: - priv_fdir_filter_flush(priv); + mlx5_fdir_filter_flush(dev); break; case RTE_ETH_FILTER_INFO: - priv_fdir_info_get(priv, arg); + mlx5_fdir_info_get(dev, arg); break; default: - DEBUG("%p: unknown operation %u", (void *)priv, - filter_op); - ret = EINVAL; - break; + DRV_LOG(DEBUG, "port %u unknown operation %u", + dev->data->port_id, filter_op); + rte_errno = EINVAL; + return -rte_errno; } - return ret; + return 0; } /** @@ -3016,7 +3699,7 @@ priv_fdir_ctrl_func(struct priv *priv, enum rte_filter_op filter_op, void *arg) * Pointer to operation-specific structure. * * @return - * 0 on success, negative errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
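/*
 * How the flow director dispatcher above is typically reached from an
 * application, as a sketch (hypothetical port/queue values):
 * rte_eth_dev_filter_ctrl() forwards RTE_ETH_FILTER_FDIR requests to
 * mlx5_fdir_ctrl_func() through the entry point just below.
 */
static int
example_fdir_add(uint16_t port_id, uint16_t rx_queue)
{
	struct rte_eth_fdir_filter f = {
		.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
		.input.flow.udp4_flow.src_port = rte_cpu_to_be_16(5000),
		.action.rx_queue = rx_queue,
		.action.behavior = RTE_ETH_FDIR_ACCEPT,
	};

	return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
				       RTE_ETH_FILTER_ADD, &f);
}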
*/ int mlx5_dev_filter_ctrl(struct rte_eth_dev *dev, @@ -3024,24 +3707,74 @@ mlx5_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_op filter_op, void *arg) { - int ret = EINVAL; - struct priv *priv = dev->data->dev_private; - switch (filter_type) { case RTE_ETH_FILTER_GENERIC: - if (filter_op != RTE_ETH_FILTER_GET) - return -EINVAL; + if (filter_op != RTE_ETH_FILTER_GET) { + rte_errno = EINVAL; + return -rte_errno; + } *(const void **)arg = &mlx5_flow_ops; return 0; case RTE_ETH_FILTER_FDIR: - priv_lock(priv); - ret = priv_fdir_ctrl_func(priv, filter_op, arg); - priv_unlock(priv); - break; + return mlx5_fdir_ctrl_func(dev, filter_op, arg); default: - ERROR("%p: filter type (%d) not supported", - (void *)dev, filter_type); - break; + DRV_LOG(ERR, "port %u filter type (%d) not supported", + dev->data->port_id, filter_type); + rte_errno = ENOTSUP; + return -rte_errno; } - return -ret; + return 0; +} + +/** + * Detect number of Verbs flow priorities supported. + * + * @param dev + * Pointer to Ethernet device. + * + * @return + * number of supported Verbs flow priority. + */ +unsigned int +mlx5_get_max_verbs_prio(struct rte_eth_dev *dev) +{ + struct priv *priv = dev->data->dev_private; + unsigned int verb_priorities = MLX5_VERBS_FLOW_PRIO_8; + struct { + struct ibv_flow_attr attr; + struct ibv_flow_spec_eth eth; + struct ibv_flow_spec_action_drop drop; + } flow_attr = { + .attr = { + .num_of_specs = 2, + }, + .eth = { + .type = IBV_FLOW_SPEC_ETH, + .size = sizeof(struct ibv_flow_spec_eth), + }, + .drop = { + .size = sizeof(struct ibv_flow_spec_action_drop), + .type = IBV_FLOW_SPEC_ACTION_DROP, + }, + }; + struct ibv_flow *flow; + + do { + flow_attr.attr.priority = verb_priorities - 1; + flow = mlx5_glue->create_flow(priv->flow_drop_queue->qp, + &flow_attr.attr); + if (flow) { + claim_zero(mlx5_glue->destroy_flow(flow)); + /* Try more priorities. */ + verb_priorities *= 2; + } else { + /* Failed, restore last right number. */ + verb_priorities /= 2; + break; + } + } while (1); + DRV_LOG(DEBUG, "port %u Verbs flow priorities: %d," + " user flow priorities: %d", + dev->data->port_id, verb_priorities, MLX5_CTRL_FLOW_PRIORITY); + return verb_priorities; } diff --git a/drivers/net/mlx5/mlx5_glue.c b/drivers/net/mlx5/mlx5_glue.c index 1c4396ad..c7965e51 100644 --- a/drivers/net/mlx5/mlx5_glue.c +++ b/drivers/net/mlx5/mlx5_glue.c @@ -1,12 +1,18 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2018 6WIND S.A. - * Copyright 2018 Mellanox Technologies, Ltd. + * Copyright 2018 Mellanox Technologies, Ltd */ #include #include #include +/* + * Not needed by this file; included to work around the lack of off_t + * definition for mlx5dv.h with unpatched rdma-core versions. + */ +#include + /* Verbs headers do not support -pedantic. 
*/ #ifdef PEDANTIC #pragma GCC diagnostic ignored "-Wpedantic" @@ -287,6 +293,21 @@ mlx5_glue_dv_create_cq(struct ibv_context *context, return mlx5dv_create_cq(context, cq_attr, mlx5_cq_attr); } +static struct ibv_wq * +mlx5_glue_dv_create_wq(struct ibv_context *context, + struct ibv_wq_init_attr *wq_attr, + struct mlx5dv_wq_init_attr *mlx5_wq_attr) +{ +#ifndef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT + (void)context; + (void)wq_attr; + (void)mlx5_wq_attr; + return NULL; +#else + return mlx5dv_create_wq(context, wq_attr, mlx5_wq_attr); +#endif +} + static int mlx5_glue_dv_query_device(struct ibv_context *ctx, struct mlx5dv_context *attrs_out) @@ -307,6 +328,21 @@ mlx5_glue_dv_init_obj(struct mlx5dv_obj *obj, uint64_t obj_type) return mlx5dv_init_obj(obj, obj_type); } +static struct ibv_qp * +mlx5_glue_dv_create_qp(struct ibv_context *context, + struct ibv_qp_init_attr_ex *qp_init_attr_ex, + struct mlx5dv_qp_init_attr *dv_qp_init_attr) +{ +#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT + return mlx5dv_create_qp(context, qp_init_attr_ex, dv_qp_init_attr); +#else + (void)context; + (void)qp_init_attr_ex; + (void)dv_qp_init_attr; + return NULL; +#endif +} + const struct mlx5_glue *mlx5_glue = &(const struct mlx5_glue){ .version = MLX5_GLUE_VERSION, .fork_init = mlx5_glue_fork_init, @@ -347,7 +383,9 @@ const struct mlx5_glue *mlx5_glue = &(const struct mlx5_glue){ .port_state_str = mlx5_glue_port_state_str, .cq_ex_to_cq = mlx5_glue_cq_ex_to_cq, .dv_create_cq = mlx5_glue_dv_create_cq, + .dv_create_wq = mlx5_glue_dv_create_wq, .dv_query_device = mlx5_glue_dv_query_device, .dv_set_context_attr = mlx5_glue_dv_set_context_attr, .dv_init_obj = mlx5_glue_dv_init_obj, + .dv_create_qp = mlx5_glue_dv_create_qp, }; diff --git a/drivers/net/mlx5/mlx5_glue.h b/drivers/net/mlx5/mlx5_glue.h index b5efee3b..e584d367 100644 --- a/drivers/net/mlx5/mlx5_glue.h +++ b/drivers/net/mlx5/mlx5_glue.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2018 6WIND S.A. - * Copyright 2018 Mellanox Technologies, Ltd. + * Copyright 2018 Mellanox Technologies, Ltd */ #ifndef MLX5_GLUE_H_ @@ -31,6 +31,14 @@ struct ibv_counter_set_init_attr; struct ibv_query_counter_set_attr; #endif +#ifndef HAVE_IBV_DEVICE_TUNNEL_SUPPORT +struct mlx5dv_qp_init_attr; +#endif + +#ifndef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT +struct mlx5dv_wq_init_attr; +#endif + /* LIB_GLUE_VERSION must be updated every time this structure is modified. */ struct mlx5_glue { const char *version; @@ -100,12 +108,20 @@ struct mlx5_glue { (struct ibv_context *context, struct ibv_cq_init_attr_ex *cq_attr, struct mlx5dv_cq_init_attr *mlx5_cq_attr); + struct ibv_wq *(*dv_create_wq) + (struct ibv_context *context, + struct ibv_wq_init_attr *wq_attr, + struct mlx5dv_wq_init_attr *mlx5_wq_attr); int (*dv_query_device)(struct ibv_context *ctx_in, struct mlx5dv_context *attrs_out); int (*dv_set_context_attr)(struct ibv_context *ibv_ctx, enum mlx5dv_set_ctx_attr_type type, void *attr); int (*dv_init_obj)(struct mlx5dv_obj *obj, uint64_t obj_type); + struct ibv_qp *(*dv_create_qp) + (struct ibv_context *context, + struct ibv_qp_init_attr_ex *qp_init_attr_ex, + struct mlx5dv_qp_init_attr *dv_qp_init_attr); }; const struct mlx5_glue *mlx5_glue; diff --git a/drivers/net/mlx5/mlx5_mac.c b/drivers/net/mlx5/mlx5_mac.c index e8a8d459..672a4761 100644 --- a/drivers/net/mlx5/mlx5_mac.c +++ b/drivers/net/mlx5/mlx5_mac.c @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2015 6WIND S.A. - * Copyright 2015 Mellanox. 
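/*
 * Caller-side sketch for the new glue wrappers above (hypothetical helper):
 * when rdma-core lacks tunnel support, dv_create_qp() compiles to a stub
 * returning NULL, so NULL must be treated as "feature unavailable" and not
 * only as an allocation failure.
 */
static struct ibv_qp *
example_create_tunnel_qp(struct ibv_context *ctx,
			 struct ibv_qp_init_attr_ex *attr_ex,
			 struct mlx5dv_qp_init_attr *dv_attr)
{
	struct ibv_qp *qp = mlx5_glue->dv_create_qp(ctx, attr_ex, dv_attr);

	if (qp == NULL)
		DRV_LOG(DEBUG, "tunnel offload QP unsupported or failed");
	return qp;
}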
+ * Copyright 2015 Mellanox Technologies, Ltd */ #include @@ -35,44 +35,52 @@ /** * Get MAC address by querying netdevice. * - * @param[in] priv - * struct priv for the requested device. + * @param[in] dev + * Pointer to Ethernet device. * @param[out] mac * MAC address output buffer. * * @return - * 0 on success, -1 on failure and errno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int -priv_get_mac(struct priv *priv, uint8_t (*mac)[ETHER_ADDR_LEN]) +mlx5_get_mac(struct rte_eth_dev *dev, uint8_t (*mac)[ETHER_ADDR_LEN]) { struct ifreq request; + int ret; - if (priv_ifreq(priv, SIOCGIFHWADDR, &request)) - return -1; + ret = mlx5_ifreq(dev, SIOCGIFHWADDR, &request); + if (ret) + return ret; memcpy(mac, request.ifr_hwaddr.sa_data, ETHER_ADDR_LEN); return 0; } /** - * DPDK callback to remove a MAC address. + * Remove a MAC address from the internal array. * * @param dev * Pointer to Ethernet device structure. * @param index * MAC address index. */ -void -mlx5_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index) +static void +mlx5_internal_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index) { + struct priv *priv = dev->data->dev_private; + const int vf = priv->config.vf; + assert(index < MLX5_MAX_MAC_ADDRESSES); + if (is_zero_ether_addr(&dev->data->mac_addrs[index])) + return; + if (vf) + mlx5_nl_mac_addr_remove(dev, &dev->data->mac_addrs[index], + index); memset(&dev->data->mac_addrs[index], 0, sizeof(struct ether_addr)); - if (!dev->data->promiscuous) - mlx5_traffic_restart(dev); } /** - * DPDK callback to add a MAC address. + * Adds a MAC address to the internal array. * * @param dev * Pointer to Ethernet device structure. @@ -80,21 +88,23 @@ mlx5_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index) * MAC address to register. * @param index * MAC address index. - * @param vmdq - * VMDq pool index to associate address with (ignored). * * @return - * 0 on success. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ -int -mlx5_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac, - uint32_t index, uint32_t vmdq) +static int +mlx5_internal_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac, + uint32_t index) { + struct priv *priv = dev->data->dev_private; + const int vf = priv->config.vf; unsigned int i; - int ret = 0; - (void)vmdq; assert(index < MLX5_MAX_MAC_ADDRESSES); + if (is_zero_ether_addr(mac)) { + rte_errno = EINVAL; + return -rte_errno; + } /* First, make sure this address isn't already configured. */ for (i = 0; (i != MLX5_MAX_MAC_ADDRESSES); ++i) { /* Skip this index, it's going to be reconfigured. */ @@ -103,12 +113,74 @@ mlx5_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac, if (memcmp(&dev->data->mac_addrs[i], mac, sizeof(*mac))) continue; /* Address already configured elsewhere, return with error. */ - return EADDRINUSE; + rte_errno = EADDRINUSE; + return -rte_errno; + } + if (vf) { + int ret = mlx5_nl_mac_addr_add(dev, mac, index); + + if (ret) + return ret; } dev->data->mac_addrs[index] = *mac; + return 0; +} + +/** + * DPDK callback to remove a MAC address. + * + * @param dev + * Pointer to Ethernet device structure. + * @param index + * MAC address index. 
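/*
 * Application-side sketch of the new multicast support added in this file
 * (the mlx5_set_mc_addr_list() callback appears just below); the addresses
 * are illustrative IPv4 multicast MACs:
 */
static int
example_set_mc_list(uint16_t port_id)
{
	struct ether_addr mc[] = {
		{ .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 } },
		{ .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb } },
	};

	return rte_eth_dev_set_mc_addr_list(port_id, mc, RTE_DIM(mc));
}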
+ */ +void +mlx5_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index) +{ + int ret; + + if (index >= MLX5_MAX_UC_MAC_ADDRESSES) + return; + mlx5_internal_mac_addr_remove(dev, index); + if (!dev->data->promiscuous) { + ret = mlx5_traffic_restart(dev); + if (ret) + DRV_LOG(ERR, "port %u cannot restart traffic: %s", + dev->data->port_id, strerror(rte_errno)); + } +} + +/** + * DPDK callback to add a MAC address. + * + * @param dev + * Pointer to Ethernet device structure. + * @param mac_addr + * MAC address to register. + * @param index + * MAC address index. + * @param vmdq + * VMDq pool index to associate address with (ignored). + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac, + uint32_t index, uint32_t vmdq __rte_unused) +{ + int ret; + + if (index >= MLX5_MAX_UC_MAC_ADDRESSES) { + rte_errno = EINVAL; + return -rte_errno; + } + ret = mlx5_internal_mac_addr_add(dev, mac, index); + if (ret < 0) + return ret; if (!dev->data->promiscuous) - mlx5_traffic_restart(dev); - return ret; + return mlx5_traffic_restart(dev); + return 0; } /** @@ -118,10 +190,43 @@ mlx5_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac, * Pointer to Ethernet device structure. * @param mac_addr * MAC address to register. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. */ -void +int mlx5_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr) { - DEBUG("%p: setting primary MAC address", (void *)dev); - mlx5_mac_addr_add(dev, mac_addr, 0, 0); + DRV_LOG(DEBUG, "port %u setting primary MAC address", + dev->data->port_id); + return mlx5_mac_addr_add(dev, mac_addr, 0, 0); +} + +/** + * DPDK callback to set multicast addresses list. + * + * @see rte_eth_dev_set_mc_addr_list() + */ +int +mlx5_set_mc_addr_list(struct rte_eth_dev *dev, + struct ether_addr *mc_addr_set, uint32_t nb_mc_addr) +{ + uint32_t i; + int ret; + + if (nb_mc_addr >= MLX5_MAX_MC_MAC_ADDRESSES) { + rte_errno = ENOSPC; + return -rte_errno; + } + for (i = MLX5_MAX_UC_MAC_ADDRESSES; i != MLX5_MAX_MAC_ADDRESSES; ++i) + mlx5_internal_mac_addr_remove(dev, i); + i = MLX5_MAX_UC_MAC_ADDRESSES; + while (nb_mc_addr--) { + ret = mlx5_internal_mac_addr_add(dev, mc_addr_set++, i++); + if (ret) + return ret; + } + if (!dev->data->promiscuous) + return mlx5_traffic_restart(dev); + return 0; } diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c index 857dfcd8..08105a44 100644 --- a/drivers/net/mlx5/mlx5_mr.c +++ b/drivers/net/mlx5/mlx5_mr.c @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2016 6WIND S.A. - * Copyright 2016 Mellanox. + * Copyright 2016 Mellanox Technologies, Ltd */ #ifdef PEDANTIC @@ -13,362 +13,1183 @@ #include #include +#include #include "mlx5.h" +#include "mlx5_mr.h" #include "mlx5_rxtx.h" #include "mlx5_glue.h" -struct mlx5_check_mempool_data { +struct mr_find_contig_memsegs_data { + uintptr_t addr; + uintptr_t start; + uintptr_t end; + const struct rte_memseg_list *msl; +}; + +struct mr_update_mp_data { + struct rte_eth_dev *dev; + struct mlx5_mr_ctrl *mr_ctrl; int ret; - char *start; - char *end; }; -/* Called by mlx5_check_mempool() when iterating the memory chunks. */ -static void -mlx5_check_mempool_cb(struct rte_mempool *mp, - void *opaque, struct rte_mempool_memhdr *memhdr, - unsigned int mem_idx) +/** + * Expand B-tree table to a given size. 
Can't be called while holding + memory_hotplug_lock or priv->mr.rwlock due to rte_realloc(). + * + * @param bt + * Pointer to B-tree structure. + * @param n + * Number of entries for expansion. + * + * @return + * 0 on success, -1 on failure. + */ +static int +mr_btree_expand(struct mlx5_mr_btree *bt, int n) { - struct mlx5_check_mempool_data *data = opaque; + void *mem; + int ret = 0; - (void)mp; - (void)mem_idx; + if (n <= bt->size) + return ret; + /* + * Downside of directly using rte_realloc() is that SOCKET_ID_ANY is + * used inside if there's no room to expand. Because this is quite a + * rare case and part of a very slow path, it is acceptable. + * Initially cache_bh[] will be given practically enough space and once + * it is expanded, expansion should not be needed again. + */ + mem = rte_realloc(bt->table, n * sizeof(struct mlx5_mr_cache), 0); + if (mem == NULL) { + /* Not an error, B-tree search will be skipped. */ + DRV_LOG(WARNING, "failed to expand MR B-tree (%p) table", + (void *)bt); + ret = -1; + } else { + DRV_LOG(DEBUG, "expanded MR B-tree table (size=%u)", n); + bt->table = mem; + bt->size = n; + } + return ret; +} - /* It already failed, skip the next chunks. */ - if (data->ret != 0) - return; - /* It is the first chunk. */ - if (data->start == NULL && data->end == NULL) { - data->start = memhdr->addr; - data->end = data->start + memhdr->len; - return; +/** + * Look up LKey from given B-tree lookup table, store the last index and return + * searched LKey. + * + * @param bt + * Pointer to B-tree structure. + * @param[out] idx + * Pointer to index. Even on search failure, returns index where it stops + * searching so that index can be used when inserting a new entry. + * @param addr + * Search key. + * + * @return + * Searched LKey on success, UINT32_MAX on no match. + */ +static uint32_t +mr_btree_lookup(struct mlx5_mr_btree *bt, uint16_t *idx, uintptr_t addr) +{ + struct mlx5_mr_cache *lkp_tbl; + uint16_t n; + uint16_t base = 0; + + assert(bt != NULL); + lkp_tbl = *bt->table; + n = bt->len; + /* First entry must be NULL for comparison. */ + assert(bt->len > 0 || (lkp_tbl[0].start == 0 && + lkp_tbl[0].lkey == UINT32_MAX)); + /* Binary search. */ + do { + register uint16_t delta = n >> 1; + + if (addr < lkp_tbl[base + delta].start) { + n = delta; + } else { + base += delta; + n -= delta; + } + } while (n > 1); + assert(addr >= lkp_tbl[base].start); + *idx = base; + if (addr < lkp_tbl[base].end) + return lkp_tbl[base].lkey; + /* Not found. */ + return UINT32_MAX; +} + +/** + * Insert an entry into the B-tree lookup table. + * + * @param bt + * Pointer to B-tree structure. + * @param entry + * Pointer to new entry to insert. + * + * @return + * 0 on success, -1 on failure. + */ +static int +mr_btree_insert(struct mlx5_mr_btree *bt, struct mlx5_mr_cache *entry) +{ + struct mlx5_mr_cache *lkp_tbl; + uint16_t idx = 0; + size_t shift; + + assert(bt != NULL); + assert(bt->len <= bt->size); + assert(bt->len > 0); + lkp_tbl = *bt->table; + /* Find out the slot for insertion. */ + if (mr_btree_lookup(bt, &idx, entry->start) != UINT32_MAX) { + DRV_LOG(DEBUG, + "abort insertion to B-tree(%p): already exist at" + " idx=%u [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x", + (void *)bt, idx, entry->start, entry->end, entry->lkey); + /* Already exists, return. */ + return 0; } - if (data->end == memhdr->addr) { - data->end += memhdr->len; - return; + /* If table is full, return error.
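/*
 * Standalone sketch of the lookup scheme above (illustrative types, not part
 * of the driver): binary search over entries sorted by start address,
 * narrowing [base, base + n) until one candidate remains, then checking that
 * the key really falls inside that entry's [start, end) interval.
 */
struct example_range {
	uintptr_t start;
	uintptr_t end;
	uint32_t lkey;
};

static uint32_t
example_range_lookup(const struct example_range *tbl, uint16_t len,
		     uintptr_t addr)
{
	uint16_t base = 0;

	while (len > 1) {
		uint16_t half = len >> 1;

		if (addr < tbl[base + half].start) {
			len = half;
		} else {
			base += half;
			len -= half;
		}
	}
	return (addr >= tbl[base].start && addr < tbl[base].end) ?
	       tbl[base].lkey : UINT32_MAX;
}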
*/ + if (unlikely(bt->len == bt->size)) { + bt->overflow = 1; + return -1; } - if (data->start == (char *)memhdr->addr + memhdr->len) { - data->start -= memhdr->len; + /* Insert entry. */ + ++idx; + shift = (bt->len - idx) * sizeof(struct mlx5_mr_cache); + if (shift) + memmove(&lkp_tbl[idx + 1], &lkp_tbl[idx], shift); + lkp_tbl[idx] = *entry; + bt->len++; + DRV_LOG(DEBUG, + "inserted B-tree(%p)[%u]," + " [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x", + (void *)bt, idx, entry->start, entry->end, entry->lkey); + return 0; +} + +/** + * Initialize B-tree and allocate memory for lookup table. + * + * @param bt + * Pointer to B-tree structure. + * @param n + * Number of entries to allocate. + * @param socket + * NUMA socket on which memory must be allocated. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_mr_btree_init(struct mlx5_mr_btree *bt, int n, int socket) +{ + if (bt == NULL) { + rte_errno = EINVAL; + return -rte_errno; + } + assert(!bt->table && !bt->size); + memset(bt, 0, sizeof(*bt)); + bt->table = rte_calloc_socket("B-tree table", + n, sizeof(struct mlx5_mr_cache), + 0, socket); + if (bt->table == NULL) { + rte_errno = ENOMEM; + DRV_LOG(ERR, + "failed to allocate memory for btree cache on socket %d", + socket); + return -rte_errno; + } + bt->size = n; + /* First entry must be NULL for binary search. */ + (*bt->table)[bt->len++] = (struct mlx5_mr_cache) { + .lkey = UINT32_MAX, + }; + DRV_LOG(DEBUG, "initialized B-tree %p with table %p", + (void *)bt, (void *)bt->table); + return 0; +} + +/** + * Free B-tree resources. + * + * @param bt + * Pointer to B-tree structure. + */ +void +mlx5_mr_btree_free(struct mlx5_mr_btree *bt) +{ + if (bt == NULL) + return; + DRV_LOG(DEBUG, "freeing B-tree %p with table %p", + (void *)bt, (void *)bt->table); + rte_free(bt->table); + memset(bt, 0, sizeof(*bt)); +} + +/** + * Dump all the entries in a B-tree. + * + * @param bt + * Pointer to B-tree structure. + */ +static void +mlx5_mr_btree_dump(struct mlx5_mr_btree *bt) +{ + int idx; + struct mlx5_mr_cache *lkp_tbl; + + if (bt == NULL) return; + lkp_tbl = *bt->table; + for (idx = 0; idx < bt->len; ++idx) { + struct mlx5_mr_cache *entry = &lkp_tbl[idx]; + + DRV_LOG(DEBUG, + "B-tree(%p)[%u]," + " [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x", + (void *)bt, idx, entry->start, entry->end, entry->lkey); } - /* Error, mempool is not virtually contiguous. */ - data->ret = -1; } /** - * Check if a mempool can be used: it must be virtually contiguous. + * Find virtually contiguous memory chunk in a given MR. * - * @param[in] mp - * Pointer to memory pool. - * @param[out] start - * Pointer to the start address of the mempool virtual memory area - * @param[out] end - * Pointer to the end address of the mempool virtual memory area + * @param mr + * Pointer to MR structure. + * @param[out] entry + * Pointer to returning MR cache entry. If not found, this will not be + * updated. + * @param base_idx + * Start index of the memseg bitmap. * * @return - * 0 on success (mempool is virtually contiguous), -1 on error. + * Next index to go on lookup.
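/*
 * The chunk finder below scans a bitmap of memsegs for the next contiguous
 * run of set bits. The same pattern in isolation (plain byte array standing
 * in for rte_bitmap; illustrative, not part of the driver):
 */
static int
example_next_run(const uint8_t *bits, int n, int base, int *start, int *end)
{
	int i;

	*start = -1;
	for (i = base; i < n; ++i) {
		if (bits[i]) {
			if (*start < 0)
				*start = i;	/* Run begins here. */
			*end = i + 1;		/* Extend the run. */
		} else if (*start >= 0) {
			break;			/* Passed the end of a run. */
		}
	}
	return i; /* Caller must check *start < 0 to detect "no run found". */
}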
*/ -static int mlx5_check_mempool(struct rte_mempool *mp, uintptr_t *start, - uintptr_t *end) +static int +mr_find_next_chunk(struct mlx5_mr *mr, struct mlx5_mr_cache *entry, + int base_idx) { - struct mlx5_check_mempool_data data; + uintptr_t start = 0; + uintptr_t end = 0; + uint32_t idx = 0; - memset(&data, 0, sizeof(data)); - rte_mempool_mem_iter(mp, mlx5_check_mempool_cb, &data); - *start = (uintptr_t)data.start; - *end = (uintptr_t)data.end; + for (idx = base_idx; idx < mr->ms_bmp_n; ++idx) { + if (rte_bitmap_get(mr->ms_bmp, idx)) { + const struct rte_memseg_list *msl; + const struct rte_memseg *ms; - return data.ret; + msl = mr->msl; + ms = rte_fbarray_get(&msl->memseg_arr, + mr->ms_base_idx + idx); + assert(msl->page_sz == ms->hugepage_sz); + if (!start) + start = ms->addr_64; + end = ms->addr_64 + ms->hugepage_sz; + } else if (start) { + /* Passed the end of a fragment. */ + break; + } + } + if (start) { + /* Found one chunk. */ + entry->start = start; + entry->end = end; + entry->lkey = rte_cpu_to_be_32(mr->ibv_mr->lkey); + } + return idx; } /** - * Register a Memory Region (MR) <-> Memory Pool (MP) association in - * txq->mp2mr[]. If mp2mr[] is full, remove an entry first. + * Insert a MR to the global B-tree cache. It may fail due to low-on-memory. + * Then, this entry will have to be searched by mr_lookup_dev_list() in + * mlx5_mr_create() on miss. * - * This function should only be called by txq_mp2mr(). + * @param dev + * Pointer to Ethernet device. + * @param mr + * Pointer to MR to insert. * - * @param priv - * Pointer to private structure. - * @param txq - * Pointer to TX queue structure. - * @param[in] mp - * Memory Pool for which a Memory Region lkey must be returned. - * @param idx - * Index of the next available entry. + * @return + * 0 on success, -1 on failure. + */ +static int +mr_insert_dev_cache(struct rte_eth_dev *dev, struct mlx5_mr *mr) +{ + struct priv *priv = dev->data->dev_private; + unsigned int n; + + DRV_LOG(DEBUG, "port %u inserting MR(%p) to global cache", + dev->data->port_id, (void *)mr); + for (n = 0; n < mr->ms_bmp_n; ) { + struct mlx5_mr_cache entry = { 0, }; + + /* Find a contiguous chunk and advance the index. */ + n = mr_find_next_chunk(mr, &entry, n); + if (!entry.end) + break; + if (mr_btree_insert(&priv->mr.cache, &entry) < 0) { + /* + * Overflowed, but the global table cannot be expanded + * because of deadlock. + */ + return -1; + } + } + return 0; +} + +/** + * Look up address in the original global MR list. + * + * @param dev + * Pointer to Ethernet device. + * @param[out] entry + * Pointer to returning MR cache entry. If no match, this will not be updated. + * @param addr + * Search key. * * @return - * mr on success, NULL on failure. + * Found MR on match, NULL otherwise. */ -struct mlx5_mr* -priv_txq_mp2mr_reg(struct priv *priv, struct mlx5_txq_data *txq, - struct rte_mempool *mp, unsigned int idx) +static struct mlx5_mr * +mr_lookup_dev_list(struct rte_eth_dev *dev, struct mlx5_mr_cache *entry, + uintptr_t addr) { - struct mlx5_txq_ctrl *txq_ctrl = - container_of(txq, struct mlx5_txq_ctrl, txq); + struct priv *priv = dev->data->dev_private; struct mlx5_mr *mr; - /* Add a new entry, register MR first. 
*/ - DEBUG("%p: discovered new memory pool \"%s\" (%p)", - (void *)txq_ctrl, mp->name, (void *)mp); - mr = priv_mr_get(priv, mp); - if (mr == NULL) { - if (rte_eal_process_type() != RTE_PROC_PRIMARY) { - DEBUG("Using unregistered mempool 0x%p(%s) in secondary process," - " please create mempool before rte_eth_dev_start()", - (void *)mp, mp->name); - return NULL; + /* Iterate all the existing MRs. */ + LIST_FOREACH(mr, &priv->mr.mr_list, mr) { + unsigned int n; + + if (mr->ms_n == 0) + continue; + for (n = 0; n < mr->ms_bmp_n; ) { + struct mlx5_mr_cache ret = { 0, }; + + n = mr_find_next_chunk(mr, &ret, n); + if (addr >= ret.start && addr < ret.end) { + /* Found. */ + *entry = ret; + return mr; + } } - mr = priv_mr_new(priv, mp); - } - if (unlikely(mr == NULL)) { - DEBUG("%p: unable to configure MR, ibv_reg_mr() failed.", - (void *)txq_ctrl); - return NULL; - } - if (unlikely(idx == RTE_DIM(txq->mp2mr))) { - /* Table is full, remove oldest entry. */ - DEBUG("%p: MR <-> MP table full, dropping oldest entry.", - (void *)txq_ctrl); - --idx; - priv_mr_release(priv, txq->mp2mr[0]); - memmove(&txq->mp2mr[0], &txq->mp2mr[1], - (sizeof(txq->mp2mr) - sizeof(txq->mp2mr[0]))); } - /* Store the new entry. */ - txq_ctrl->txq.mp2mr[idx] = mr; - DEBUG("%p: new MR lkey for MP \"%s\" (%p): 0x%08" PRIu32, - (void *)txq_ctrl, mp->name, (void *)mp, - txq_ctrl->txq.mp2mr[idx]->lkey); - return mr; + return NULL; } /** - * Register a Memory Region (MR) <-> Memory Pool (MP) association in - * txq->mp2mr[]. If mp2mr[] is full, remove an entry first. - * - * This function should only be called by txq_mp2mr(). + * Look up address on device. * - * @param txq - * Pointer to TX queue structure. - * @param[in] mp - * Memory Pool for which a Memory Region lkey must be returned. - * @param idx - * Index of the next available entry. + * @param dev + * Pointer to Ethernet device. + * @param[out] entry + * Pointer to returning MR cache entry. If no match, this will not be updated. + * @param addr + * Search key. * * @return - * mr on success, NULL on failure. + * Searched LKey on success, UINT32_MAX on failure and rte_errno is set. */ -struct mlx5_mr* -mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq, struct rte_mempool *mp, - unsigned int idx) +static uint32_t +mr_lookup_dev(struct rte_eth_dev *dev, struct mlx5_mr_cache *entry, + uintptr_t addr) { - struct mlx5_txq_ctrl *txq_ctrl = - container_of(txq, struct mlx5_txq_ctrl, txq); + struct priv *priv = dev->data->dev_private; + uint16_t idx; + uint32_t lkey = UINT32_MAX; struct mlx5_mr *mr; - priv_lock(txq_ctrl->priv); - mr = priv_txq_mp2mr_reg(txq_ctrl->priv, txq, mp, idx); - priv_unlock(txq_ctrl->priv); - return mr; + /* + * If the global cache has overflowed since it failed to expand the + * B-tree table, it can't have all the existing MRs. Then, the address + * has to be searched by traversing the original MR list instead, which + * is very slow path. Otherwise, the global cache is all inclusive. + */ + if (!unlikely(priv->mr.cache.overflow)) { + lkey = mr_btree_lookup(&priv->mr.cache, &idx, addr); + if (lkey != UINT32_MAX) + *entry = (*priv->mr.cache.table)[idx]; + } else { + /* Falling back to the slowest path. */ + mr = mr_lookup_dev_list(dev, entry, addr); + if (mr != NULL) + lkey = entry->lkey; + } + assert(lkey == UINT32_MAX || (addr >= entry->start && + addr < entry->end)); + return lkey; } -struct mlx5_mp2mr_mbuf_check_data { - int ret; -}; +/** + * Free MR resources. MR lock must not be held to avoid a deadlock. 
rte_free() + * can raise memory free event and the callback function will spin on the lock. + * + * @param mr + * Pointer to MR to free. + */ +static void +mr_free(struct mlx5_mr *mr) +{ + if (mr == NULL) + return; + DRV_LOG(DEBUG, "freeing MR(%p):", (void *)mr); + if (mr->ibv_mr != NULL) + claim_zero(mlx5_glue->dereg_mr(mr->ibv_mr)); + if (mr->ms_bmp != NULL) + rte_bitmap_free(mr->ms_bmp); + rte_free(mr); +} /** - * Callback function for rte_mempool_obj_iter() to check whether a given - * mempool object looks like a mbuf. - * - * @param[in] mp - * The mempool pointer - * @param[in] arg - * Context data (struct txq_mp2mr_mbuf_check_data). Contains the - * return value. - * @param[in] obj - * Object address. - * @param index - * Object index, unused. + * Release resources of detached MRs having no online entry. + * + * @param dev + * Pointer to Ethernet device. */ static void -txq_mp2mr_mbuf_check(struct rte_mempool *mp, void *arg, void *obj, - uint32_t index __rte_unused) +mlx5_mr_garbage_collect(struct rte_eth_dev *dev) { - struct mlx5_mp2mr_mbuf_check_data *data = arg; - struct rte_mbuf *buf = obj; + struct priv *priv = dev->data->dev_private; + struct mlx5_mr *mr_next; + struct mlx5_mr_list free_list = LIST_HEAD_INITIALIZER(free_list); + /* Must be called from the primary process. */ + assert(rte_eal_process_type() == RTE_PROC_PRIMARY); /* - * Check whether mbuf structure fits element size and whether mempool - * pointer is valid. + * An MR can't be freed while holding the lock because rte_free() could + * call the memory free callback function. This will be a deadlock situation. */ - if (sizeof(*buf) > mp->elt_size || buf->pool != mp) - data->ret = -1; + rte_rwlock_write_lock(&priv->mr.rwlock); + /* Detach the whole free list and release it after unlocking. */ + free_list = priv->mr.mr_free_list; + LIST_INIT(&priv->mr.mr_free_list); + rte_rwlock_write_unlock(&priv->mr.rwlock); + /* Release resources. */ + mr_next = LIST_FIRST(&free_list); + while (mr_next != NULL) { + struct mlx5_mr *mr = mr_next; + + mr_next = LIST_NEXT(mr, mr); + mr_free(mr); + } +} + +/* Called during rte_memseg_contig_walk() by mlx5_mr_create(). */ +static int +mr_find_contig_memsegs_cb(const struct rte_memseg_list *msl, + const struct rte_memseg *ms, size_t len, void *arg) +{ + struct mr_find_contig_memsegs_data *data = arg; + + if (data->addr < ms->addr_64 || data->addr >= ms->addr_64 + len) + return 0; + /* Found, save it and stop walking. */ + data->start = ms->addr_64; + data->end = ms->addr_64 + len; + data->msl = msl; + return 1; } /** - * Iterator function for rte_mempool_walk() to register existing mempools and - * fill the MP to MR cache of a TX queue. + * Create a new global Memory Region (MR) for a missing virtual address. + * Register entire virtually contiguous memory chunk around the address. + * + * @param dev + * Pointer to Ethernet device. + * @param[out] entry + * Pointer to returning MR cache entry, found in the global cache or newly + * created. If failed to create one, this will not be updated. + * @param addr + * Target virtual address to register. * - * @param[in] mp - * Memory Pool to register. - * @param *arg - * Pointer to TX queue structure. + * @return + * Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
*/ -void -mlx5_mp2mr_iter(struct rte_mempool *mp, void *arg) +static uint32_t +mlx5_mr_create(struct rte_eth_dev *dev, struct mlx5_mr_cache *entry, + uintptr_t addr) { - struct priv *priv = (struct priv *)arg; - struct mlx5_mp2mr_mbuf_check_data data = { - .ret = 0, + struct priv *priv = dev->data->dev_private; + struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config; + const struct rte_memseg_list *msl; + const struct rte_memseg *ms; + struct mlx5_mr *mr = NULL; + size_t len; + uint32_t ms_n; + uint32_t bmp_size; + void *bmp_mem; + int ms_idx_shift = -1; + unsigned int n; + struct mr_find_contig_memsegs_data data = { + .addr = addr, }; - struct mlx5_mr *mr; + struct mr_find_contig_memsegs_data data_re; - /* Register mempool only if the first element looks like a mbuf. */ - if (rte_mempool_obj_iter(mp, txq_mp2mr_mbuf_check, &data) == 0 || - data.ret == -1) - return; - mr = priv_mr_get(priv, mp); - if (mr) { - priv_mr_release(priv, mr); - return; + DRV_LOG(DEBUG, "port %u creating an MR using address (%p)", + dev->data->port_id, (void *)addr); + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { + DRV_LOG(WARNING, + "port %u using address (%p) of unregistered mempool" + " in secondary process, please create mempool" + " before rte_eth_dev_start()", + dev->data->port_id, (void *)addr); + rte_errno = EPERM; + goto err_nolock; + } + /* + * Release detached MRs if any. This can't be called while holding either + * memory_hotplug_lock or priv->mr.rwlock. MRs on the free list have + * been detached by the memory free event but could not be released + * inside the callback due to deadlock. As a result, releasing resources + * is quite opportunistic. + */ + mlx5_mr_garbage_collect(dev); + /* + * Find out a contiguous virtual address chunk in use, to which the + * given address belongs, in order to register maximum range. In the + * best case where mempools are not dynamically recreated and + * '--socket-mem' is specified as an EAL option, it is very likely to + * have only one MR (LKey) per socket and per hugepage size even + * though the system memory is highly fragmented. + */ + if (!rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data)) { + DRV_LOG(WARNING, + "port %u unable to find virtually contiguous" + " chunk for address (%p)." + " rte_memseg_contig_walk() failed.", + dev->data->port_id, (void *)addr); + rte_errno = ENXIO; + goto err_nolock; + } +alloc_resources: + /* Addresses must be page-aligned. */ + assert(rte_is_aligned((void *)data.start, data.msl->page_sz)); + assert(rte_is_aligned((void *)data.end, data.msl->page_sz)); + msl = data.msl; + ms = rte_mem_virt2memseg((void *)data.start, msl); + len = data.end - data.start; + assert(msl->page_sz == ms->hugepage_sz); + /* Number of memsegs in the range. */ + ms_n = len / msl->page_sz; + DRV_LOG(DEBUG, + "port %u extending %p to [0x%" PRIxPTR ", 0x%" PRIxPTR ")," + " page_sz=0x%" PRIx64 ", ms_n=%u", + dev->data->port_id, (void *)addr, + data.start, data.end, msl->page_sz, ms_n); + /* Size of memory for bitmap. */ + bmp_size = rte_bitmap_get_memory_footprint(ms_n); + mr = rte_zmalloc_socket(NULL, + RTE_ALIGN_CEIL(sizeof(*mr), + RTE_CACHE_LINE_SIZE) + + bmp_size, + RTE_CACHE_LINE_SIZE, msl->socket_id); + if (mr == NULL) { + DRV_LOG(WARNING, + "port %u unable to allocate memory for a new MR of" + " address (%p).", + dev->data->port_id, (void *)addr); + rte_errno = ENOMEM; + goto err_nolock; + } + mr->msl = msl; + /* + * Save the index of the first memseg and initialize memseg bitmap.
To + * see if a memseg of ms_idx in the memseg-list is still valid, check: + * rte_bitmap_get(mr->bmp, ms_idx - mr->ms_base_idx) + */ + mr->ms_base_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms); + bmp_mem = RTE_PTR_ALIGN_CEIL(mr + 1, RTE_CACHE_LINE_SIZE); + mr->ms_bmp = rte_bitmap_init(ms_n, bmp_mem, bmp_size); + if (mr->ms_bmp == NULL) { + DRV_LOG(WARNING, + "port %u unable to initialize bitmap for a new MR of" + " address (%p).", + dev->data->port_id, (void *)addr); + rte_errno = EINVAL; + goto err_nolock; + } + /* + * Should recheck whether the extended contiguous chunk is still valid. + * Because memory_hotplug_lock can't be held if there are any memory- + * related calls in a critical path, resource allocation above can't be + * locked. If the memory has been changed at this point, try again with + * just a single page. If not, go on with the big chunk atomically from + * here. + */ + rte_rwlock_read_lock(&mcfg->memory_hotplug_lock); + data_re = data; + if (len > msl->page_sz && + !rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data_re)) { + DRV_LOG(WARNING, + "port %u unable to find virtually contiguous" + " chunk for address (%p)." + " rte_memseg_contig_walk() failed.", + dev->data->port_id, (void *)addr); + rte_errno = ENXIO; + goto err_memlock; + } + if (data.start != data_re.start || data.end != data_re.end) { + /* + * The extended contiguous chunk has been changed. Try again + * with single memseg instead. + */ + data.start = RTE_ALIGN_FLOOR(addr, msl->page_sz); + data.end = data.start + msl->page_sz; + rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock); + mr_free(mr); + goto alloc_resources; + } + assert(data.msl == data_re.msl); + rte_rwlock_write_lock(&priv->mr.rwlock); + /* + * Check the address is really missing. If another thread already created + * one or it is not found due to overflow, abort and return. + */ + if (mr_lookup_dev(dev, entry, addr) != UINT32_MAX) { + /* + * Insert to the global cache table. It may fail due to + * low-on-memory. Then, this entry will have to be searched + * here again. + */ + mr_btree_insert(&priv->mr.cache, entry); + DRV_LOG(DEBUG, + "port %u found MR for %p on final lookup, abort", + dev->data->port_id, (void *)addr); + rte_rwlock_write_unlock(&priv->mr.rwlock); + rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock); + /* + * Must be unlocked before calling rte_free() because + * mlx5_mr_mem_event_free_cb() can be called inside. + */ + mr_free(mr); + return entry->lkey; } - priv_mr_new(priv, mp); + /* + * Trim start and end addresses for verbs MR. Set bits for registering + * memsegs but exclude already registered ones. Bitmap can be + * fragmented. + */ + for (n = 0; n < ms_n; ++n) { + uintptr_t start; + struct mlx5_mr_cache ret = { 0, }; + + start = data_re.start + n * msl->page_sz; + /* Exclude memsegs already registered by other MRs. */ + if (mr_lookup_dev(dev, &ret, start) == UINT32_MAX) { + /* + * Start from the first unregistered memseg in the + * extended range. + */ + if (ms_idx_shift == -1) { + mr->ms_base_idx += n; + data.start = start; + ms_idx_shift = n; + } + data.end = start + msl->page_sz; + rte_bitmap_set(mr->ms_bmp, n - ms_idx_shift); + ++mr->ms_n; + } + } + len = data.end - data.start; + mr->ms_bmp_n = len / msl->page_sz; + assert(ms_idx_shift + mr->ms_bmp_n <= ms_n); + /* + * Finally create a verbs MR for the memory chunk. ibv_reg_mr() can be + * called while holding the memory lock because it doesn't use + * mlx5_alloc_buf_extern() which eventually calls rte_malloc_socket() + * through mlx5_alloc_verbs_buf().
+ */ + mr->ibv_mr = mlx5_glue->reg_mr(priv->pd, (void *)data.start, len, + IBV_ACCESS_LOCAL_WRITE); + if (mr->ibv_mr == NULL) { + DRV_LOG(WARNING, + "port %u failed to create a verbs MR for address (%p)", + dev->data->port_id, (void *)addr); + rte_errno = EINVAL; + goto err_mrlock; + } + assert((uintptr_t)mr->ibv_mr->addr == data.start); + assert(mr->ibv_mr->length == len); + LIST_INSERT_HEAD(&priv->mr.mr_list, mr, mr); + DRV_LOG(DEBUG, + "port %u MR CREATED (%p) for %p:\n" + " [0x%" PRIxPTR ", 0x%" PRIxPTR ")," + " lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u", + dev->data->port_id, (void *)mr, (void *)addr, + data.start, data.end, rte_cpu_to_be_32(mr->ibv_mr->lkey), + mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n); + /* Insert to the global cache table. */ + mr_insert_dev_cache(dev, mr); + /* Fill in output data. */ + mr_lookup_dev(dev, entry, addr); + /* Lookup can't fail. */ + assert(entry->lkey != UINT32_MAX); + rte_rwlock_write_unlock(&priv->mr.rwlock); + rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock); + return entry->lkey; +err_mrlock: + rte_rwlock_write_unlock(&priv->mr.rwlock); +err_memlock: + rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock); +err_nolock: + /* + * In case of error, as this can be called in a datapath, a warning + * message per error is preferable instead. Must be unlocked before + * calling rte_free() because mlx5_mr_mem_event_free_cb() can be called + * inside. + */ + mr_free(mr); + return UINT32_MAX; } /** - * Register a new memory region from the mempool and store it in the memory - * region list. * - * @param priv - * Pointer to private structure. - * @param mp - * Pointer to the memory pool to register. - * @return - * The memory region on success. + * Rebuild the global B-tree cache of the device from the original MR list. + * + * @param dev + * Pointer to Ethernet device. */ -struct mlx5_mr* -priv_mr_new(struct priv *priv, struct rte_mempool *mp) +static void +mr_rebuild_dev_cache(struct rte_eth_dev *dev) { - const struct rte_memseg *ms = rte_eal_get_physmem_layout(); - uintptr_t start; - uintptr_t end; - unsigned int i; + struct priv *priv = dev->data->dev_private; struct mlx5_mr *mr; - mr = rte_zmalloc_socket(__func__, sizeof(*mr), 0, mp->socket_id); - if (!mr) { - DEBUG("unable to configure MR, ibv_reg_mr() failed."); - return NULL; + DRV_LOG(DEBUG, "port %u rebuild dev cache[]", dev->data->port_id); + /* Flush cache to rebuild. */ + priv->mr.cache.len = 1; + priv->mr.cache.overflow = 0; + /* Iterate all the existing MRs. */ + LIST_FOREACH(mr, &priv->mr.mr_list, mr) + if (mr_insert_dev_cache(dev, mr) < 0) + return; +} + +/** + * Callback for memory free event. Iterate freed memsegs and check whether each + * belongs to an existing MR. If found, clear the bit from the MR's bitmap. As a + * result, the MR would be fragmented. If it becomes empty, the MR will be freed + * later by mlx5_mr_garbage_collect(). Even if this callback is called from a + * secondary process, the garbage collector will be called in the primary process + * as the secondary process can't call mlx5_mr_create(). + * + * The global cache must be rebuilt if there's any change and this event has to + * be propagated to dataplane threads to flush the local caches. + * + * @param dev + * Pointer to Ethernet device. + * @param addr + * Address of freed memory. + * @param len + * Size of freed memory.
+ */ +static void +mlx5_mr_mem_event_free_cb(struct rte_eth_dev *dev, const void *addr, size_t len) +{ + struct priv *priv = dev->data->dev_private; + const struct rte_memseg_list *msl; + struct mlx5_mr *mr; + int ms_n; + int i; + int rebuild = 0; + + DRV_LOG(DEBUG, "port %u free callback: addr=%p, len=%zu", + dev->data->port_id, addr, len); + msl = rte_mem_virt2memseg_list(addr); + /* addr and len must be page-aligned. */ + assert((uintptr_t)addr == RTE_ALIGN((uintptr_t)addr, msl->page_sz)); + assert(len == RTE_ALIGN(len, msl->page_sz)); + ms_n = len / msl->page_sz; + rte_rwlock_write_lock(&priv->mr.rwlock); + /* Clear bits of freed memsegs from MR. */ + for (i = 0; i < ms_n; ++i) { + const struct rte_memseg *ms; + struct mlx5_mr_cache entry; + uintptr_t start; + int ms_idx; + uint32_t pos; + + /* Find MR having this memseg. */ + start = (uintptr_t)addr + i * msl->page_sz; + mr = mr_lookup_dev_list(dev, &entry, start); + if (mr == NULL) + continue; + ms = rte_mem_virt2memseg((void *)start, msl); + assert(ms != NULL); + assert(msl->page_sz == ms->hugepage_sz); + ms_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms); + pos = ms_idx - mr->ms_base_idx; + assert(rte_bitmap_get(mr->ms_bmp, pos)); + assert(pos < mr->ms_bmp_n); + DRV_LOG(DEBUG, "port %u MR(%p): clear bitmap[%u] for addr %p", + dev->data->port_id, (void *)mr, pos, (void *)start); + rte_bitmap_clear(mr->ms_bmp, pos); + if (--mr->ms_n == 0) { + LIST_REMOVE(mr, mr); + LIST_INSERT_HEAD(&priv->mr.mr_free_list, mr, mr); + DRV_LOG(DEBUG, "port %u remove MR(%p) from list", + dev->data->port_id, (void *)mr); + } + /* + * MR is fragmented or will be freed. the global cache must be + * rebuilt. + */ + rebuild = 1; } - if (mlx5_check_mempool(mp, &start, &end) != 0) { - ERROR("mempool %p: not virtually contiguous", - (void *)mp); - return NULL; + if (rebuild) { + mr_rebuild_dev_cache(dev); + /* + * Flush local caches by propagating invalidation across cores. + * rte_smp_wmb() is enough to synchronize this event. If one of + * freed memsegs is seen by other core, that means the memseg + * has been allocated by allocator, which will come after this + * free call. Therefore, this store instruction (incrementing + * generation below) will be guaranteed to be seen by other core + * before the core sees the newly allocated memory. + */ + ++priv->mr.dev_gen; + DRV_LOG(DEBUG, "broadcasting local cache flush, gen=%d", + priv->mr.dev_gen); + rte_smp_wmb(); } - DEBUG("mempool %p area start=%p end=%p size=%zu", - (void *)mp, (void *)start, (void *)end, - (size_t)(end - start)); - /* Save original addresses for exact MR lookup. */ - mr->start = start; - mr->end = end; - /* Round start and end to page boundary if found in memory segments. */ - for (i = 0; (i < RTE_MAX_MEMSEG) && (ms[i].addr != NULL); ++i) { - uintptr_t addr = (uintptr_t)ms[i].addr; - size_t len = ms[i].len; - unsigned int align = ms[i].hugepage_sz; - - if ((start > addr) && (start < addr + len)) - start = RTE_ALIGN_FLOOR(start, align); - if ((end > addr) && (end < addr + len)) - end = RTE_ALIGN_CEIL(end, align); + rte_rwlock_write_unlock(&priv->mr.rwlock); + if (rebuild && rte_log_get_level(mlx5_logtype) == RTE_LOG_DEBUG) + mlx5_mr_dump_dev(dev); +} + +/** + * Callback for memory event. This can be called from both primary and secondary + * process. + * + * @param event_type + * Memory event type. + * @param addr + * Address of memory. + * @param len + * Size of memory. 
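/*
 * Reader-side counterpart of the dev_gen broadcast above, sketched with
 * illustrative types (the driver's real per-queue check lives in the
 * datapath headers): each queue caches the generation it last synced to and
 * flushes its local cache when the shared counter has moved.
 */
struct example_cache {
	uint32_t cur_gen;	 /* Generation the cached entries belong to. */
	const uint32_t *dev_gen; /* Shared counter bumped by the control path. */
	uint32_t entries[8];	 /* Stand-in for the per-queue lookup arrays. */
};

static void
example_cache_validate(struct example_cache *c)
{
	uint32_t gen = *c->dev_gen; /* Load once before trusting entries. */

	if (gen != c->cur_gen) {
		memset(c->entries, 0, sizeof(c->entries)); /* Drop stale data. */
		c->cur_gen = gen;
	}
}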
+/**
+ * Callback for memory event. This can be called from both primary and
+ * secondary processes.
+ *
+ * @param event_type
+ *   Memory event type.
+ * @param addr
+ *   Address of memory.
+ * @param len
+ *   Size of memory.
+ */
+void
+mlx5_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
+		     size_t len, void *arg __rte_unused)
+{
+	struct priv *priv;
+	struct mlx5_dev_list *dev_list = &mlx5_shared_data->mem_event_cb_list;
+
+	switch (event_type) {
+	case RTE_MEM_EVENT_FREE:
+		rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
+		/* Iterate all the existing mlx5 devices. */
+		LIST_FOREACH(priv, dev_list, mem_event_cb)
+			mlx5_mr_mem_event_free_cb(ETH_DEV(priv), addr, len);
+		rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
+		break;
+	case RTE_MEM_EVENT_ALLOC:
+	default:
+		break;
 	}
-	DEBUG("mempool %p using start=%p end=%p size=%zu for MR",
-	      (void *)mp, (void *)start, (void *)end,
-	      (size_t)(end - start));
-	mr->mr = mlx5_glue->reg_mr(priv->pd, (void *)start, end - start,
-				   IBV_ACCESS_LOCAL_WRITE);
-	mr->mp = mp;
-	mr->lkey = rte_cpu_to_be_32(mr->mr->lkey);
-	rte_atomic32_inc(&mr->refcnt);
-	DEBUG("%p: new Memory Region %p refcnt: %d", (void *)priv,
-	      (void *)mr, rte_atomic32_read(&mr->refcnt));
-	LIST_INSERT_HEAD(&priv->mr, mr, next);
-	return mr;
 }
 
 /**
- * Search the memory region object in the memory region list.
+ * Look up an address in the global MR cache table. If not found, create a new
+ * MR. Insert the found/created entry into the local bottom-half cache table.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param mr_ctrl
+ *   Pointer to per-queue MR control structure.
+ * @param[out] entry
+ *   Pointer to returning MR cache entry, found in the global cache or newly
+ *   created. If failed to create one, this is not written.
+ * @param addr
+ *   Search key.
  *
- * @param priv
- *   Pointer to private structure.
- * @param mp
- *   Pointer to the memory pool to register.
  * @return
- *   The memory region on success.
+ *   Searched LKey on success, UINT32_MAX on no match.
  */
-struct mlx5_mr*
-priv_mr_get(struct priv *priv, struct rte_mempool *mp)
+static uint32_t
+mlx5_mr_lookup_dev(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl,
+		   struct mlx5_mr_cache *entry, uintptr_t addr)
 {
-	struct mlx5_mr *mr;
+	struct priv *priv = dev->data->dev_private;
+	struct mlx5_mr_btree *bt = &mr_ctrl->cache_bh;
+	uint16_t idx;
+	uint32_t lkey;
 
-	assert(mp);
-	if (LIST_EMPTY(&priv->mr))
-		return NULL;
-	LIST_FOREACH(mr, &priv->mr, next) {
-		if (mr->mp == mp) {
-			rte_atomic32_inc(&mr->refcnt);
-			DEBUG("Memory Region %p refcnt: %d",
-			      (void *)mr, rte_atomic32_read(&mr->refcnt));
-			return mr;
-		}
+	/* If the local cache table is full, try to double it. */
+	if (unlikely(bt->len == bt->size))
+		mr_btree_expand(bt, bt->size << 1);
+	/* Look up in the global cache. */
+	rte_rwlock_read_lock(&priv->mr.rwlock);
+	lkey = mr_btree_lookup(&priv->mr.cache, &idx, addr);
+	if (lkey != UINT32_MAX) {
+		/* Found. */
+		*entry = (*priv->mr.cache.table)[idx];
+		rte_rwlock_read_unlock(&priv->mr.rwlock);
+		/*
+		 * Update local cache. Even if it fails, return the found entry
+		 * to update the top-half cache. Next time, this entry will be
+		 * found in the global cache.
+		 */
+		mr_btree_insert(bt, entry);
+		return lkey;
 	}
-	return NULL;
+	rte_rwlock_read_unlock(&priv->mr.rwlock);
+	/* First time to see the address: create a new MR. */
+	lkey = mlx5_mr_create(dev, entry, addr);
+	/*
+	 * Update the local cache if a new global MR was successfully created.
+	 * Even if that failed, there's no action to take in this datapath
+	 * code. As the returned LKey is invalid, this will eventually make
+	 * the HW fail.
+	 */
+	if (lkey != UINT32_MAX)
+		mr_btree_insert(bt, entry);
+	return lkey;
 }
 
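mr_btree_lookup() and mr_btree_insert() are defined earlier in this file; the table they operate on is, per the mlx5_mr_btree_len() comment in mlx5_mr.h below, sorted by start address with entry zero reserved as a NULL sentinel. A plausible sketch of the lower-bound binary search such a layout implies (not the driver's own implementation):

/*
 * Sketch only: lower-bound binary search over a table sorted by start
 * address, with table[0] reserved as a NULL sentinel. Assumes the
 * struct mlx5_mr_btree/mlx5_mr_cache layout from mlx5_mr.h.
 */
static uint32_t
btree_lookup_sketch(struct mlx5_mr_btree *bt, uint16_t *idx, uintptr_t addr)
{
	struct mlx5_mr_cache *table = *bt->table;
	uint16_t lo = 0;
	uint16_t hi = bt->len;

	/* Shrink [lo, hi) while keeping table[lo].start <= addr. */
	while (lo + 1 < hi) {
		uint16_t mid = (lo + hi) / 2;

		if (addr < table[mid].start)
			hi = mid;
		else
			lo = mid;
	}
	*idx = lo;
	if (addr >= table[lo].start && addr < table[lo].end)
		return table[lo].lkey;
	return UINT32_MAX; /* No match; lo may be the NULL sentinel. */
}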
 /**
- * Release the memory region object.
+ * Bottom-half of LKey search on the datapath. First search in cache_bh[]; if
+ * that misses, search the global MR cache table and add the new entry to the
+ * per-queue local caches.
  *
- * @param mr
- *   Pointer to memory region to release.
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param mr_ctrl
+ *   Pointer to per-queue MR control structure.
+ * @param addr
+ *   Search key.
  *
  * @return
- *   0 on success, errno on failure.
+ *   Searched LKey on success, UINT32_MAX on no match.
  */
-int
-priv_mr_release(struct priv *priv, struct mlx5_mr *mr)
+static uint32_t
+mlx5_mr_addr2mr_bh(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl,
+		   uintptr_t addr)
 {
-	(void)priv;
-	assert(mr);
-	DEBUG("Memory Region %p refcnt: %d",
-	      (void *)mr, rte_atomic32_read(&mr->refcnt));
-	if (rte_atomic32_dec_and_test(&mr->refcnt)) {
-		claim_zero(mlx5_glue->dereg_mr(mr->mr));
-		LIST_REMOVE(mr, next);
-		rte_free(mr);
-		return 0;
+	uint32_t lkey;
+	uint16_t bh_idx = 0;
+	/* Victim in top-half cache to replace with new entry. */
+	struct mlx5_mr_cache *repl = &mr_ctrl->cache[mr_ctrl->head];
+
+	/* Binary-search MR translation table. */
+	lkey = mr_btree_lookup(&mr_ctrl->cache_bh, &bh_idx, addr);
+	/* Update top-half cache. */
+	if (likely(lkey != UINT32_MAX)) {
+		*repl = (*mr_ctrl->cache_bh.table)[bh_idx];
+	} else {
+		/*
+		 * If the local lookup table misses, search the global cache;
+		 * the local cache_bh[] will be updated inside if possible.
+		 * The top-half cache entry will also be updated.
+		 */
+		lkey = mlx5_mr_lookup_dev(dev, mr_ctrl, repl, addr);
+		if (unlikely(lkey == UINT32_MAX))
+			return UINT32_MAX;
 	}
-	return EBUSY;
+	/* Update the most recently used entry. */
+	mr_ctrl->mru = mr_ctrl->head;
+	/* Point to the next victim, the oldest. */
+	mr_ctrl->head = (mr_ctrl->head + 1) % MLX5_MR_CACHE_N;
+	return lkey;
+}
+
+/**
+ * Bottom-half of LKey search on Rx.
+ *
+ * @param rxq
+ *   Pointer to Rx queue structure.
+ * @param addr
+ *   Search key.
+ *
+ * @return
+ *   Searched LKey on success, UINT32_MAX on no match.
+ */
+uint32_t
+mlx5_rx_addr2mr_bh(struct mlx5_rxq_data *rxq, uintptr_t addr)
+{
+	struct mlx5_rxq_ctrl *rxq_ctrl =
+		container_of(rxq, struct mlx5_rxq_ctrl, rxq);
+	struct mlx5_mr_ctrl *mr_ctrl = &rxq->mr_ctrl;
+	struct priv *priv = rxq_ctrl->priv;
+
+	DRV_LOG(DEBUG,
+		"Rx queue %u: miss on top-half, mru=%u, head=%u, addr=%p",
+		rxq_ctrl->idx, mr_ctrl->mru, mr_ctrl->head, (void *)addr);
+	return mlx5_mr_addr2mr_bh(ETH_DEV(priv), mr_ctrl, addr);
+}
+
+/**
+ * Bottom-half of LKey search on Tx.
+ *
+ * @param txq
+ *   Pointer to Tx queue structure.
+ * @param addr
+ *   Search key.
+ *
+ * @return
+ *   Searched LKey on success, UINT32_MAX on no match.
+ */
+uint32_t
+mlx5_tx_addr2mr_bh(struct mlx5_txq_data *txq, uintptr_t addr)
+{
+	struct mlx5_txq_ctrl *txq_ctrl =
+		container_of(txq, struct mlx5_txq_ctrl, txq);
+	struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
+	struct priv *priv = txq_ctrl->priv;
+
+	DRV_LOG(DEBUG,
+		"Tx queue %u: miss on top-half, mru=%u, head=%u, addr=%p",
+		txq_ctrl->idx, mr_ctrl->mru, mr_ctrl->head, (void *)addr);
+	return mlx5_mr_addr2mr_bh(ETH_DEV(priv), mr_ctrl, addr);
+}
+
+/**
+ * Flush all of the local cache entries.
+ *
+ * @param mr_ctrl
+ *   Pointer to per-queue MR control structure.
+ */
+void
+mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl)
+{
+	/* Reset the most-recently-used index. */
+	mr_ctrl->mru = 0;
+	/* Reset the linear search array. */
+	mr_ctrl->head = 0;
+	memset(mr_ctrl->cache, 0, sizeof(mr_ctrl->cache));
+	/* Reset the B-tree table.
*/ + mr_ctrl->cache_bh.len = 1; + mr_ctrl->cache_bh.overflow = 0; + /* Update the generation number. */ + mr_ctrl->cur_gen = *mr_ctrl->dev_gen_ptr; + DRV_LOG(DEBUG, "mr_ctrl(%p): flushed, cur_gen=%d", + (void *)mr_ctrl, mr_ctrl->cur_gen); +} + +/* Called during rte_mempool_mem_iter() by mlx5_mr_update_mp(). */ +static void +mlx5_mr_update_mp_cb(struct rte_mempool *mp __rte_unused, void *opaque, + struct rte_mempool_memhdr *memhdr, + unsigned mem_idx __rte_unused) +{ + struct mr_update_mp_data *data = opaque; + uint32_t lkey; + + /* Stop iteration if failed in the previous walk. */ + if (data->ret < 0) + return; + /* Register address of the chunk and update local caches. */ + lkey = mlx5_mr_addr2mr_bh(data->dev, data->mr_ctrl, + (uintptr_t)memhdr->addr); + if (lkey == UINT32_MAX) + data->ret = -1; } /** - * Verify the flow list is empty + * Register entire memory chunks in a Mempool. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. + * @param mr_ctrl + * Pointer to per-queue MR control structure. + * @param mp + * Pointer to registering Mempool. * - * @return the number of object not released. + * @return + * 0 on success, -1 on failure. */ int -priv_mr_verify(struct priv *priv) +mlx5_mr_update_mp(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl, + struct rte_mempool *mp) { - int ret = 0; + struct mr_update_mp_data data = { + .dev = dev, + .mr_ctrl = mr_ctrl, + .ret = 0, + }; + + rte_mempool_mem_iter(mp, mlx5_mr_update_mp_cb, &data); + return data.ret; +} + +/** + * Dump all the created MRs and the global cache entries. + * + * @param dev + * Pointer to Ethernet device. + */ +void +mlx5_mr_dump_dev(struct rte_eth_dev *dev) +{ + struct priv *priv = dev->data->dev_private; struct mlx5_mr *mr; + int mr_n = 0; + int chunk_n = 0; + + rte_rwlock_read_lock(&priv->mr.rwlock); + /* Iterate all the existing MRs. */ + LIST_FOREACH(mr, &priv->mr.mr_list, mr) { + unsigned int n; - LIST_FOREACH(mr, &priv->mr, next) { - DEBUG("%p: mr %p still referenced", (void *)priv, - (void *)mr); - ++ret; + DRV_LOG(DEBUG, + "port %u MR[%u], LKey = 0x%x, ms_n = %u, ms_bmp_n = %u", + dev->data->port_id, mr_n++, + rte_cpu_to_be_32(mr->ibv_mr->lkey), + mr->ms_n, mr->ms_bmp_n); + if (mr->ms_n == 0) + continue; + for (n = 0; n < mr->ms_bmp_n; ) { + struct mlx5_mr_cache ret = { 0, }; + + n = mr_find_next_chunk(mr, &ret, n); + if (!ret.end) + break; + DRV_LOG(DEBUG, + " chunk[%u], [0x%" PRIxPTR ", 0x%" PRIxPTR ")", + chunk_n++, ret.start, ret.end); + } } - return ret; + DRV_LOG(DEBUG, "port %u dumping global cache", dev->data->port_id); + mlx5_mr_btree_dump(&priv->mr.cache); + rte_rwlock_read_unlock(&priv->mr.rwlock); +} + +/** + * Release all the created MRs and resources. Remove device from memory callback + * list. + * + * @param dev + * Pointer to Ethernet device. + */ +void +mlx5_mr_release(struct rte_eth_dev *dev) +{ + struct priv *priv = dev->data->dev_private; + struct mlx5_mr *mr_next = LIST_FIRST(&priv->mr.mr_list); + + /* Remove from memory callback device list. */ + rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock); + LIST_REMOVE(priv, mem_event_cb); + rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock); + if (rte_log_get_level(mlx5_logtype) == RTE_LOG_DEBUG) + mlx5_mr_dump_dev(dev); + rte_rwlock_write_lock(&priv->mr.rwlock); + /* Detach from MR list and move to free list. 
*/ + while (mr_next != NULL) { + struct mlx5_mr *mr = mr_next; + + mr_next = LIST_NEXT(mr, mr); + LIST_REMOVE(mr, mr); + LIST_INSERT_HEAD(&priv->mr.mr_free_list, mr, mr); + } + LIST_INIT(&priv->mr.mr_list); + /* Free global cache. */ + mlx5_mr_btree_free(&priv->mr.cache); + rte_rwlock_write_unlock(&priv->mr.rwlock); + /* Free all remaining MRs. */ + mlx5_mr_garbage_collect(dev); } diff --git a/drivers/net/mlx5/mlx5_mr.h b/drivers/net/mlx5/mlx5_mr.h new file mode 100644 index 00000000..e0b28215 --- /dev/null +++ b/drivers/net/mlx5/mlx5_mr.h @@ -0,0 +1,117 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2018 6WIND S.A. + * Copyright 2018 Mellanox Technologies, Ltd + */ + +#ifndef RTE_PMD_MLX5_MR_H_ +#define RTE_PMD_MLX5_MR_H_ + +#include +#include +#include + +/* Verbs header. */ +/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */ +#ifdef PEDANTIC +#pragma GCC diagnostic ignored "-Wpedantic" +#endif +#include +#include +#ifdef PEDANTIC +#pragma GCC diagnostic error "-Wpedantic" +#endif + +#include +#include +#include +#include + +/* Memory Region object. */ +struct mlx5_mr { + LIST_ENTRY(mlx5_mr) mr; /**< Pointer to the prev/next entry. */ + struct ibv_mr *ibv_mr; /* Verbs Memory Region. */ + const struct rte_memseg_list *msl; + int ms_base_idx; /* Start index of msl->memseg_arr[]. */ + int ms_n; /* Number of memsegs in use. */ + uint32_t ms_bmp_n; /* Number of bits in memsegs bit-mask. */ + struct rte_bitmap *ms_bmp; /* Bit-mask of memsegs belonged to MR. */ +}; + +/* Cache entry for Memory Region. */ +struct mlx5_mr_cache { + uintptr_t start; /* Start address of MR. */ + uintptr_t end; /* End address of MR. */ + uint32_t lkey; /* rte_cpu_to_be_32(ibv_mr->lkey). */ +} __rte_packed; + +/* MR Cache table for Binary search. */ +struct mlx5_mr_btree { + uint16_t len; /* Number of entries. */ + uint16_t size; /* Total number of entries. */ + int overflow; /* Mark failure of table expansion. */ + struct mlx5_mr_cache (*table)[]; +} __rte_packed; + +/* Per-queue MR control descriptor. */ +struct mlx5_mr_ctrl { + uint32_t *dev_gen_ptr; /* Generation number of device to poll. */ + uint32_t cur_gen; /* Generation number saved to flush caches. */ + uint16_t mru; /* Index of last hit entry in top-half cache. */ + uint16_t head; /* Index of the oldest entry in top-half cache. */ + struct mlx5_mr_cache cache[MLX5_MR_CACHE_N]; /* Cache for top-half. */ + struct mlx5_mr_btree cache_bh; /* Cache for bottom-half. */ +} __rte_packed; + +extern struct mlx5_dev_list mlx5_mem_event_cb_list; +extern rte_rwlock_t mlx5_mem_event_rwlock; + +/* First entry must be NULL for comparison. */ +#define mlx5_mr_btree_len(bt) ((bt)->len - 1) + +int mlx5_mr_btree_init(struct mlx5_mr_btree *bt, int n, int socket); +void mlx5_mr_btree_free(struct mlx5_mr_btree *bt); +void mlx5_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr, + size_t len, void *arg); +int mlx5_mr_update_mp(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl, + struct rte_mempool *mp); +void mlx5_mr_dump_dev(struct rte_eth_dev *dev); +void mlx5_mr_release(struct rte_eth_dev *dev); + +/** + * Look up LKey from given lookup table by linear search. Firstly look up the + * last-hit entry. If miss, the entire array is searched. If found, update the + * last-hit index and return LKey. + * + * @param lkp_tbl + * Pointer to lookup table. + * @param[in,out] cached_idx + * Pointer to last-hit index. + * @param n + * Size of lookup table. + * @param addr + * Search key. 
+ *
+ * @return
+ *   Searched LKey on success, UINT32_MAX on no match.
+ */
+static __rte_always_inline uint32_t
+mlx5_mr_lookup_cache(struct mlx5_mr_cache *lkp_tbl, uint16_t *cached_idx,
+		     uint16_t n, uintptr_t addr)
+{
+	uint16_t idx;
+
+	if (likely(addr >= lkp_tbl[*cached_idx].start &&
+		   addr < lkp_tbl[*cached_idx].end))
+		return lkp_tbl[*cached_idx].lkey;
+	for (idx = 0; idx < n && lkp_tbl[idx].start != 0; ++idx) {
+		if (addr >= lkp_tbl[idx].start &&
+		    addr < lkp_tbl[idx].end) {
+			/* Found. */
+			*cached_idx = idx;
+			return lkp_tbl[idx].lkey;
+		}
+	}
+	return UINT32_MAX;
+}
+
+#endif /* RTE_PMD_MLX5_MR_H_ */
diff --git a/drivers/net/mlx5/mlx5_nl.c b/drivers/net/mlx5/mlx5_nl.c
new file mode 100644
index 00000000..dca85835
--- /dev/null
+++ b/drivers/net/mlx5/mlx5_nl.c
@@ -0,0 +1,627 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 6WIND S.A.
+ * Copyright 2018 Mellanox Technologies, Ltd
+ */
+
+#include
+#include
+#include
+
+#include "mlx5.h"
+#include "mlx5_utils.h"
+
+/* Size of the buffer to receive kernel messages */
+#define MLX5_NL_BUF_SIZE (32 * 1024)
+/* Send buffer size for the Netlink socket */
+#define MLX5_SEND_BUF_SIZE 32768
+/* Receive buffer size for the Netlink socket */
+#define MLX5_RECV_BUF_SIZE 32768
+
+/*
+ * Define NDA_RTA as defined in iproute2 sources.
+ *
+ * See the iproute2 source file include/libnetlink.h.
+ */
+#ifndef MLX5_NDA_RTA
+#define MLX5_NDA_RTA(r) \
+	((struct rtattr *)(((char *)(r)) + NLMSG_ALIGN(sizeof(struct ndmsg))))
+#endif
+
+/* Add/remove MAC address through Netlink */
+struct mlx5_nl_mac_addr {
+	struct ether_addr (*mac)[];
+	/**< MAC addresses handled by the device. */
+	int mac_n; /**< Number of addresses in the array. */
+};
+
+/**
+ * Opens a Netlink socket.
+ *
+ * @param nl_groups
+ *   Netlink group value (e.g. RTMGRP_LINK).
+ *
+ * @return
+ *   A file descriptor on success, a negative errno value otherwise and
+ *   rte_errno is set.
+ */
+int
+mlx5_nl_init(uint32_t nl_groups)
+{
+	int fd;
+	int sndbuf_size = MLX5_SEND_BUF_SIZE;
+	int rcvbuf_size = MLX5_RECV_BUF_SIZE;
+	struct sockaddr_nl local = {
+		.nl_family = AF_NETLINK,
+		.nl_groups = nl_groups,
+	};
+	int ret;
+
+	fd = socket(AF_NETLINK, SOCK_RAW | SOCK_CLOEXEC, NETLINK_ROUTE);
+	if (fd == -1) {
+		rte_errno = errno;
+		return -rte_errno;
+	}
+	ret = setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &sndbuf_size, sizeof(int));
+	if (ret == -1) {
+		rte_errno = errno;
+		goto error;
+	}
+	ret = setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcvbuf_size, sizeof(int));
+	if (ret == -1) {
+		rte_errno = errno;
+		goto error;
+	}
+	ret = bind(fd, (struct sockaddr *)&local, sizeof(local));
+	if (ret == -1) {
+		rte_errno = errno;
+		goto error;
+	}
+	return fd;
+error:
+	close(fd);
+	return -rte_errno;
+}
+
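mlx5_nl_send() and mlx5_nl_recv(), defined next, pair up through the sequence number, and a request that only needs the kernel's NLMSG_ERROR acknowledgement can pass a NULL callback. A minimal sketch of that round trip (the helper name is illustrative, not a driver function; mlx5_nl_mac_addr_modify() further below uses exactly this shape):

/* Sketch: one Netlink request/ack round trip keyed by sequence number. */
static int
nl_ack_sketch(int nlsk_fd, struct nlmsghdr *nh, uint32_t sn)
{
	int ret = mlx5_nl_send(nlsk_fd, nh, sn);

	if (ret < 0)
		return ret;
	/* NULL callback: only the NLMSG_ERROR ack terminates the loop. */
	return mlx5_nl_recv(nlsk_fd, sn, NULL, NULL);
}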
+/**
+ * Send a request message to the kernel on the Netlink socket.
+ *
+ * @param[in] nlsk_fd
+ *   Netlink socket file descriptor.
+ * @param[in] nh
+ *   The Netlink message to send to the kernel.
+ * @param[in] sn
+ *   Sequence number.
+ * @param[in] req
+ *   Pointer to the request structure.
+ * @param[in] len
+ *   Length of the request in bytes.
+ *
+ * @return
+ *   The number of sent bytes on success, a negative errno value otherwise and
+ *   rte_errno is set.
+ */
+static int
+mlx5_nl_request(int nlsk_fd, struct nlmsghdr *nh, uint32_t sn, void *req,
+		int len)
+{
+	struct sockaddr_nl sa = {
+		.nl_family = AF_NETLINK,
+	};
+	struct iovec iov[2] = {
+		{ .iov_base = nh, .iov_len = sizeof(*nh), },
+		{ .iov_base = req, .iov_len = len, },
+	};
+	struct msghdr msg = {
+		.msg_name = &sa,
+		.msg_namelen = sizeof(sa),
+		.msg_iov = iov,
+		.msg_iovlen = 2,
+	};
+	int send_bytes;
+
+	nh->nlmsg_pid = 0; /* communication with the kernel uses pid 0 */
+	nh->nlmsg_seq = sn;
+	send_bytes = sendmsg(nlsk_fd, &msg, 0);
+	if (send_bytes < 0) {
+		rte_errno = errno;
+		return -rte_errno;
+	}
+	return send_bytes;
+}
+
+/**
+ * Send a message to the kernel on the Netlink socket.
+ *
+ * @param[in] nlsk_fd
+ *   The Netlink socket file descriptor used for communication.
+ * @param[in] nh
+ *   The Netlink message to send to the kernel.
+ * @param[in] sn
+ *   Sequence number.
+ *
+ * @return
+ *   The number of sent bytes on success, a negative errno value otherwise and
+ *   rte_errno is set.
+ */
+static int
+mlx5_nl_send(int nlsk_fd, struct nlmsghdr *nh, uint32_t sn)
+{
+	struct sockaddr_nl sa = {
+		.nl_family = AF_NETLINK,
+	};
+	struct iovec iov = {
+		.iov_base = nh,
+		.iov_len = nh->nlmsg_len,
+	};
+	struct msghdr msg = {
+		.msg_name = &sa,
+		.msg_namelen = sizeof(sa),
+		.msg_iov = &iov,
+		.msg_iovlen = 1,
+	};
+	int send_bytes;
+
+	nh->nlmsg_pid = 0; /* communication with the kernel uses pid 0 */
+	nh->nlmsg_seq = sn;
+	send_bytes = sendmsg(nlsk_fd, &msg, 0);
+	if (send_bytes < 0) {
+		rte_errno = errno;
+		return -rte_errno;
+	}
+	return send_bytes;
+}
+
+/**
+ * Receive a message from the kernel on the Netlink socket, following
+ * mlx5_nl_send().
+ *
+ * @param[in] nlsk_fd
+ *   The Netlink socket file descriptor used for communication.
+ * @param[in] sn
+ *   Sequence number.
+ * @param[in] cb
+ *   The callback function to call for each Netlink message received.
+ * @param[in, out] arg
+ *   Custom arguments for the callback.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_nl_recv(int nlsk_fd, uint32_t sn, int (*cb)(struct nlmsghdr *, void *arg),
+	     void *arg)
+{
+	struct sockaddr_nl sa;
+	char buf[MLX5_RECV_BUF_SIZE];
+	struct iovec iov = {
+		.iov_base = buf,
+		.iov_len = sizeof(buf),
+	};
+	struct msghdr msg = {
+		.msg_name = &sa,
+		.msg_namelen = sizeof(sa),
+		.msg_iov = &iov,
+		/* One message at a time */
+		.msg_iovlen = 1,
+	};
+	int multipart = 0;
+	int ret = 0;
+
+	do {
+		struct nlmsghdr *nh;
+		int recv_bytes = 0;
+
+		do {
+			recv_bytes = recvmsg(nlsk_fd, &msg, 0);
+			if (recv_bytes == -1) {
+				rte_errno = errno;
+				return -rte_errno;
+			}
+			nh = (struct nlmsghdr *)buf;
+		} while (nh->nlmsg_seq != sn);
+		for (;
+		     NLMSG_OK(nh, (unsigned int)recv_bytes);
+		     nh = NLMSG_NEXT(nh, recv_bytes)) {
+			if (nh->nlmsg_type == NLMSG_ERROR) {
+				struct nlmsgerr *err_data = NLMSG_DATA(nh);
+
+				if (err_data->error < 0) {
+					rte_errno = -err_data->error;
+					return -rte_errno;
+				}
+				/* Ack message. */
+				return 0;
+			}
+			/* Multi-part messages and their trailing DONE message. */
+			if (nh->nlmsg_flags & NLM_F_MULTI) {
+				if (nh->nlmsg_type == NLMSG_DONE)
+					return 0;
+				multipart = 1;
+			}
+			if (cb) {
+				ret = cb(nh, arg);
+				if (ret < 0)
+					return ret;
+			}
+		}
+	} while (multipart);
+	return ret;
+}
+
+/**
+ * Parse a Netlink message to retrieve the bridge MAC address.
+ *
+ * @param nh
+ *   Pointer to Netlink Message Header.
+ * @param arg
+ *   PMD data registered with this callback.
+ * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx5_nl_mac_addr_cb(struct nlmsghdr *nh, void *arg) +{ + struct mlx5_nl_mac_addr *data = arg; + struct ndmsg *r = NLMSG_DATA(nh); + struct rtattr *attribute; + int len; + + len = nh->nlmsg_len - NLMSG_LENGTH(sizeof(*r)); + for (attribute = MLX5_NDA_RTA(r); + RTA_OK(attribute, len); + attribute = RTA_NEXT(attribute, len)) { + if (attribute->rta_type == NDA_LLADDR) { + if (data->mac_n == MLX5_MAX_MAC_ADDRESSES) { + DRV_LOG(WARNING, + "not enough room to finalize the" + " request"); + rte_errno = ENOMEM; + return -rte_errno; + } +#ifndef NDEBUG + char m[18]; + + ether_format_addr(m, 18, RTA_DATA(attribute)); + DRV_LOG(DEBUG, "bridge MAC address %s", m); +#endif + memcpy(&(*data->mac)[data->mac_n++], + RTA_DATA(attribute), ETHER_ADDR_LEN); + } + } + return 0; +} + +/** + * Get bridge MAC addresses. + * + * @param dev + * Pointer to Ethernet device. + * @param mac[out] + * Pointer to the array table of MAC addresses to fill. + * Its size should be of MLX5_MAX_MAC_ADDRESSES. + * @param mac_n[out] + * Number of entries filled in MAC array. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx5_nl_mac_addr_list(struct rte_eth_dev *dev, struct ether_addr (*mac)[], + int *mac_n) +{ + struct priv *priv = dev->data->dev_private; + int iface_idx = mlx5_ifindex(dev); + struct { + struct nlmsghdr hdr; + struct ifinfomsg ifm; + } req = { + .hdr = { + .nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg)), + .nlmsg_type = RTM_GETNEIGH, + .nlmsg_flags = NLM_F_DUMP | NLM_F_REQUEST, + }, + .ifm = { + .ifi_family = PF_BRIDGE, + .ifi_index = iface_idx, + }, + }; + struct mlx5_nl_mac_addr data = { + .mac = mac, + .mac_n = 0, + }; + int fd; + int ret; + uint32_t sn = priv->nl_sn++; + + if (priv->nl_socket == -1) + return 0; + fd = priv->nl_socket; + ret = mlx5_nl_request(fd, &req.hdr, sn, &req.ifm, + sizeof(struct ifinfomsg)); + if (ret < 0) + goto error; + ret = mlx5_nl_recv(fd, sn, mlx5_nl_mac_addr_cb, &data); + if (ret < 0) + goto error; + *mac_n = data.mac_n; + return 0; +error: + DRV_LOG(DEBUG, "port %u cannot retrieve MAC address list %s", + dev->data->port_id, strerror(rte_errno)); + return -rte_errno; +} + +/** + * Modify the MAC address neighbour table with Netlink. + * + * @param dev + * Pointer to Ethernet device. + * @param mac + * MAC address to consider. + * @param add + * 1 to add the MAC address, 0 to remove the MAC address. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx5_nl_mac_addr_modify(struct rte_eth_dev *dev, struct ether_addr *mac, + int add) +{ + struct priv *priv = dev->data->dev_private; + int iface_idx = mlx5_ifindex(dev); + struct { + struct nlmsghdr hdr; + struct ndmsg ndm; + struct rtattr rta; + uint8_t buffer[ETHER_ADDR_LEN]; + } req = { + .hdr = { + .nlmsg_len = NLMSG_LENGTH(sizeof(struct ndmsg)), + .nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | + NLM_F_EXCL | NLM_F_ACK, + .nlmsg_type = add ? 
RTM_NEWNEIGH : RTM_DELNEIGH, + }, + .ndm = { + .ndm_family = PF_BRIDGE, + .ndm_state = NUD_NOARP | NUD_PERMANENT, + .ndm_ifindex = iface_idx, + .ndm_flags = NTF_SELF, + }, + .rta = { + .rta_type = NDA_LLADDR, + .rta_len = RTA_LENGTH(ETHER_ADDR_LEN), + }, + }; + int fd; + int ret; + uint32_t sn = priv->nl_sn++; + + if (priv->nl_socket == -1) + return 0; + fd = priv->nl_socket; + memcpy(RTA_DATA(&req.rta), mac, ETHER_ADDR_LEN); + req.hdr.nlmsg_len = NLMSG_ALIGN(req.hdr.nlmsg_len) + + RTA_ALIGN(req.rta.rta_len); + ret = mlx5_nl_send(fd, &req.hdr, sn); + if (ret < 0) + goto error; + ret = mlx5_nl_recv(fd, sn, NULL, NULL); + if (ret < 0) + goto error; + return 0; +error: + DRV_LOG(DEBUG, + "port %u cannot %s MAC address %02X:%02X:%02X:%02X:%02X:%02X" + " %s", + dev->data->port_id, + add ? "add" : "remove", + mac->addr_bytes[0], mac->addr_bytes[1], + mac->addr_bytes[2], mac->addr_bytes[3], + mac->addr_bytes[4], mac->addr_bytes[5], + strerror(rte_errno)); + return -rte_errno; +} + +/** + * Add a MAC address. + * + * @param dev + * Pointer to Ethernet device. + * @param mac + * MAC address to register. + * @param index + * MAC address index. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_nl_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac, + uint32_t index) +{ + struct priv *priv = dev->data->dev_private; + int ret; + + ret = mlx5_nl_mac_addr_modify(dev, mac, 1); + if (!ret) + BITFIELD_SET(priv->mac_own, index); + if (ret == -EEXIST) + return 0; + return ret; +} + +/** + * Remove a MAC address. + * + * @param dev + * Pointer to Ethernet device. + * @param mac + * MAC address to remove. + * @param index + * MAC address index. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_nl_mac_addr_remove(struct rte_eth_dev *dev, struct ether_addr *mac, + uint32_t index) +{ + struct priv *priv = dev->data->dev_private; + + BITFIELD_RESET(priv->mac_own, index); + return mlx5_nl_mac_addr_modify(dev, mac, 0); +} + +/** + * Synchronize Netlink bridge table to the internal table. + * + * @param dev + * Pointer to Ethernet device. + */ +void +mlx5_nl_mac_addr_sync(struct rte_eth_dev *dev) +{ + struct ether_addr macs[MLX5_MAX_MAC_ADDRESSES]; + int macs_n = 0; + int i; + int ret; + + ret = mlx5_nl_mac_addr_list(dev, &macs, &macs_n); + if (ret) + return; + for (i = 0; i != macs_n; ++i) { + int j; + + /* Verify the address is not in the array yet. */ + for (j = 0; j != MLX5_MAX_MAC_ADDRESSES; ++j) + if (is_same_ether_addr(&macs[i], + &dev->data->mac_addrs[j])) + break; + if (j != MLX5_MAX_MAC_ADDRESSES) + continue; + /* Find the first entry available. */ + for (j = 0; j != MLX5_MAX_MAC_ADDRESSES; ++j) { + if (is_zero_ether_addr(&dev->data->mac_addrs[j])) { + dev->data->mac_addrs[j] = macs[i]; + break; + } + } + } +} + +/** + * Flush all added MAC addresses. + * + * @param dev + * Pointer to Ethernet device. + */ +void +mlx5_nl_mac_addr_flush(struct rte_eth_dev *dev) +{ + struct priv *priv = dev->data->dev_private; + int i; + + for (i = MLX5_MAX_MAC_ADDRESSES - 1; i >= 0; --i) { + struct ether_addr *m = &dev->data->mac_addrs[i]; + + if (BITFIELD_ISSET(priv->mac_own, i)) + mlx5_nl_mac_addr_remove(dev, m, i); + } +} + +/** + * Enable promiscuous / all multicast mode through Netlink. + * + * @param dev + * Pointer to Ethernet device structure. + * @param flags + * IFF_PROMISC for promiscuous, IFF_ALLMULTI for allmulti. + * @param enable + * Nonzero to enable, disable otherwise. 
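The nlmsg_len computation in the neighbour request above is the part of this code that is easiest to get wrong, so here is a standalone worked example of the same arithmetic using only the standard <linux/netlink.h>/<linux/rtnetlink.h> macros; the 6-byte payload stands in for ETHER_ADDR_LEN:

#include <stdio.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

int
main(void)
{
	/* Aligned header+ndmsg payload, then the aligned LLADDR attribute,
	 * exactly as req.hdr.nlmsg_len is computed above. */
	unsigned int hdr_len = NLMSG_LENGTH(sizeof(struct ndmsg));
	unsigned int rta_len = RTA_LENGTH(6); /* ETHER_ADDR_LEN */

	printf("nlmsg_len = %u\n", NLMSG_ALIGN(hdr_len) + RTA_ALIGN(rta_len));
	return 0;
}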
+ * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx5_nl_device_flags(struct rte_eth_dev *dev, uint32_t flags, int enable) +{ + struct priv *priv = dev->data->dev_private; + int iface_idx = mlx5_ifindex(dev); + struct { + struct nlmsghdr hdr; + struct ifinfomsg ifi; + } req = { + .hdr = { + .nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg)), + .nlmsg_type = RTM_NEWLINK, + .nlmsg_flags = NLM_F_REQUEST, + }, + .ifi = { + .ifi_flags = enable ? flags : 0, + .ifi_change = flags, + .ifi_index = iface_idx, + }, + }; + int fd; + int ret; + + assert(!(flags & ~(IFF_PROMISC | IFF_ALLMULTI))); + if (priv->nl_socket < 0) + return 0; + fd = priv->nl_socket; + ret = mlx5_nl_send(fd, &req.hdr, priv->nl_sn++); + if (ret < 0) + return ret; + return 0; +} + +/** + * Enable promiscuous mode through Netlink. + * + * @param dev + * Pointer to Ethernet device structure. + * @param enable + * Nonzero to enable, disable otherwise. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_nl_promisc(struct rte_eth_dev *dev, int enable) +{ + int ret = mlx5_nl_device_flags(dev, IFF_PROMISC, enable); + + if (ret) + DRV_LOG(DEBUG, + "port %u cannot %s promisc mode: Netlink error %s", + dev->data->port_id, enable ? "enable" : "disable", + strerror(rte_errno)); + return ret; +} + +/** + * Enable all multicast mode through Netlink. + * + * @param dev + * Pointer to Ethernet device structure. + * @param enable + * Nonzero to enable, disable otherwise. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_nl_allmulti(struct rte_eth_dev *dev, int enable) +{ + int ret = mlx5_nl_device_flags(dev, IFF_ALLMULTI, enable); + + if (ret) + DRV_LOG(DEBUG, + "port %u cannot %s allmulti mode: Netlink error %s", + dev->data->port_id, enable ? "enable" : "disable", + strerror(rte_errno)); + return ret; +} diff --git a/drivers/net/mlx5/mlx5_prm.h b/drivers/net/mlx5/mlx5_prm.h index 9eb9c15e..0cf370cd 100644 --- a/drivers/net/mlx5/mlx5_prm.h +++ b/drivers/net/mlx5/mlx5_prm.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2016 6WIND S.A. - * Copyright 2016 Mellanox. + * Copyright 2016 Mellanox Technologies, Ltd */ #ifndef RTE_PMD_MLX5_PRM_H_ @@ -107,6 +107,30 @@ /* Inner L4 checksum offload (Tunneled packets only). */ #define MLX5_ETH_WQE_L4_INNER_CSUM (1u << 5) +/* Outer L4 type is TCP. */ +#define MLX5_ETH_WQE_L4_OUTER_TCP (0u << 5) + +/* Outer L4 type is UDP. */ +#define MLX5_ETH_WQE_L4_OUTER_UDP (1u << 5) + +/* Outer L3 type is IPV4. */ +#define MLX5_ETH_WQE_L3_OUTER_IPV4 (0u << 4) + +/* Outer L3 type is IPV6. */ +#define MLX5_ETH_WQE_L3_OUTER_IPV6 (1u << 4) + +/* Inner L4 type is TCP. */ +#define MLX5_ETH_WQE_L4_INNER_TCP (0u << 1) + +/* Inner L4 type is UDP. */ +#define MLX5_ETH_WQE_L4_INNER_UDP (1u << 1) + +/* Inner L3 type is IPV4. */ +#define MLX5_ETH_WQE_L3_INNER_IPV4 (0u << 0) + +/* Inner L3 type is IPV6. */ +#define MLX5_ETH_WQE_L3_INNER_IPV6 (1u << 0) + /* Is flow mark valid. */ #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN #define MLX5_FLOW_MARK_IS_VALID(val) ((val) & 0xffffff00) @@ -195,6 +219,21 @@ struct mlx5_mpw { } data; }; +/* WQE for Multi-Packet RQ. 
*/ +struct mlx5_wqe_mprq { + struct mlx5_wqe_srq_next_seg next_seg; + struct mlx5_wqe_data_seg dseg; +}; + +#define MLX5_MPRQ_LEN_MASK 0x000ffff +#define MLX5_MPRQ_LEN_SHIFT 0 +#define MLX5_MPRQ_STRIDE_NUM_MASK 0x3fff0000 +#define MLX5_MPRQ_STRIDE_NUM_SHIFT 16 +#define MLX5_MPRQ_FILLER_MASK 0x80000000 +#define MLX5_MPRQ_FILLER_SHIFT 31 + +#define MLX5_MPRQ_STRIDE_SHIFT_BYTE 2 + /* CQ element structure - should be equal to the cache line size */ struct mlx5_cqe { #if (RTE_CACHE_LINE_SIZE == 128) diff --git a/drivers/net/mlx5/mlx5_rss.c b/drivers/net/mlx5/mlx5_rss.c index d06b0bee..d69b4c09 100644 --- a/drivers/net/mlx5/mlx5_rss.c +++ b/drivers/net/mlx5/mlx5_rss.c @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2015 6WIND S.A. - * Copyright 2015 Mellanox. + * Copyright 2015 Mellanox Technologies, Ltd */ #include @@ -35,35 +35,48 @@ * RSS configuration data. * * @return - * 0 on success, negative errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf) { struct priv *priv = dev->data->dev_private; - int ret = 0; + unsigned int i; + unsigned int idx; - priv_lock(priv); if (rss_conf->rss_hf & MLX5_RSS_HF_MASK) { - ret = -EINVAL; - goto out; + rte_errno = EINVAL; + return -rte_errno; } if (rss_conf->rss_key && rss_conf->rss_key_len) { + if (rss_conf->rss_key_len != rss_hash_default_key_len) { + DRV_LOG(ERR, + "port %u RSS key len must be %zu Bytes long", + dev->data->port_id, rss_hash_default_key_len); + rte_errno = EINVAL; + return -rte_errno; + } priv->rss_conf.rss_key = rte_realloc(priv->rss_conf.rss_key, rss_conf->rss_key_len, 0); if (!priv->rss_conf.rss_key) { - ret = -ENOMEM; - goto out; + rte_errno = ENOMEM; + return -rte_errno; } memcpy(priv->rss_conf.rss_key, rss_conf->rss_key, rss_conf->rss_key_len); priv->rss_conf.rss_key_len = rss_conf->rss_key_len; } priv->rss_conf.rss_hf = rss_conf->rss_hf; -out: - priv_unlock(priv); - return ret; + /* Enable the RSS hash in all Rx queues. */ + for (i = 0, idx = 0; idx != priv->rxqs_n; ++i) { + if (!(*priv->rxqs)[i]) + continue; + (*priv->rxqs)[i]->rss_hash = !!rss_conf->rss_hf && + !!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS); + ++idx; + } + return 0; } /** @@ -75,7 +88,7 @@ out: * RSS configuration data. * * @return - * 0 on success, negative errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_rss_hash_conf_get(struct rte_eth_dev *dev, @@ -83,9 +96,10 @@ mlx5_rss_hash_conf_get(struct rte_eth_dev *dev, { struct priv *priv = dev->data->dev_private; - if (!rss_conf) - return -EINVAL; - priv_lock(priv); + if (!rss_conf) { + rte_errno = EINVAL; + return -rte_errno; + } if (rss_conf->rss_key && (rss_conf->rss_key_len >= priv->rss_conf.rss_key_len)) { memcpy(rss_conf->rss_key, priv->rss_conf.rss_key, @@ -93,24 +107,24 @@ mlx5_rss_hash_conf_get(struct rte_eth_dev *dev, } rss_conf->rss_key_len = priv->rss_conf.rss_key_len; rss_conf->rss_hf = priv->rss_conf.rss_hf; - priv_unlock(priv); return 0; } /** * Allocate/reallocate RETA index table. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @praram reta_size * The size of the array to allocate. * * @return - * 0 on success, errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
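The MLX5_MPRQ_* masks defined above pack three fields into the 32-bit byte count of a Multi-Packet RQ CQE; a sketch of the unpacking they imply (field meanings are inferred from the mask names, and the value is assumed to be already converted to CPU endianness):

/* Sketch: decode a Multi-Packet RQ CQE byte count into its fields. */
static inline void
mprq_byte_cnt_decode(uint32_t byte_cnt, uint32_t *len, uint32_t *strd_n,
		     uint32_t *filler)
{
	*len = (byte_cnt & MLX5_MPRQ_LEN_MASK) >> MLX5_MPRQ_LEN_SHIFT;
	*strd_n = (byte_cnt & MLX5_MPRQ_STRIDE_NUM_MASK) >>
		  MLX5_MPRQ_STRIDE_NUM_SHIFT;
	/* A filler CQE consumes strides without carrying a packet. */
	*filler = (byte_cnt & MLX5_MPRQ_FILLER_MASK) >> MLX5_MPRQ_FILLER_SHIFT;
}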
*/ int -priv_rss_reta_index_resize(struct priv *priv, unsigned int reta_size) +mlx5_rss_reta_index_resize(struct rte_eth_dev *dev, unsigned int reta_size) { + struct priv *priv = dev->data->dev_private; void *mem; unsigned int old_size = priv->reta_idx_n; @@ -119,11 +133,12 @@ priv_rss_reta_index_resize(struct priv *priv, unsigned int reta_size) mem = rte_realloc(priv->reta_idx, reta_size * sizeof((*priv->reta_idx)[0]), 0); - if (!mem) - return ENOMEM; + if (!mem) { + rte_errno = ENOMEM; + return -rte_errno; + } priv->reta_idx = mem; priv->reta_idx_n = reta_size; - if (old_size < reta_size) memset(&(*priv->reta_idx)[old_size], 0, (reta_size - old_size) * @@ -132,28 +147,31 @@ priv_rss_reta_index_resize(struct priv *priv, unsigned int reta_size) } /** - * Query RETA table. + * DPDK callback to get the RETA indirection table. * - * @param priv - * Pointer to private structure. - * @param[in, out] reta_conf - * Pointer to the first RETA configuration structure. + * @param dev + * Pointer to Ethernet device structure. + * @param reta_conf + * Pointer to RETA configuration structure array. * @param reta_size - * Number of entries. + * Size of the RETA table. * * @return - * 0 on success, errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ -static int -priv_dev_rss_reta_query(struct priv *priv, +int +mlx5_dev_rss_reta_query(struct rte_eth_dev *dev, struct rte_eth_rss_reta_entry64 *reta_conf, - unsigned int reta_size) + uint16_t reta_size) { + struct priv *priv = dev->data->dev_private; unsigned int idx; unsigned int i; - if (!reta_size || reta_size > priv->reta_idx_n) - return EINVAL; + if (!reta_size || reta_size > priv->reta_idx_n) { + rte_errno = EINVAL; + return -rte_errno; + } /* Fill each entry of the table even if its bit is not set. */ for (idx = 0, i = 0; (i != reta_size); ++i) { idx = i / RTE_RETA_GROUP_SIZE; @@ -164,34 +182,36 @@ priv_dev_rss_reta_query(struct priv *priv, } /** - * Update RETA table. + * DPDK callback to update the RETA indirection table. * - * @param priv - * Pointer to private structure. - * @param[in] reta_conf - * Pointer to the first RETA configuration structure. + * @param dev + * Pointer to Ethernet device structure. + * @param reta_conf + * Pointer to RETA configuration structure array. * @param reta_size - * Number of entries. + * Size of the RETA table. * * @return - * 0 on success, errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ -static int -priv_dev_rss_reta_update(struct priv *priv, +int +mlx5_dev_rss_reta_update(struct rte_eth_dev *dev, struct rte_eth_rss_reta_entry64 *reta_conf, - unsigned int reta_size) + uint16_t reta_size) { + int ret; + struct priv *priv = dev->data->dev_private; unsigned int idx; unsigned int i; unsigned int pos; - int ret; - if (!reta_size) - return EINVAL; - ret = priv_rss_reta_index_resize(priv, reta_size); + if (!reta_size) { + rte_errno = EINVAL; + return -rte_errno; + } + ret = mlx5_rss_reta_index_resize(dev, reta_size); if (ret) return ret; - for (idx = 0, i = 0; (i != reta_size); ++i) { idx = i / RTE_RETA_GROUP_SIZE; pos = i % RTE_RETA_GROUP_SIZE; @@ -200,63 +220,9 @@ priv_dev_rss_reta_update(struct priv *priv, assert(reta_conf[idx].reta[pos] < priv->rxqs_n); (*priv->reta_idx)[i] = reta_conf[idx].reta[pos]; } - return 0; -} - -/** - * DPDK callback to get the RETA indirection table. - * - * @param dev - * Pointer to Ethernet device structure. - * @param reta_conf - * Pointer to RETA configuration structure array. 
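Both RETA callbacks converted here address entry i of the flat indirection table as group i / RTE_RETA_GROUP_SIZE, slot i % RTE_RETA_GROUP_SIZE. A small worked sketch of that mapping, including the per-slot mask bit that the update path honours (the helper name is illustrative):

/* Sketch: fetch flat entry i from the grouped RETA configuration. */
static inline uint16_t
reta_entry_sketch(const struct rte_eth_rss_reta_entry64 *reta_conf,
		  unsigned int i)
{
	unsigned int idx = i / RTE_RETA_GROUP_SIZE;
	unsigned int pos = i % RTE_RETA_GROUP_SIZE;

	if (!(reta_conf[idx].mask & (1ULL << pos)))
		return UINT16_MAX; /* Slot not selected by the caller. */
	return reta_conf[idx].reta[pos];
}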
- * @param reta_size
- *   Size of the RETA table.
- *
- * @return
- *   0 on success, negative errno value on failure.
- */
-int
-mlx5_dev_rss_reta_query(struct rte_eth_dev *dev,
-			struct rte_eth_rss_reta_entry64 *reta_conf,
-			uint16_t reta_size)
-{
-	int ret;
-	struct priv *priv = dev->data->dev_private;
-
-	priv_lock(priv);
-	ret = priv_dev_rss_reta_query(priv, reta_conf, reta_size);
-	priv_unlock(priv);
-	return -ret;
-}
-
-/**
- * DPDK callback to update the RETA indirection table.
- *
- * @param dev
- *   Pointer to Ethernet device structure.
- * @param reta_conf
- *   Pointer to RETA configuration structure array.
- * @param reta_size
- *   Size of the RETA table.
- *
- * @return
- *   0 on success, negative errno value on failure.
- */
-int
-mlx5_dev_rss_reta_update(struct rte_eth_dev *dev,
-			 struct rte_eth_rss_reta_entry64 *reta_conf,
-			 uint16_t reta_size)
-{
-	int ret;
-	struct priv *priv = dev->data->dev_private;
-
-	priv_lock(priv);
-	ret = priv_dev_rss_reta_update(priv, reta_conf, reta_size);
-	priv_unlock(priv);
 	if (dev->data->dev_started) {
 		mlx5_dev_stop(dev);
-		mlx5_dev_start(dev);
+		return mlx5_dev_start(dev);
 	}
-	return -ret;
+	return 0;
 }
diff --git a/drivers/net/mlx5/mlx5_rxmode.c b/drivers/net/mlx5/mlx5_rxmode.c
index 4ffc869a..80824bc4 100644
--- a/drivers/net/mlx5/mlx5_rxmode.c
+++ b/drivers/net/mlx5/mlx5_rxmode.c
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  * Copyright 2015 6WIND S.A.
- * Copyright 2015 Mellanox.
+ * Copyright 2015 Mellanox Technologies, Ltd
  */
 
 #include
@@ -32,8 +32,15 @@
 void
 mlx5_promiscuous_enable(struct rte_eth_dev *dev)
 {
+	int ret;
+
 	dev->data->promiscuous = 1;
-	mlx5_traffic_restart(dev);
+	if (((struct priv *)dev->data->dev_private)->config.vf)
+		mlx5_nl_promisc(dev, 1);
+	ret = mlx5_traffic_restart(dev);
+	if (ret)
+		DRV_LOG(ERR, "port %u cannot enable promiscuous mode: %s",
+			dev->data->port_id, strerror(rte_errno));
 }
 
 /**
@@ -45,8 +52,15 @@ mlx5_promiscuous_enable(struct rte_eth_dev *dev)
 void
 mlx5_promiscuous_disable(struct rte_eth_dev *dev)
 {
+	int ret;
+
 	dev->data->promiscuous = 0;
-	mlx5_traffic_restart(dev);
+	if (((struct priv *)dev->data->dev_private)->config.vf)
+		mlx5_nl_promisc(dev, 0);
+	ret = mlx5_traffic_restart(dev);
+	if (ret)
+		DRV_LOG(ERR, "port %u cannot disable promiscuous mode: %s",
+			dev->data->port_id, strerror(rte_errno));
 }
 
 /**
@@ -58,8 +72,15 @@ mlx5_promiscuous_disable(struct rte_eth_dev *dev)
 void
 mlx5_allmulticast_enable(struct rte_eth_dev *dev)
 {
+	int ret;
+
 	dev->data->all_multicast = 1;
-	mlx5_traffic_restart(dev);
+	if (((struct priv *)dev->data->dev_private)->config.vf)
+		mlx5_nl_allmulti(dev, 1);
+	ret = mlx5_traffic_restart(dev);
+	if (ret)
+		DRV_LOG(ERR, "port %u cannot enable allmulticast mode: %s",
+			dev->data->port_id, strerror(rte_errno));
 }
 
 /**
@@ -71,6 +92,13 @@ mlx5_allmulticast_enable(struct rte_eth_dev *dev)
 void
 mlx5_allmulticast_disable(struct rte_eth_dev *dev)
 {
+	int ret;
+
 	dev->data->all_multicast = 0;
-	mlx5_traffic_restart(dev);
+	if (((struct priv *)dev->data->dev_private)->config.vf)
+		mlx5_nl_allmulti(dev, 0);
+	ret = mlx5_traffic_restart(dev);
+	if (ret)
+		DRV_LOG(ERR, "port %u cannot disable allmulticast mode: %s",
+			dev->data->port_id, strerror(rte_errno));
 }
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index ff58c492..de3f869e 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  * Copyright 2015 6WIND S.A.
- * Copyright 2015 Mellanox.
+ * Copyright 2015 Mellanox Technologies, Ltd */ #include @@ -55,7 +55,124 @@ uint8_t rss_hash_default_key[] = { const size_t rss_hash_default_key_len = sizeof(rss_hash_default_key); /** - * Allocate RX queue elements. + * Check whether Multi-Packet RQ can be enabled for the device. + * + * @param dev + * Pointer to Ethernet device. + * + * @return + * 1 if supported, negative errno value if not. + */ +inline int +mlx5_check_mprq_support(struct rte_eth_dev *dev) +{ + struct priv *priv = dev->data->dev_private; + + if (priv->config.mprq.enabled && + priv->rxqs_n >= priv->config.mprq.min_rxqs_num) + return 1; + return -ENOTSUP; +} + +/** + * Check whether Multi-Packet RQ is enabled for the Rx queue. + * + * @param rxq + * Pointer to receive queue structure. + * + * @return + * 0 if disabled, otherwise enabled. + */ +inline int +mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq) +{ + return rxq->strd_num_n > 0; +} + +/** + * Check whether Multi-Packet RQ is enabled for the device. + * + * @param dev + * Pointer to Ethernet device. + * + * @return + * 0 if disabled, otherwise enabled. + */ +inline int +mlx5_mprq_enabled(struct rte_eth_dev *dev) +{ + struct priv *priv = dev->data->dev_private; + uint16_t i; + uint16_t n = 0; + + if (mlx5_check_mprq_support(dev) < 0) + return 0; + /* All the configured queues should be enabled. */ + for (i = 0; i < priv->rxqs_n; ++i) { + struct mlx5_rxq_data *rxq = (*priv->rxqs)[i]; + + if (!rxq) + continue; + if (mlx5_rxq_mprq_enabled(rxq)) + ++n; + } + /* Multi-Packet RQ can't be partially configured. */ + assert(n == 0 || n == priv->rxqs_n); + return n == priv->rxqs_n; +} + +/** + * Allocate RX queue elements for Multi-Packet RQ. + * + * @param rxq_ctrl + * Pointer to RX queue structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +rxq_alloc_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl) +{ + struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq; + unsigned int wqe_n = 1 << rxq->elts_n; + unsigned int i; + int err; + + /* Iterate on segments. */ + for (i = 0; i <= wqe_n; ++i) { + struct mlx5_mprq_buf *buf; + + if (rte_mempool_get(rxq->mprq_mp, (void **)&buf) < 0) { + DRV_LOG(ERR, "port %u empty mbuf pool", rxq->port_id); + rte_errno = ENOMEM; + goto error; + } + if (i < wqe_n) + (*rxq->mprq_bufs)[i] = buf; + else + rxq->mprq_repl = buf; + } + DRV_LOG(DEBUG, + "port %u Rx queue %u allocated and configured %u segments", + rxq->port_id, rxq_ctrl->idx, wqe_n); + return 0; +error: + err = rte_errno; /* Save rte_errno before cleanup. */ + wqe_n = i; + for (i = 0; (i != wqe_n); ++i) { + if ((*rxq->mprq_bufs)[i] != NULL) + rte_mempool_put(rxq->mprq_mp, + (*rxq->mprq_bufs)[i]); + (*rxq->mprq_bufs)[i] = NULL; + } + DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything", + rxq->port_id, rxq_ctrl->idx); + rte_errno = err; /* Restore rte_errno. */ + return -rte_errno; +} + +/** + * Allocate RX queue elements for Single-Packet RQ. * * @param rxq_ctrl * Pointer to RX queue structure. @@ -63,13 +180,13 @@ const size_t rss_hash_default_key_len = sizeof(rss_hash_default_key); * @return * 0 on success, errno value on failure. */ -int -rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl) +static int +rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl) { const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n; unsigned int elts_n = 1 << rxq_ctrl->rxq.elts_n; unsigned int i; - int ret = 0; + int err; /* Iterate on segments. 
*/ for (i = 0; (i != elts_n); ++i) { @@ -77,8 +194,9 @@ rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl) buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp); if (buf == NULL) { - ERROR("%p: empty mbuf pool", (void *)rxq_ctrl); - ret = ENOMEM; + DRV_LOG(ERR, "port %u empty mbuf pool", + PORT_ID(rxq_ctrl->priv)); + rte_errno = ENOMEM; goto error; } /* Headroom is reserved by rte_pktmbuf_alloc(). */ @@ -97,7 +215,7 @@ rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl) (*rxq_ctrl->rxq.elts)[i] = buf; } /* If Rx vector is activated. */ - if (rxq_check_vec_support(&rxq_ctrl->rxq) > 0) { + if (mlx5_rxq_check_vec_support(&rxq_ctrl->rxq) > 0) { struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq; struct rte_mbuf *mbuf_init = &rxq->fake_mbuf; int j; @@ -118,30 +236,78 @@ rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl) for (j = 0; j < MLX5_VPMD_DESCS_PER_LOOP; ++j) (*rxq->elts)[elts_n + j] = &rxq->fake_mbuf; } - DEBUG("%p: allocated and configured %u segments (max %u packets)", - (void *)rxq_ctrl, elts_n, elts_n / (1 << rxq_ctrl->rxq.sges_n)); - assert(ret == 0); + DRV_LOG(DEBUG, + "port %u Rx queue %u allocated and configured %u segments" + " (max %u packets)", + PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx, elts_n, + elts_n / (1 << rxq_ctrl->rxq.sges_n)); return 0; error: + err = rte_errno; /* Save rte_errno before cleanup. */ elts_n = i; for (i = 0; (i != elts_n); ++i) { if ((*rxq_ctrl->rxq.elts)[i] != NULL) rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]); (*rxq_ctrl->rxq.elts)[i] = NULL; } - DEBUG("%p: failed, freed everything", (void *)rxq_ctrl); - assert(ret > 0); - return ret; + DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything", + PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx); + rte_errno = err; /* Restore rte_errno. */ + return -rte_errno; } /** - * Free RX queue elements. + * Allocate RX queue elements. + * + * @param rxq_ctrl + * Pointer to RX queue structure. + * + * @return + * 0 on success, errno value on failure. + */ +int +rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl) +{ + return mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ? + rxq_alloc_elts_mprq(rxq_ctrl) : rxq_alloc_elts_sprq(rxq_ctrl); +} + +/** + * Free RX queue elements for Multi-Packet RQ. * * @param rxq_ctrl * Pointer to RX queue structure. */ static void -rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl) +rxq_free_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl) +{ + struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq; + uint16_t i; + + DRV_LOG(DEBUG, "port %u Multi-Packet Rx queue %u freeing WRs", + rxq->port_id, rxq_ctrl->idx); + if (rxq->mprq_bufs == NULL) + return; + assert(mlx5_rxq_check_vec_support(rxq) < 0); + for (i = 0; (i != (1u << rxq->elts_n)); ++i) { + if ((*rxq->mprq_bufs)[i] != NULL) + mlx5_mprq_buf_free((*rxq->mprq_bufs)[i]); + (*rxq->mprq_bufs)[i] = NULL; + } + if (rxq->mprq_repl != NULL) { + mlx5_mprq_buf_free(rxq->mprq_repl); + rxq->mprq_repl = NULL; + } +} + +/** + * Free RX queue elements for Single-Packet RQ. + * + * @param rxq_ctrl + * Pointer to RX queue structure. + */ +static void +rxq_free_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl) { struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq; const uint16_t q_n = (1 << rxq->elts_n); @@ -149,14 +315,15 @@ rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl) uint16_t used = q_n - (rxq->rq_ci - rxq->rq_pi); uint16_t i; - DEBUG("%p: freeing WRs", (void *)rxq_ctrl); + DRV_LOG(DEBUG, "port %u Rx queue %u freeing WRs", + PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx); if (rxq->elts == NULL) return; /** * Some mbuf in the Ring belongs to the application. They cannot be * freed. 
*/ - if (rxq_check_vec_support(rxq) > 0) { + if (mlx5_rxq_check_vec_support(rxq) > 0) { for (i = 0; i < used; ++i) (*rxq->elts)[(rxq->rq_ci + i) & q_mask] = NULL; rxq->rq_pi = rxq->rq_ci; @@ -168,6 +335,21 @@ rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl) } } +/** + * Free RX queue elements. + * + * @param rxq_ctrl + * Pointer to RX queue structure. + */ +static void +rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl) +{ + if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq)) + rxq_free_elts_mprq(rxq_ctrl); + else + rxq_free_elts_sprq(rxq_ctrl); +} + /** * Clean up a RX queue. * @@ -179,24 +361,26 @@ rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl) void mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl) { - DEBUG("cleaning up %p", (void *)rxq_ctrl); + DRV_LOG(DEBUG, "port %u cleaning up Rx queue %u", + PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx); if (rxq_ctrl->ibv) - mlx5_priv_rxq_ibv_release(rxq_ctrl->priv, rxq_ctrl->ibv); + mlx5_rxq_ibv_release(rxq_ctrl->ibv); memset(rxq_ctrl, 0, sizeof(*rxq_ctrl)); } /** * Returns the per-queue supported offloads. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * * @return * Supported Rx offloads. */ uint64_t -mlx5_priv_get_rx_queue_offloads(struct priv *priv) +mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; struct mlx5_dev_config *config = &priv->config; uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER | DEV_RX_OFFLOAD_TIMESTAMP | @@ -217,46 +401,17 @@ mlx5_priv_get_rx_queue_offloads(struct priv *priv) /** * Returns the per-port supported offloads. * - * @param priv - * Pointer to private structure. * @return * Supported Rx offloads. */ uint64_t -mlx5_priv_get_rx_port_offloads(struct priv *priv __rte_unused) +mlx5_get_rx_port_offloads(void) { uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER; return offloads; } -/** - * Checks if the per-queue offload configuration is valid. - * - * @param priv - * Pointer to private structure. - * @param offloads - * Per-queue offloads configuration. - * - * @return - * 1 if the configuration is valid, 0 otherwise. - */ -static int -priv_is_rx_queue_offloads_allowed(struct priv *priv, uint64_t offloads) -{ - uint64_t port_offloads = priv->dev->data->dev_conf.rxmode.offloads; - uint64_t queue_supp_offloads = - mlx5_priv_get_rx_queue_offloads(priv); - uint64_t port_supp_offloads = mlx5_priv_get_rx_port_offloads(priv); - - if ((offloads & (queue_supp_offloads | port_supp_offloads)) != - offloads) - return 0; - if (((port_offloads ^ offloads) & port_supp_offloads)) - return 0; - return 1; -} - /** * * @param dev @@ -273,7 +428,7 @@ priv_is_rx_queue_offloads_allowed(struct priv *priv, uint64_t offloads) * Memory pool for buffer allocations. * * @return - * 0 on success, negative errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ int mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, @@ -284,53 +439,40 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx]; struct mlx5_rxq_ctrl *rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq); - int ret = 0; - priv_lock(priv); if (!rte_is_power_of_2(desc)) { desc = 1 << log2above(desc); - WARN("%p: increased number of descriptors in RX queue %u" - " to the next power of two (%d)", - (void *)dev, idx, desc); + DRV_LOG(WARNING, + "port %u increased number of descriptors in Rx queue %u" + " to the next power of two (%d)", + dev->data->port_id, idx, desc); } - DEBUG("%p: configuring queue %u for %u descriptors", - (void *)dev, idx, desc); + DRV_LOG(DEBUG, "port %u configuring Rx queue %u for %u descriptors", + dev->data->port_id, idx, desc); if (idx >= priv->rxqs_n) { - ERROR("%p: queue index out of range (%u >= %u)", - (void *)dev, idx, priv->rxqs_n); - priv_unlock(priv); - return -EOVERFLOW; - } - if (!priv_is_rx_queue_offloads_allowed(priv, conf->offloads)) { - ret = ENOTSUP; - ERROR("%p: Rx queue offloads 0x%" PRIx64 " don't match port " - "offloads 0x%" PRIx64 " or supported offloads 0x%" PRIx64, - (void *)dev, conf->offloads, - dev->data->dev_conf.rxmode.offloads, - (mlx5_priv_get_rx_port_offloads(priv) | - mlx5_priv_get_rx_queue_offloads(priv))); - goto out; - } - if (!mlx5_priv_rxq_releasable(priv, idx)) { - ret = EBUSY; - ERROR("%p: unable to release queue index %u", - (void *)dev, idx); - goto out; - } - mlx5_priv_rxq_release(priv, idx); - rxq_ctrl = mlx5_priv_rxq_new(priv, idx, desc, socket, conf, mp); + DRV_LOG(ERR, "port %u Rx queue index out of range (%u >= %u)", + dev->data->port_id, idx, priv->rxqs_n); + rte_errno = EOVERFLOW; + return -rte_errno; + } + if (!mlx5_rxq_releasable(dev, idx)) { + DRV_LOG(ERR, "port %u unable to release queue index %u", + dev->data->port_id, idx); + rte_errno = EBUSY; + return -rte_errno; + } + mlx5_rxq_release(dev, idx); + rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, mp); if (!rxq_ctrl) { - ERROR("%p: unable to allocate queue index %u", - (void *)dev, idx); - ret = ENOMEM; - goto out; + DRV_LOG(ERR, "port %u unable to allocate queue index %u", + dev->data->port_id, idx); + rte_errno = ENOMEM; + return -rte_errno; } - DEBUG("%p: adding RX queue %p to list", - (void *)dev, (void *)rxq_ctrl); + DRV_LOG(DEBUG, "port %u adding Rx queue %u to list", + dev->data->port_id, idx); (*priv->rxqs)[idx] = &rxq_ctrl->rxq; -out: - priv_unlock(priv); - return -ret; + return 0; } /** @@ -350,45 +492,48 @@ mlx5_rx_queue_release(void *dpdk_rxq) return; rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq); priv = rxq_ctrl->priv; - priv_lock(priv); - if (!mlx5_priv_rxq_releasable(priv, rxq_ctrl->rxq.stats.idx)) - rte_panic("Rx queue %p is still used by a flow and cannot be" - " removed\n", (void *)rxq_ctrl); - mlx5_priv_rxq_release(priv, rxq_ctrl->rxq.stats.idx); - priv_unlock(priv); + if (!mlx5_rxq_releasable(ETH_DEV(priv), rxq_ctrl->rxq.stats.idx)) + rte_panic("port %u Rx queue %u is still used by a flow and" + " cannot be removed\n", + PORT_ID(priv), rxq_ctrl->idx); + mlx5_rxq_release(ETH_DEV(priv), rxq_ctrl->rxq.stats.idx); } /** * Allocate queue vector and fill epoll fd list for Rx interrupts. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * * @return - * 0 on success, negative on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ int -priv_rx_intr_vec_enable(struct priv *priv) +mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; unsigned int i; unsigned int rxqs_n = priv->rxqs_n; unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID); unsigned int count = 0; - struct rte_intr_handle *intr_handle = priv->dev->intr_handle; + struct rte_intr_handle *intr_handle = dev->intr_handle; - if (!priv->dev->data->dev_conf.intr_conf.rxq) + if (!dev->data->dev_conf.intr_conf.rxq) return 0; - priv_rx_intr_vec_disable(priv); + mlx5_rx_intr_vec_disable(dev); intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0])); if (intr_handle->intr_vec == NULL) { - ERROR("failed to allocate memory for interrupt vector," - " Rx interrupts will not be supported"); - return -ENOMEM; + DRV_LOG(ERR, + "port %u failed to allocate memory for interrupt" + " vector, Rx interrupts will not be supported", + dev->data->port_id); + rte_errno = ENOMEM; + return -rte_errno; } intr_handle->type = RTE_INTR_HANDLE_EXT; for (i = 0; i != n; ++i) { /* This rxq ibv must not be released in this function. */ - struct mlx5_rxq_ibv *rxq_ibv = mlx5_priv_rxq_ibv_get(priv, i); + struct mlx5_rxq_ibv *rxq_ibv = mlx5_rxq_ibv_get(dev, i); int fd; int flags; int rc; @@ -402,27 +547,34 @@ priv_rx_intr_vec_enable(struct priv *priv) continue; } if (count >= RTE_MAX_RXTX_INTR_VEC_ID) { - ERROR("too many Rx queues for interrupt vector size" - " (%d), Rx interrupts cannot be enabled", - RTE_MAX_RXTX_INTR_VEC_ID); - priv_rx_intr_vec_disable(priv); - return -1; + DRV_LOG(ERR, + "port %u too many Rx queues for interrupt" + " vector size (%d), Rx interrupts cannot be" + " enabled", + dev->data->port_id, RTE_MAX_RXTX_INTR_VEC_ID); + mlx5_rx_intr_vec_disable(dev); + rte_errno = ENOMEM; + return -rte_errno; } fd = rxq_ibv->channel->fd; flags = fcntl(fd, F_GETFL); rc = fcntl(fd, F_SETFL, flags | O_NONBLOCK); if (rc < 0) { - ERROR("failed to make Rx interrupt file descriptor" - " %d non-blocking for queue index %d", fd, i); - priv_rx_intr_vec_disable(priv); - return -1; + rte_errno = errno; + DRV_LOG(ERR, + "port %u failed to make Rx interrupt file" + " descriptor %d non-blocking for queue index" + " %d", + dev->data->port_id, fd, i); + mlx5_rx_intr_vec_disable(dev); + return -rte_errno; } intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count; intr_handle->efds[count] = fd; count++; } if (!count) - priv_rx_intr_vec_disable(priv); + mlx5_rx_intr_vec_disable(dev); else intr_handle->nb_efd = count; return 0; @@ -431,18 +583,19 @@ priv_rx_intr_vec_enable(struct priv *priv) /** * Clean up Rx interrupts handler. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. 
*/ void -priv_rx_intr_vec_disable(struct priv *priv) +mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev) { - struct rte_intr_handle *intr_handle = priv->dev->intr_handle; + struct priv *priv = dev->data->dev_private; + struct rte_intr_handle *intr_handle = dev->intr_handle; unsigned int i; unsigned int rxqs_n = priv->rxqs_n; unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID); - if (!priv->dev->data->dev_conf.intr_conf.rxq) + if (!dev->data->dev_conf.intr_conf.rxq) return; if (!intr_handle->intr_vec) goto free; @@ -459,7 +612,7 @@ priv_rx_intr_vec_disable(struct priv *priv) */ rxq_data = (*priv->rxqs)[i]; rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); - mlx5_priv_rxq_ibv_release(priv, rxq_ctrl->ibv); + mlx5_rxq_ibv_release(rxq_ctrl->ibv); } free: rte_intr_free_epoll_fd(intr_handle); @@ -502,7 +655,7 @@ mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq) * Rx queue number. * * @return - * 0 on success, negative on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id) @@ -510,31 +663,25 @@ mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id) struct priv *priv = dev->data->dev_private; struct mlx5_rxq_data *rxq_data; struct mlx5_rxq_ctrl *rxq_ctrl; - int ret = 0; - priv_lock(priv); rxq_data = (*priv->rxqs)[rx_queue_id]; if (!rxq_data) { - ret = EINVAL; - goto exit; + rte_errno = EINVAL; + return -rte_errno; } rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); if (rxq_ctrl->irq) { struct mlx5_rxq_ibv *rxq_ibv; - rxq_ibv = mlx5_priv_rxq_ibv_get(priv, rx_queue_id); + rxq_ibv = mlx5_rxq_ibv_get(dev, rx_queue_id); if (!rxq_ibv) { - ret = EINVAL; - goto exit; + rte_errno = EINVAL; + return -rte_errno; } mlx5_arm_cq(rxq_data, rxq_data->cq_arm_sn); - mlx5_priv_rxq_ibv_release(priv, rxq_ibv); + mlx5_rxq_ibv_release(rxq_ibv); } -exit: - priv_unlock(priv); - if (ret) - WARN("unable to arm interrupt on rx queue %d", rx_queue_id); - return -ret; + return 0; } /** @@ -546,7 +693,7 @@ exit: * Rx queue number. * * @return - * 0 on success, negative on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id) @@ -557,53 +704,54 @@ mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id) struct mlx5_rxq_ibv *rxq_ibv = NULL; struct ibv_cq *ev_cq; void *ev_ctx; - int ret = 0; + int ret; - priv_lock(priv); rxq_data = (*priv->rxqs)[rx_queue_id]; if (!rxq_data) { - ret = EINVAL; - goto exit; + rte_errno = EINVAL; + return -rte_errno; } rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); if (!rxq_ctrl->irq) - goto exit; - rxq_ibv = mlx5_priv_rxq_ibv_get(priv, rx_queue_id); + return 0; + rxq_ibv = mlx5_rxq_ibv_get(dev, rx_queue_id); if (!rxq_ibv) { - ret = EINVAL; - goto exit; + rte_errno = EINVAL; + return -rte_errno; } ret = mlx5_glue->get_cq_event(rxq_ibv->channel, &ev_cq, &ev_ctx); if (ret || ev_cq != rxq_ibv->cq) { - ret = EINVAL; + rte_errno = EINVAL; goto exit; } rxq_data->cq_arm_sn++; mlx5_glue->ack_cq_events(rxq_ibv->cq, 1); + return 0; exit: + ret = rte_errno; /* Save rte_errno before cleanup. 
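+ * (the release call below may itself overwrite rte_errno; the saved value is restored just before returning)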
*/ if (rxq_ibv) - mlx5_priv_rxq_ibv_release(priv, rxq_ibv); - priv_unlock(priv); - if (ret) - WARN("unable to disable interrupt on rx queue %d", - rx_queue_id); - return -ret; + mlx5_rxq_ibv_release(rxq_ibv); + DRV_LOG(WARNING, "port %u unable to disable interrupt on Rx queue %d", + dev->data->port_id, rx_queue_id); + rte_errno = ret; /* Restore rte_errno. */ + return -rte_errno; } /** * Create the Rx queue Verbs object. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param idx * Queue index in DPDK Rx queue array * * @return - * The Verbs object initialised if it can be created. + * The Verbs object initialised, NULL otherwise and rte_errno is set. */ -struct mlx5_rxq_ibv* -mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx) +struct mlx5_rxq_ibv * +mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx) { + struct priv *priv = dev->data->dev_private; struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx]; struct mlx5_rxq_ctrl *rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); @@ -613,10 +761,16 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx) struct ibv_cq_init_attr_ex ibv; struct mlx5dv_cq_init_attr mlx5; } cq; - struct ibv_wq_init_attr wq; + struct { + struct ibv_wq_init_attr ibv; +#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT + struct mlx5dv_wq_init_attr mlx5; +#endif + } wq; struct ibv_cq_ex cq_attr; } attr; - unsigned int cqe_n = (1 << rxq_data->elts_n) - 1; + unsigned int cqe_n; + unsigned int wqe_n = 1 << rxq_data->elts_n; struct mlx5_rxq_ibv *tmpl; struct mlx5dv_cq cq_info; struct mlx5dv_rwq rwq; @@ -624,6 +778,7 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx) int ret = 0; struct mlx5dv_obj obj; struct mlx5_dev_config *config = &priv->config; + const int mprq_en = mlx5_rxq_mprq_enabled(rxq_data); assert(rxq_data); assert(!rxq_ctrl->ibv); @@ -632,28 +787,26 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx) tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0, rxq_ctrl->socket); if (!tmpl) { - ERROR("%p: cannot allocate verbs resources", - (void *)rxq_ctrl); + DRV_LOG(ERR, + "port %u Rx queue %u cannot allocate verbs resources", + dev->data->port_id, rxq_ctrl->idx); + rte_errno = ENOMEM; goto error; } tmpl->rxq_ctrl = rxq_ctrl; - /* Use the entire RX mempool as the memory region. */ - tmpl->mr = priv_mr_get(priv, rxq_data->mp); - if (!tmpl->mr) { - tmpl->mr = priv_mr_new(priv, rxq_data->mp); - if (!tmpl->mr) { - ERROR("%p: MR creation failure", (void *)rxq_ctrl); - goto error; - } - } if (rxq_ctrl->irq) { tmpl->channel = mlx5_glue->create_comp_channel(priv->ctx); if (!tmpl->channel) { - ERROR("%p: Comp Channel creation failure", - (void *)rxq_ctrl); + DRV_LOG(ERR, "port %u: comp channel creation failure", + dev->data->port_id); + rte_errno = ENOMEM; goto error; } } + if (mprq_en) + cqe_n = wqe_n * (1 << rxq_data->strd_num_n) - 1; + else + cqe_n = wqe_n - 1; attr.cq.ibv = (struct ibv_cq_init_attr_ex){ .cqe = cqe_n, .channel = tmpl->channel, @@ -670,27 +823,32 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx) * For vectorized Rx, it must not be doubled in order to * make cq_ci and rq_ci aligned. 
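 * (the vectorized Rx routines rely on the CQ and RQ having equal sizes so that both indexes wrap together)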
*/ - if (rxq_check_vec_support(rxq_data) < 0) + if (mlx5_rxq_check_vec_support(rxq_data) < 0) attr.cq.ibv.cqe *= 2; } else if (config->cqe_comp && rxq_data->hw_timestamp) { - DEBUG("Rx CQE compression is disabled for HW timestamp"); + DRV_LOG(DEBUG, + "port %u Rx CQE compression is disabled for HW" + " timestamp", + dev->data->port_id); } tmpl->cq = mlx5_glue->cq_ex_to_cq (mlx5_glue->dv_create_cq(priv->ctx, &attr.cq.ibv, &attr.cq.mlx5)); if (tmpl->cq == NULL) { - ERROR("%p: CQ creation failure", (void *)rxq_ctrl); + DRV_LOG(ERR, "port %u Rx queue %u CQ creation failure", + dev->data->port_id, idx); + rte_errno = ENOMEM; goto error; } - DEBUG("priv->device_attr.max_qp_wr is %d", - priv->device_attr.orig_attr.max_qp_wr); - DEBUG("priv->device_attr.max_sge is %d", - priv->device_attr.orig_attr.max_sge); - attr.wq = (struct ibv_wq_init_attr){ + DRV_LOG(DEBUG, "port %u priv->device_attr.max_qp_wr is %d", + dev->data->port_id, priv->device_attr.orig_attr.max_qp_wr); + DRV_LOG(DEBUG, "port %u priv->device_attr.max_sge is %d", + dev->data->port_id, priv->device_attr.orig_attr.max_sge); + attr.wq.ibv = (struct ibv_wq_init_attr){ .wq_context = NULL, /* Could be useful in the future. */ .wq_type = IBV_WQT_RQ, /* Max number of outstanding WRs. */ - .max_wr = (1 << rxq_data->elts_n) >> rxq_data->sges_n, + .max_wr = wqe_n >> rxq_data->sges_n, /* Max number of scatter/gather elements in a WR. */ .max_sge = 1 << rxq_data->sges_n, .pd = priv->pd, @@ -704,32 +862,54 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx) }; /* By default, FCS (CRC) is stripped by hardware. */ if (rxq_data->crc_present) { - attr.wq.create_flags |= IBV_WQ_FLAGS_SCATTER_FCS; - attr.wq.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS; + attr.wq.ibv.create_flags |= IBV_WQ_FLAGS_SCATTER_FCS; + attr.wq.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS; } #ifdef HAVE_IBV_WQ_FLAG_RX_END_PADDING if (config->hw_padding) { - attr.wq.create_flags |= IBV_WQ_FLAG_RX_END_PADDING; - attr.wq.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS; + attr.wq.ibv.create_flags |= IBV_WQ_FLAG_RX_END_PADDING; + attr.wq.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS; } #endif - tmpl->wq = mlx5_glue->create_wq(priv->ctx, &attr.wq); +#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT + attr.wq.mlx5 = (struct mlx5dv_wq_init_attr){ + .comp_mask = 0, + }; + if (mprq_en) { + struct mlx5dv_striding_rq_init_attr *mprq_attr = + &attr.wq.mlx5.striding_rq_attrs; + + attr.wq.mlx5.comp_mask |= MLX5DV_WQ_INIT_ATTR_MASK_STRIDING_RQ; + *mprq_attr = (struct mlx5dv_striding_rq_init_attr){ + .single_stride_log_num_of_bytes = rxq_data->strd_sz_n, + .single_wqe_log_num_of_strides = rxq_data->strd_num_n, + .two_byte_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT, + }; + } + tmpl->wq = mlx5_glue->dv_create_wq(priv->ctx, &attr.wq.ibv, + &attr.wq.mlx5); +#else + tmpl->wq = mlx5_glue->create_wq(priv->ctx, &attr.wq.ibv); +#endif if (tmpl->wq == NULL) { - ERROR("%p: WQ creation failure", (void *)rxq_ctrl); + DRV_LOG(ERR, "port %u Rx queue %u WQ creation failure", + dev->data->port_id, idx); + rte_errno = ENOMEM; goto error; } /* * Make sure number of WRs*SGEs match expectations since a queue * cannot allocate more than "desc" buffers. 
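 * For illustration, with hypothetical values desc = 512 and sges_n = 2 the WQ must report max_wr = 128 and max_sge = 4; any other values mean it was created with a different geometry than requested.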
*/ - if (((int)attr.wq.max_wr != - ((1 << rxq_data->elts_n) >> rxq_data->sges_n)) || - ((int)attr.wq.max_sge != (1 << rxq_data->sges_n))) { - ERROR("%p: requested %u*%u but got %u*%u WRs*SGEs", - (void *)rxq_ctrl, - ((1 << rxq_data->elts_n) >> rxq_data->sges_n), - (1 << rxq_data->sges_n), - attr.wq.max_wr, attr.wq.max_sge); + if (attr.wq.ibv.max_wr != (wqe_n >> rxq_data->sges_n) || + attr.wq.ibv.max_sge != (1u << rxq_data->sges_n)) { + DRV_LOG(ERR, + "port %u Rx queue %u requested %u*%u but got %u*%u" + " WRs*SGEs", + dev->data->port_id, idx, + wqe_n >> rxq_data->sges_n, (1 << rxq_data->sges_n), + attr.wq.ibv.max_wr, attr.wq.ibv.max_sge); + rte_errno = EINVAL; goto error; } /* Change queue state to ready. */ @@ -739,8 +919,10 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx) }; ret = mlx5_glue->modify_wq(tmpl->wq, &mod); if (ret) { - ERROR("%p: WQ state to IBV_WQS_RDY failed", - (void *)rxq_ctrl); + DRV_LOG(ERR, + "port %u Rx queue %u WQ state to IBV_WQS_RDY failed", + dev->data->port_id, idx); + rte_errno = ret; goto error; } obj.cq.in = tmpl->cq; @@ -748,33 +930,53 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx) obj.rwq.in = tmpl->wq; obj.rwq.out = &rwq; ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_RWQ); - if (ret != 0) + if (ret) { + rte_errno = ret; goto error; + } if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) { - ERROR("Wrong MLX5_CQE_SIZE environment variable value: " - "it should be set to %u", RTE_CACHE_LINE_SIZE); + DRV_LOG(ERR, + "port %u wrong MLX5_CQE_SIZE environment variable" + " value: it should be set to %u", + dev->data->port_id, RTE_CACHE_LINE_SIZE); + rte_errno = EINVAL; goto error; } /* Fill the rings. */ - rxq_data->wqes = (volatile struct mlx5_wqe_data_seg (*)[]) - (uintptr_t)rwq.buf; - for (i = 0; (i != (unsigned int)(1 << rxq_data->elts_n)); ++i) { - struct rte_mbuf *buf = (*rxq_data->elts)[i]; - volatile struct mlx5_wqe_data_seg *scat = &(*rxq_data->wqes)[i]; - + rxq_data->wqes = rwq.buf; + for (i = 0; (i != wqe_n); ++i) { + volatile struct mlx5_wqe_data_seg *scat; + uintptr_t addr; + uint32_t byte_count; + + if (mprq_en) { + struct mlx5_mprq_buf *buf = (*rxq_data->mprq_bufs)[i]; + + scat = &((volatile struct mlx5_wqe_mprq *) + rxq_data->wqes)[i].dseg; + addr = (uintptr_t)mlx5_mprq_buf_addr(buf); + byte_count = (1 << rxq_data->strd_sz_n) * + (1 << rxq_data->strd_num_n); + } else { + struct rte_mbuf *buf = (*rxq_data->elts)[i]; + + scat = &((volatile struct mlx5_wqe_data_seg *) + rxq_data->wqes)[i]; + addr = rte_pktmbuf_mtod(buf, uintptr_t); + byte_count = DATA_LEN(buf); + } /* scat->addr must be able to store a pointer. */ assert(sizeof(scat->addr) >= sizeof(uintptr_t)); *scat = (struct mlx5_wqe_data_seg){ - .addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf, - uintptr_t)), - .byte_count = rte_cpu_to_be_32(DATA_LEN(buf)), - .lkey = tmpl->mr->lkey, + .addr = rte_cpu_to_be_64(addr), + .byte_count = rte_cpu_to_be_32(byte_count), + .lkey = mlx5_rx_addr2mr(rxq_data, addr), }; } rxq_data->rq_db = rwq.dbrec; rxq_data->cqe_n = log2above(cq_info.cqe_cnt); rxq_data->cq_ci = 0; - rxq_data->rq_ci = 0; + rxq_data->strd_ci = 0; rxq_data->rq_pi = 0; rxq_data->zip = (struct rxq_zip){ .ai = 0, @@ -785,43 +987,45 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx) rxq_data->cqn = cq_info.cqn; rxq_data->cq_arm_sn = 0; /* Update doorbell counter. 
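 * (rq_ci is counted in units of (1 << sges_n) descriptors; e.g. wqe_n = 512 with sges_n = 2 starts the ring fully posted at rq_ci = 128)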
*/ - rxq_data->rq_ci = (1 << rxq_data->elts_n) >> rxq_data->sges_n; + rxq_data->rq_ci = wqe_n >> rxq_data->sges_n; rte_wmb(); *rxq_data->rq_db = rte_cpu_to_be_32(rxq_data->rq_ci); - DEBUG("%p: rxq updated with %p", (void *)rxq_ctrl, (void *)&tmpl); + DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id, + idx, (void *)&tmpl); rte_atomic32_inc(&tmpl->refcnt); - DEBUG("%p: Verbs Rx queue %p: refcnt %d", (void *)priv, - (void *)tmpl, rte_atomic32_read(&tmpl->refcnt)); + DRV_LOG(DEBUG, "port %u Verbs Rx queue %u: refcnt %d", + dev->data->port_id, idx, rte_atomic32_read(&tmpl->refcnt)); LIST_INSERT_HEAD(&priv->rxqsibv, tmpl, next); priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE; return tmpl; error: + ret = rte_errno; /* Save rte_errno before cleanup. */ if (tmpl->wq) claim_zero(mlx5_glue->destroy_wq(tmpl->wq)); if (tmpl->cq) claim_zero(mlx5_glue->destroy_cq(tmpl->cq)); if (tmpl->channel) claim_zero(mlx5_glue->destroy_comp_channel(tmpl->channel)); - if (tmpl->mr) - priv_mr_release(priv, tmpl->mr); priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE; + rte_errno = ret; /* Restore rte_errno. */ return NULL; } /** * Get an Rx queue Verbs object. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param idx * Queue index in DPDK Rx queue array * * @return * The Verbs object if it exists. */ -struct mlx5_rxq_ibv* -mlx5_priv_rxq_ibv_get(struct priv *priv, uint16_t idx) +struct mlx5_rxq_ibv * +mlx5_rxq_ibv_get(struct rte_eth_dev *dev, uint16_t idx) { + struct priv *priv = dev->data->dev_private; struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx]; struct mlx5_rxq_ctrl *rxq_ctrl; @@ -831,11 +1035,10 @@ mlx5_priv_rxq_ibv_get(struct priv *priv, uint16_t idx) return NULL; rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); if (rxq_ctrl->ibv) { - priv_mr_get(priv, rxq_data->mp); rte_atomic32_inc(&rxq_ctrl->ibv->refcnt); - DEBUG("%p: Verbs Rx queue %p: refcnt %d", (void *)priv, - (void *)rxq_ctrl->ibv, - rte_atomic32_read(&rxq_ctrl->ibv->refcnt)); + DRV_LOG(DEBUG, "port %u Verbs Rx queue %u: refcnt %d", + dev->data->port_id, rxq_ctrl->idx, + rte_atomic32_read(&rxq_ctrl->ibv->refcnt)); } return rxq_ctrl->ibv; } @@ -843,28 +1046,21 @@ mlx5_priv_rxq_ibv_get(struct priv *priv, uint16_t idx) /** * Release an Rx verbs queue object. * - * @param priv - * Pointer to private structure. * @param rxq_ibv * Verbs Rx queue object. * * @return - * 0 on success, errno value on failure. + * 1 while a reference on it exists, 0 when freed. */ int -mlx5_priv_rxq_ibv_release(struct priv *priv, struct mlx5_rxq_ibv *rxq_ibv) +mlx5_rxq_ibv_release(struct mlx5_rxq_ibv *rxq_ibv) { - int ret; - assert(rxq_ibv); assert(rxq_ibv->wq); assert(rxq_ibv->cq); - assert(rxq_ibv->mr); - ret = priv_mr_release(priv, rxq_ibv->mr); - if (!ret) - rxq_ibv->mr = NULL; - DEBUG("%p: Verbs Rx queue %p: refcnt %d", (void *)priv, - (void *)rxq_ibv, rte_atomic32_read(&rxq_ibv->refcnt)); + DRV_LOG(DEBUG, "port %u Verbs Rx queue %u: refcnt %d", + PORT_ID(rxq_ibv->rxq_ctrl->priv), + rxq_ibv->rxq_ctrl->idx, rte_atomic32_read(&rxq_ibv->refcnt)); if (rte_atomic32_dec_and_test(&rxq_ibv->refcnt)) { rxq_free_elts(rxq_ibv->rxq_ctrl); claim_zero(mlx5_glue->destroy_wq(rxq_ibv->wq)); @@ -876,26 +1072,28 @@ mlx5_priv_rxq_ibv_release(struct priv *priv, struct mlx5_rxq_ibv *rxq_ibv) rte_free(rxq_ibv); return 0; } - return EBUSY; + return 1; } /** * Verify the Verbs Rx queue list is empty * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. 
* - * @return the number of object not released. + * @return + *   The number of objects not released. */ int -mlx5_priv_rxq_ibv_verify(struct priv *priv) +mlx5_rxq_ibv_verify(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; int ret = 0; struct mlx5_rxq_ibv *rxq_ibv; LIST_FOREACH(rxq_ibv, &priv->rxqsibv, next) { - DEBUG("%p: Verbs Rx queue %p still referenced", (void *)priv, - (void *)rxq_ibv); + DRV_LOG(DEBUG, "port %u Verbs Rx queue %u still referenced", + dev->data->port_id, rxq_ibv->rxq_ctrl->idx); ++ret; } return ret; @@ -904,65 +1102,272 @@ mlx5_priv_rxq_ibv_verify(struct priv *priv) /** * Return true if a single reference exists on the object. * - * @param priv - * Pointer to private structure. * @param rxq_ibv * Verbs Rx queue object. */ int -mlx5_priv_rxq_ibv_releasable(struct priv *priv, struct mlx5_rxq_ibv *rxq_ibv) +mlx5_rxq_ibv_releasable(struct mlx5_rxq_ibv *rxq_ibv) { - (void)priv; assert(rxq_ibv); return (rte_atomic32_read(&rxq_ibv->refcnt) == 1); } +/** + * Callback function to initialize mbufs for Multi-Packet RQ. + */ +static inline void +mlx5_mprq_buf_init(struct rte_mempool *mp, void *opaque_arg __rte_unused, + void *_m, unsigned int i __rte_unused) +{ + struct mlx5_mprq_buf *buf = _m; + + memset(_m, 0, sizeof(*buf)); + buf->mp = mp; + rte_atomic16_set(&buf->refcnt, 1); +} + +/** + * Free mempool of Multi-Packet RQ. + * + * @param dev + * Pointer to Ethernet device. + * + * @return + * 0 on success, negative errno value on failure. + */ +int +mlx5_mprq_free_mp(struct rte_eth_dev *dev) +{ + struct priv *priv = dev->data->dev_private; + struct rte_mempool *mp = priv->mprq_mp; + unsigned int i; + + if (mp == NULL) + return 0; + DRV_LOG(DEBUG, "port %u freeing mempool (%s) for Multi-Packet RQ", + dev->data->port_id, mp->name); + /* + * If a buffer in the pool has been externally attached to an mbuf and it + * is still in use by the application, destroying the Rx queue can spoil + * the packet. It is unlikely, but if the application dynamically + * creates and destroys queues while holding Rx packets, this can happen. + * + * TODO: It is unavoidable for now because the mempool for Multi-Packet + * RQ isn't provided by the application but managed by the PMD. + */ + if (!rte_mempool_full(mp)) { + DRV_LOG(ERR, + "port %u mempool for Multi-Packet RQ is still in use", + dev->data->port_id); + rte_errno = EBUSY; + return -rte_errno; + } + rte_mempool_free(mp); + /* Unset mempool for each Rx queue. */ + for (i = 0; i != priv->rxqs_n; ++i) { + struct mlx5_rxq_data *rxq = (*priv->rxqs)[i]; + + if (rxq == NULL) + continue; + rxq->mprq_mp = NULL; + } + return 0; +} + +/** + * Allocate a mempool for Multi-Packet RQ. All configured Rx queues share the + * mempool. If already allocated, reuse it if there are enough elements. + * Otherwise, resize it. + * + * @param dev + * Pointer to Ethernet device. + * + * @return + * 0 on success, negative errno value on failure. + */ +int +mlx5_mprq_alloc_mp(struct rte_eth_dev *dev) +{ + struct priv *priv = dev->data->dev_private; + struct rte_mempool *mp = priv->mprq_mp; + char name[RTE_MEMPOOL_NAMESIZE]; + unsigned int desc = 0; + unsigned int buf_len; + unsigned int obj_num; + unsigned int obj_size; + unsigned int strd_num_n = 0; + unsigned int strd_sz_n = 0; + unsigned int i; + + if (!mlx5_mprq_enabled(dev)) + return 0; + /* Count the total number of descriptors configured.
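+ * For illustration, two queues of 4096 descriptors each give desc = 8192; the loop also keeps the per-queue maxima of strd_num_n and strd_sz_n so a single mempool can serve every queue.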
*/ + for (i = 0; i != priv->rxqs_n; ++i) { + struct mlx5_rxq_data *rxq = (*priv->rxqs)[i]; + + if (rxq == NULL) + continue; + desc += 1 << rxq->elts_n; + /* Get the max number of strides. */ + if (strd_num_n < rxq->strd_num_n) + strd_num_n = rxq->strd_num_n; + /* Get the max size of a stride. */ + if (strd_sz_n < rxq->strd_sz_n) + strd_sz_n = rxq->strd_sz_n; + } + assert(strd_num_n && strd_sz_n); + buf_len = (1 << strd_num_n) * (1 << strd_sz_n); + obj_size = buf_len + sizeof(struct mlx5_mprq_buf); + /* + * Received packets can be either memcpy'd or externally referenced. When + * a packet is attached to an mbuf as an external buffer, it isn't possible + * to predict how the buffers will be queued by the application, so there + * is no option but to speculatively prepare enough buffers in advance. + * + * In the data path, if this Mempool is depleted, the PMD will try to memcpy + * received packets to buffers provided by the application (rxq->mp) until + * this Mempool becomes available again. + */ + desc *= 4; + obj_num = desc + MLX5_MPRQ_MP_CACHE_SZ * priv->rxqs_n; + /* Check if a mempool is already allocated and if it can be reused. */ + if (mp != NULL && mp->elt_size >= obj_size && mp->size >= obj_num) { + DRV_LOG(DEBUG, "port %u mempool %s is being reused", + dev->data->port_id, mp->name); + /* Reuse. */ + goto exit; + } else if (mp != NULL) { + DRV_LOG(DEBUG, "port %u mempool %s should be resized, freeing it", + dev->data->port_id, mp->name); + /* + * If the free fails, the mempool may still be in use; there is + * no way but to keep using the existing one. On buffer underrun, + * packets will be memcpy'd instead of external buffer + * attachment. + */ + if (mlx5_mprq_free_mp(dev)) { + if (mp->elt_size >= obj_size) + goto exit; + else + return -rte_errno; + } + } + snprintf(name, sizeof(name), "%s-mprq", dev->device->name); + mp = rte_mempool_create(name, obj_num, obj_size, MLX5_MPRQ_MP_CACHE_SZ, + 0, NULL, NULL, mlx5_mprq_buf_init, NULL, + dev->device->numa_node, 0); + if (mp == NULL) { + DRV_LOG(ERR, + "port %u failed to allocate a mempool for" + " Multi-Packet RQ, count=%u, size=%u", + dev->data->port_id, obj_num, obj_size); + rte_errno = ENOMEM; + return -rte_errno; + } + priv->mprq_mp = mp; +exit: + /* Set mempool for each Rx queue. */ + for (i = 0; i != priv->rxqs_n; ++i) { + struct mlx5_rxq_data *rxq = (*priv->rxqs)[i]; + + if (rxq == NULL) + continue; + rxq->mprq_mp = mp; + } + DRV_LOG(INFO, "port %u Multi-Packet RQ is configured", + dev->data->port_id); + return 0; +} + /** * Create a DPDK Rx queue. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param idx - * TX queue index. + * RX queue index. * @param desc * Number of descriptors to configure in queue. * @param socket * NUMA socket on which memory must be allocated. * * @return - * A DPDK queue object on success. + * A DPDK queue object on success, NULL otherwise and rte_errno is set.
*/ -struct mlx5_rxq_ctrl* -mlx5_priv_rxq_new(struct priv *priv, uint16_t idx, uint16_t desc, - unsigned int socket, const struct rte_eth_rxconf *conf, - struct rte_mempool *mp) +struct mlx5_rxq_ctrl * +mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, + unsigned int socket, const struct rte_eth_rxconf *conf, + struct rte_mempool *mp) { - struct rte_eth_dev *dev = priv->dev; + struct priv *priv = dev->data->dev_private; struct mlx5_rxq_ctrl *tmpl; unsigned int mb_len = rte_pktmbuf_data_room_size(mp); + unsigned int mprq_stride_size; struct mlx5_dev_config *config = &priv->config; /* * Always allocate extra slots, even if eventually * the vector Rx will not be used. */ - const uint16_t desc_n = + uint16_t desc_n = desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP; + uint64_t offloads = conf->offloads | + dev->data->dev_conf.rxmode.offloads; + const int mprq_en = mlx5_check_mprq_support(dev) > 0; tmpl = rte_calloc_socket("RXQ", 1, sizeof(*tmpl) + desc_n * sizeof(struct rte_mbuf *), 0, socket); - if (!tmpl) + if (!tmpl) { + rte_errno = ENOMEM; return NULL; + } + if (mlx5_mr_btree_init(&tmpl->rxq.mr_ctrl.cache_bh, + MLX5_MR_BTREE_CACHE_N, socket)) { + /* rte_errno is already set. */ + goto error; + } tmpl->socket = socket; - if (priv->dev->data->dev_conf.intr_conf.rxq) + if (dev->data->dev_conf.intr_conf.rxq) tmpl->irq = 1; - /* Enable scattered packets support for this queue if necessary. */ + /* + * This Rx queue can be configured as a Multi-Packet RQ if all of the + * following conditions are met: + * - MPRQ is enabled. + * - The number of descs is more than the number of strides. + * - max_rx_pkt_len plus overhead is less than the max size of a + * stride. + * Otherwise, enable Rx scatter if necessary. + */ assert(mb_len >= RTE_PKTMBUF_HEADROOM); - if (dev->data->dev_conf.rxmode.max_rx_pkt_len <= - (mb_len - RTE_PKTMBUF_HEADROOM)) { + mprq_stride_size = + dev->data->dev_conf.rxmode.max_rx_pkt_len + + sizeof(struct rte_mbuf_ext_shared_info) + + RTE_PKTMBUF_HEADROOM; + if (mprq_en && + desc >= (1U << config->mprq.stride_num_n) && + mprq_stride_size <= (1U << config->mprq.max_stride_size_n)) { + /* TODO: Rx scatter isn't supported yet. */ + tmpl->rxq.sges_n = 0; + /* Trim the number of descs needed. 
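+ * Each Multi-Packet WQE holds up to (1 << strd_num_n) packets, so e.g. desc = 4096 with stride_num_n = 6 shrinks to 64 WQEs.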
*/ + desc >>= config->mprq.stride_num_n; + tmpl->rxq.strd_num_n = config->mprq.stride_num_n; + tmpl->rxq.strd_sz_n = RTE_MAX(log2above(mprq_stride_size), + config->mprq.min_stride_size_n); + tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT; + tmpl->rxq.mprq_max_memcpy_len = + RTE_MIN(mb_len - RTE_PKTMBUF_HEADROOM, + config->mprq.max_memcpy_len); + DRV_LOG(DEBUG, + "port %u Rx queue %u: Multi-Packet RQ is enabled" + " strd_num_n = %u, strd_sz_n = %u", + dev->data->port_id, idx, + tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n); + } else if (dev->data->dev_conf.rxmode.max_rx_pkt_len <= + (mb_len - RTE_PKTMBUF_HEADROOM)) { tmpl->rxq.sges_n = 0; - } else if (conf->offloads & DEV_RX_OFFLOAD_SCATTER) { + } else if (offloads & DEV_RX_OFFLOAD_SCATTER) { unsigned int size = RTE_PKTMBUF_HEADROOM + dev->data->dev_conf.rxmode.max_rx_pkt_len; @@ -978,57 +1383,63 @@ mlx5_priv_rxq_new(struct priv *priv, uint16_t idx, uint16_t desc, size = mb_len * (1 << tmpl->rxq.sges_n); size -= RTE_PKTMBUF_HEADROOM; if (size < dev->data->dev_conf.rxmode.max_rx_pkt_len) { - ERROR("%p: too many SGEs (%u) needed to handle" - " requested maximum packet size %u", - (void *)dev, - 1 << sges_n, - dev->data->dev_conf.rxmode.max_rx_pkt_len); + DRV_LOG(ERR, + "port %u too many SGEs (%u) needed to handle" + " requested maximum packet size %u", + dev->data->port_id, + 1 << sges_n, + dev->data->dev_conf.rxmode.max_rx_pkt_len); + rte_errno = EOVERFLOW; goto error; } } else { - WARN("%p: the requested maximum Rx packet size (%u) is" - " larger than a single mbuf (%u) and scattered" - " mode has not been requested", - (void *)dev, - dev->data->dev_conf.rxmode.max_rx_pkt_len, - mb_len - RTE_PKTMBUF_HEADROOM); - } - DEBUG("%p: maximum number of segments per packet: %u", - (void *)dev, 1 << tmpl->rxq.sges_n); + DRV_LOG(WARNING, + "port %u the requested maximum Rx packet size (%u) is" + " larger than a single mbuf (%u) and scattered mode has" + " not been requested", + dev->data->port_id, + dev->data->dev_conf.rxmode.max_rx_pkt_len, + mb_len - RTE_PKTMBUF_HEADROOM); + } + DRV_LOG(DEBUG, "port %u maximum number of segments per packet: %u", + dev->data->port_id, 1 << tmpl->rxq.sges_n); if (desc % (1 << tmpl->rxq.sges_n)) { - ERROR("%p: number of RX queue descriptors (%u) is not a" - " multiple of SGEs per packet (%u)", - (void *)dev, - desc, - 1 << tmpl->rxq.sges_n); + DRV_LOG(ERR, + "port %u number of Rx queue descriptors (%u) is not a" + " multiple of SGEs per packet (%u)", + dev->data->port_id, + desc, + 1 << tmpl->rxq.sges_n); + rte_errno = EINVAL; goto error; } /* Toggle RX checksum offload if hardware supports it. */ - tmpl->rxq.csum = !!(conf->offloads & DEV_RX_OFFLOAD_CHECKSUM); - tmpl->rxq.csum_l2tun = (!!(conf->offloads & DEV_RX_OFFLOAD_CHECKSUM) && - priv->config.hw_csum_l2tun); - tmpl->rxq.hw_timestamp = !!(conf->offloads & DEV_RX_OFFLOAD_TIMESTAMP); + tmpl->rxq.csum = !!(offloads & DEV_RX_OFFLOAD_CHECKSUM); + tmpl->rxq.hw_timestamp = !!(offloads & DEV_RX_OFFLOAD_TIMESTAMP); /* Configure VLAN stripping. */ - tmpl->rxq.vlan_strip = !!(conf->offloads & DEV_RX_OFFLOAD_VLAN_STRIP); + tmpl->rxq.vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP); /* By default, FCS (CRC) is stripped by hardware. 
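 * (crc_present is set below only when keeping the CRC was requested and the firmware can actually disable the stripping)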
*/ - if (conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP) { + if (offloads & DEV_RX_OFFLOAD_CRC_STRIP) { tmpl->rxq.crc_present = 0; } else if (config->hw_fcs_strip) { tmpl->rxq.crc_present = 1; } else { - WARN("%p: CRC stripping has been disabled but will still" - " be performed by hardware, make sure MLNX_OFED and" - " firmware are up to date", - (void *)dev); + DRV_LOG(WARNING, + "port %u CRC stripping has been disabled but will" + " still be performed by hardware, make sure MLNX_OFED" + " and firmware are up to date", + dev->data->port_id); tmpl->rxq.crc_present = 0; } - DEBUG("%p: CRC stripping is %s, %u bytes will be subtracted from" - " incoming frames to hide it", - (void *)dev, - tmpl->rxq.crc_present ? "disabled" : "enabled", - tmpl->rxq.crc_present << 2); + DRV_LOG(DEBUG, + "port %u CRC stripping is %s, %u bytes will be subtracted from" + " incoming frames to hide it", + dev->data->port_id, + tmpl->rxq.crc_present ? "disabled" : "enabled", + tmpl->rxq.crc_present << 2); /* Save port ID. */ - tmpl->rxq.rss_hash = priv->rxqs_n > 1; + tmpl->rxq.rss_hash = !!priv->rss_conf.rss_hf && + (!!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS)); tmpl->rxq.port_id = dev->data->port_id; tmpl->priv = priv; tmpl->rxq.mp = mp; @@ -1036,9 +1447,10 @@ mlx5_priv_rxq_new(struct priv *priv, uint16_t idx, uint16_t desc, tmpl->rxq.elts_n = log2above(desc); tmpl->rxq.elts = (struct rte_mbuf *(*)[1 << tmpl->rxq.elts_n])(tmpl + 1); + tmpl->idx = idx; rte_atomic32_inc(&tmpl->refcnt); - DEBUG("%p: Rx queue %p: refcnt %d", (void *)priv, - (void *)tmpl, rte_atomic32_read(&tmpl->refcnt)); + DRV_LOG(DEBUG, "port %u Rx queue %u: refcnt %d", dev->data->port_id, + idx, rte_atomic32_read(&tmpl->refcnt)); LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next); return tmpl; error: @@ -1049,28 +1461,29 @@ error: /** * Get a Rx queue. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param idx * TX queue index. * * @return - * A pointer to the queue if it exists. + * A pointer to the queue if it exists, NULL otherwise. */ -struct mlx5_rxq_ctrl* -mlx5_priv_rxq_get(struct priv *priv, uint16_t idx) +struct mlx5_rxq_ctrl * +mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx) { + struct priv *priv = dev->data->dev_private; struct mlx5_rxq_ctrl *rxq_ctrl = NULL; if ((*priv->rxqs)[idx]) { rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq); - - mlx5_priv_rxq_ibv_get(priv, idx); + mlx5_rxq_ibv_get(dev, idx); rte_atomic32_inc(&rxq_ctrl->refcnt); - DEBUG("%p: Rx queue %p: refcnt %d", (void *)priv, - (void *)rxq_ctrl, rte_atomic32_read(&rxq_ctrl->refcnt)); + DRV_LOG(DEBUG, "port %u Rx queue %u: refcnt %d", + dev->data->port_id, rxq_ctrl->idx, + rte_atomic32_read(&rxq_ctrl->refcnt)); } return rxq_ctrl; } @@ -1078,59 +1491,60 @@ mlx5_priv_rxq_get(struct priv *priv, uint16_t idx) /** * Release a Rx queue. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param idx * TX queue index. * * @return - * 0 on success, errno value on failure. + * 1 while a reference on it exists, 0 when freed. 
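+ *   On the last release the MR btree is freed and the queue is removed from the port's Rx queue array.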
*/ int -mlx5_priv_rxq_release(struct priv *priv, uint16_t idx) +mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx) { + struct priv *priv = dev->data->dev_private; struct mlx5_rxq_ctrl *rxq_ctrl; if (!(*priv->rxqs)[idx]) return 0; rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq); assert(rxq_ctrl->priv); - if (rxq_ctrl->ibv) { - int ret; - - ret = mlx5_priv_rxq_ibv_release(rxq_ctrl->priv, rxq_ctrl->ibv); - if (!ret) - rxq_ctrl->ibv = NULL; - } - DEBUG("%p: Rx queue %p: refcnt %d", (void *)priv, - (void *)rxq_ctrl, rte_atomic32_read(&rxq_ctrl->refcnt)); + if (rxq_ctrl->ibv && !mlx5_rxq_ibv_release(rxq_ctrl->ibv)) + rxq_ctrl->ibv = NULL; + DRV_LOG(DEBUG, "port %u Rx queue %u: refcnt %d", dev->data->port_id, + rxq_ctrl->idx, rte_atomic32_read(&rxq_ctrl->refcnt)); if (rte_atomic32_dec_and_test(&rxq_ctrl->refcnt)) { + mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh); LIST_REMOVE(rxq_ctrl, next); rte_free(rxq_ctrl); (*priv->rxqs)[idx] = NULL; return 0; } - return EBUSY; + return 1; } /** * Verify if the queue can be released. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param idx * TX queue index. * * @return - * 1 if the queue can be released. + * 1 if the queue can be released, negative errno otherwise and rte_errno is + * set. */ int -mlx5_priv_rxq_releasable(struct priv *priv, uint16_t idx) +mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx) { + struct priv *priv = dev->data->dev_private; struct mlx5_rxq_ctrl *rxq_ctrl; - if (!(*priv->rxqs)[idx]) - return -1; + if (!(*priv->rxqs)[idx]) { + rte_errno = EINVAL; + return -rte_errno; + } rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq); return (rte_atomic32_read(&rxq_ctrl->refcnt) == 1); } @@ -1138,20 +1552,22 @@ mlx5_priv_rxq_releasable(struct priv *priv, uint16_t idx) /** * Verify the Rx Queue list is empty * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * - * @return the number of object not released. + * @return + *   The number of objects not released. */ int -mlx5_priv_rxq_verify(struct priv *priv) +mlx5_rxq_verify(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; struct mlx5_rxq_ctrl *rxq_ctrl; int ret = 0; LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) { - DEBUG("%p: Rx Queue %p still referenced", (void *)priv, - (void *)rxq_ctrl); + DRV_LOG(DEBUG, "port %u Rx Queue %u still referenced", + dev->data->port_id, rxq_ctrl->idx); ++ret; } return ret; @@ -1160,20 +1576,21 @@ mlx5_priv_rxq_verify(struct priv *priv) /** * Create an indirection table. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param queues * Queues entering in the indirection table. * @param queues_n * Number of queues in the array. * * @return - * A new indirection table. + * The Verbs object initialised, NULL otherwise and rte_errno is set. */ -struct mlx5_ind_table_ibv* -mlx5_priv_ind_table_ibv_new(struct priv *priv, uint16_t queues[], - uint16_t queues_n) +struct mlx5_ind_table_ibv * +mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, const uint16_t *queues, + uint32_t queues_n) { + struct priv *priv = dev->data->dev_private; struct mlx5_ind_table_ibv *ind_tbl; const unsigned int wq_n = rte_is_power_of_2(queues_n) ?
log2above(queues_n) : @@ -1184,11 +1601,12 @@ mlx5_priv_ind_table_ibv_new(struct priv *priv, uint16_t queues[], ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl) + queues_n * sizeof(uint16_t), 0); - if (!ind_tbl) + if (!ind_tbl) { + rte_errno = ENOMEM; return NULL; + } for (i = 0; i != queues_n; ++i) { - struct mlx5_rxq_ctrl *rxq = - mlx5_priv_rxq_get(priv, queues[i]); + struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev, queues[i]); if (!rxq) goto error; @@ -1206,24 +1624,28 @@ mlx5_priv_ind_table_ibv_new(struct priv *priv, uint16_t queues[], .ind_tbl = wq, .comp_mask = 0, }); - if (!ind_tbl->ind_table) + if (!ind_tbl->ind_table) { + rte_errno = errno; goto error; + } rte_atomic32_inc(&ind_tbl->refcnt); LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next); - DEBUG("%p: Indirection table %p: refcnt %d", (void *)priv, - (void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt)); + DEBUG("port %u new indirection table %p: queues:%u refcnt:%d", + dev->data->port_id, (void *)ind_tbl, 1 << wq_n, + rte_atomic32_read(&ind_tbl->refcnt)); return ind_tbl; error: rte_free(ind_tbl); - DEBUG("%p cannot create indirection table", (void *)priv); + DRV_LOG(DEBUG, "port %u cannot create indirection table", + dev->data->port_id); return NULL; } /** * Get an indirection table. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param queues * Queues entering in the indirection table. * @param queues_n @@ -1232,10 +1654,11 @@ error: * @return * An indirection table if found. */ -struct mlx5_ind_table_ibv* -mlx5_priv_ind_table_ibv_get(struct priv *priv, uint16_t queues[], - uint16_t queues_n) +struct mlx5_ind_table_ibv * +mlx5_ind_table_ibv_get(struct rte_eth_dev *dev, const uint16_t *queues, + uint32_t queues_n) { + struct priv *priv = dev->data->dev_private; struct mlx5_ind_table_ibv *ind_tbl; LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) { @@ -1249,10 +1672,11 @@ mlx5_priv_ind_table_ibv_get(struct priv *priv, uint16_t queues[], unsigned int i; rte_atomic32_inc(&ind_tbl->refcnt); - DEBUG("%p: Indirection table %p: refcnt %d", (void *)priv, - (void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt)); + DRV_LOG(DEBUG, "port %u indirection table %p: refcnt %d", + dev->data->port_id, (void *)ind_tbl, + rte_atomic32_read(&ind_tbl->refcnt)); for (i = 0; i != ind_tbl->queues_n; ++i) - mlx5_priv_rxq_get(priv, ind_tbl->queues[i]); + mlx5_rxq_get(dev, ind_tbl->queues[i]); } return ind_tbl; } @@ -1260,52 +1684,59 @@ mlx5_priv_ind_table_ibv_get(struct priv *priv, uint16_t queues[], /** * Release an indirection table. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param ind_table * Indirection table to release. * * @return - * 0 on success, errno value on failure. + * 1 while a reference on it exists, 0 when freed. 
*/ int -mlx5_priv_ind_table_ibv_release(struct priv *priv, - struct mlx5_ind_table_ibv *ind_tbl) +mlx5_ind_table_ibv_release(struct rte_eth_dev *dev, + struct mlx5_ind_table_ibv *ind_tbl) { unsigned int i; - DEBUG("%p: Indirection table %p: refcnt %d", (void *)priv, - (void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt)); - if (rte_atomic32_dec_and_test(&ind_tbl->refcnt)) + DRV_LOG(DEBUG, "port %u indirection table %p: refcnt %d", + dev->data->port_id, (void *)ind_tbl, + rte_atomic32_read(&ind_tbl->refcnt)); + if (rte_atomic32_dec_and_test(&ind_tbl->refcnt)) { claim_zero(mlx5_glue->destroy_rwq_ind_table (ind_tbl->ind_table)); + DEBUG("port %u delete indirection table %p: queues: %u", + dev->data->port_id, (void *)ind_tbl, ind_tbl->queues_n); + } for (i = 0; i != ind_tbl->queues_n; ++i) - claim_nonzero(mlx5_priv_rxq_release(priv, ind_tbl->queues[i])); + claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i])); if (!rte_atomic32_read(&ind_tbl->refcnt)) { LIST_REMOVE(ind_tbl, next); rte_free(ind_tbl); return 0; } - return EBUSY; + return 1; } /** * Verify the Rx Queue list is empty * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * - * @return the number of object not released. + * @return + *   The number of objects not released. */ int -mlx5_priv_ind_table_ibv_verify(struct priv *priv) +mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; struct mlx5_ind_table_ibv *ind_tbl; int ret = 0; LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) { - DEBUG("%p: Verbs indirection table %p still referenced", - (void *)priv, (void *)ind_tbl); + DRV_LOG(DEBUG, + "port %u Verbs indirection table %p still referenced", + dev->data->port_id, (void *)ind_tbl); ++ret; } return ret; @@ -1314,8 +1745,8 @@ mlx5_priv_ind_table_ibv_verify(struct priv *priv) /** * Create an Rx Hash queue. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param rss_key * RSS key for the Rx hash queue. * @param rss_key_len @@ -1327,24 +1758,79 @@ mlx5_priv_ind_table_ibv_verify(struct priv *priv) * first queue index will be taken for the indirection table. * @param queues_n * Number of queues. + * @param tunnel + * Tunnel type, implies tunnel offloading like inner checksum if available. + * @param rss_level + * RSS hash on tunnel level. * * @return - * An hash Rx queue on success. + * The Verbs object initialised, NULL otherwise and rte_errno is set. */ -struct mlx5_hrxq* -mlx5_priv_hrxq_new(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len, - uint64_t hash_fields, uint16_t queues[], uint16_t queues_n) +struct mlx5_hrxq * +mlx5_hrxq_new(struct rte_eth_dev *dev, + const uint8_t *rss_key, uint32_t rss_key_len, + uint64_t hash_fields, + const uint16_t *queues, uint32_t queues_n, + uint32_t tunnel, uint32_t rss_level) { + struct priv *priv = dev->data->dev_private; struct mlx5_hrxq *hrxq; struct mlx5_ind_table_ibv *ind_tbl; struct ibv_qp *qp; + int err; +#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT + struct mlx5dv_qp_init_attr qp_init_attr = {0}; +#endif queues_n = hash_fields ?
queues_n : 1; - ind_tbl = mlx5_priv_ind_table_ibv_get(priv, queues, queues_n); - if (!ind_tbl) - ind_tbl = mlx5_priv_ind_table_ibv_new(priv, queues, queues_n); + ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n); if (!ind_tbl) + ind_tbl = mlx5_ind_table_ibv_new(dev, queues, queues_n); + if (!ind_tbl) { + rte_errno = ENOMEM; return NULL; + } + if (!rss_key_len) { + rss_key_len = rss_hash_default_key_len; + rss_key = rss_hash_default_key; + } +#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT + if (tunnel) { + qp_init_attr.comp_mask = + MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS; + qp_init_attr.create_flags = MLX5DV_QP_CREATE_TUNNEL_OFFLOADS; + } + qp = mlx5_glue->dv_create_qp + (priv->ctx, + &(struct ibv_qp_init_attr_ex){ + .qp_type = IBV_QPT_RAW_PACKET, + .comp_mask = + IBV_QP_INIT_ATTR_PD | + IBV_QP_INIT_ATTR_IND_TABLE | + IBV_QP_INIT_ATTR_RX_HASH, + .rx_hash_conf = (struct ibv_rx_hash_conf){ + .rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ, + .rx_hash_key_len = rss_key_len ? rss_key_len : + rss_hash_default_key_len, + .rx_hash_key = rss_key ? + (void *)(uintptr_t)rss_key : + rss_hash_default_key, + .rx_hash_fields_mask = hash_fields | + (tunnel && rss_level > 1 ? + (uint32_t)IBV_RX_HASH_INNER : 0), + }, + .rwq_ind_tbl = ind_tbl->ind_table, + .pd = priv->pd, + }, + &qp_init_attr); + DEBUG("port %u new QP:%p ind_tbl:%p hash_fields:0x%" PRIx64 + " tunnel:0x%x level:%u dv_attr:comp_mask:0x%" PRIx64 + " create_flags:0x%x", + dev->data->port_id, (void *)qp, (void *)ind_tbl, + (tunnel && rss_level == 2 ? (uint32_t)IBV_RX_HASH_INNER : 0) | + hash_fields, tunnel, rss_level, + qp_init_attr.comp_mask, qp_init_attr.create_flags); +#else qp = mlx5_glue->create_qp_ex (priv->ctx, &(struct ibv_qp_init_attr_ex){ @@ -1355,15 +1841,25 @@ mlx5_priv_hrxq_new(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len, IBV_QP_INIT_ATTR_RX_HASH, .rx_hash_conf = (struct ibv_rx_hash_conf){ .rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ, - .rx_hash_key_len = rss_key_len, - .rx_hash_key = rss_key, + .rx_hash_key_len = rss_key_len ? rss_key_len : + rss_hash_default_key_len, + .rx_hash_key = rss_key ? + (void *)(uintptr_t)rss_key : + rss_hash_default_key, .rx_hash_fields_mask = hash_fields, }, .rwq_ind_tbl = ind_tbl->ind_table, .pd = priv->pd, }); - if (!qp) + DEBUG("port %u new QP:%p ind_tbl:%p hash_fields:0x%" PRIx64 + " tunnel:0x%x level:%hhu", + dev->data->port_id, (void *)qp, (void *)ind_tbl, + hash_fields, tunnel, rss_level); +#endif + if (!qp) { + rte_errno = errno; goto error; + } hrxq = rte_calloc(__func__, 1, sizeof(*hrxq) + rss_key_len, 0); if (!hrxq) goto error; @@ -1371,24 +1867,29 @@ mlx5_priv_hrxq_new(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len, hrxq->qp = qp; hrxq->rss_key_len = rss_key_len; hrxq->hash_fields = hash_fields; + hrxq->tunnel = tunnel; + hrxq->rss_level = rss_level; memcpy(hrxq->rss_key, rss_key, rss_key_len); rte_atomic32_inc(&hrxq->refcnt); LIST_INSERT_HEAD(&priv->hrxqs, hrxq, next); - DEBUG("%p: Hash Rx queue %p: refcnt %d", (void *)priv, - (void *)hrxq, rte_atomic32_read(&hrxq->refcnt)); + DRV_LOG(DEBUG, "port %u hash Rx queue %p: refcnt %d", + dev->data->port_id, (void *)hrxq, + rte_atomic32_read(&hrxq->refcnt)); return hrxq; error: - mlx5_priv_ind_table_ibv_release(priv, ind_tbl); + err = rte_errno; /* Save rte_errno before cleanup. */ + mlx5_ind_table_ibv_release(dev, ind_tbl); if (qp) claim_zero(mlx5_glue->destroy_qp(qp)); + rte_errno = err; /* Restore rte_errno. */ return NULL; } /** * Get an Rx Hash queue. * - * @param priv - * Pointer to private structure. 
+ * @param dev + * Pointer to Ethernet device. * @param rss_conf * RSS configuration for the Rx hash queue. * @param queues @@ -1396,14 +1897,22 @@ error: * first queue index will be taken for the indirection table. * @param queues_n * Number of queues. + * @param tunnel + * Tunnel type, implies tunnel offloading like inner checksum if available. + * @param rss_level + * RSS hash on tunnel level. * * @return * An hash Rx queue on success. */ -struct mlx5_hrxq* -mlx5_priv_hrxq_get(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len, - uint64_t hash_fields, uint16_t queues[], uint16_t queues_n) +struct mlx5_hrxq * +mlx5_hrxq_get(struct rte_eth_dev *dev, + const uint8_t *rss_key, uint32_t rss_key_len, + uint64_t hash_fields, + const uint16_t *queues, uint32_t queues_n, + uint32_t tunnel, uint32_t rss_level) { + struct priv *priv = dev->data->dev_private; struct mlx5_hrxq *hrxq; queues_n = hash_fields ? queues_n : 1; @@ -1416,16 +1925,21 @@ mlx5_priv_hrxq_get(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len, continue; if (hrxq->hash_fields != hash_fields) continue; - ind_tbl = mlx5_priv_ind_table_ibv_get(priv, queues, queues_n); + if (hrxq->tunnel != tunnel) + continue; + if (hrxq->rss_level != rss_level) + continue; + ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n); if (!ind_tbl) continue; if (ind_tbl != hrxq->ind_table) { - mlx5_priv_ind_table_ibv_release(priv, ind_tbl); + mlx5_ind_table_ibv_release(dev, ind_tbl); continue; } rte_atomic32_inc(&hrxq->refcnt); - DEBUG("%p: Hash Rx queue %p: refcnt %d", (void *)priv, - (void *)hrxq, rte_atomic32_read(&hrxq->refcnt)); + DRV_LOG(DEBUG, "port %u hash Rx queue %p: refcnt %d", + dev->data->port_id, (void *)hrxq, + rte_atomic32_read(&hrxq->refcnt)); return hrxq; } return NULL; @@ -1434,47 +1948,55 @@ mlx5_priv_hrxq_get(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len, /** * Release the hash Rx queue. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param hrxq * Pointer to Hash Rx queue to release. * * @return - * 0 on success, errno value on failure. + * 1 while a reference on it exists, 0 when freed. */ int -mlx5_priv_hrxq_release(struct priv *priv, struct mlx5_hrxq *hrxq) +mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq) { - DEBUG("%p: Hash Rx queue %p: refcnt %d", (void *)priv, - (void *)hrxq, rte_atomic32_read(&hrxq->refcnt)); + DRV_LOG(DEBUG, "port %u hash Rx queue %p: refcnt %d", + dev->data->port_id, (void *)hrxq, + rte_atomic32_read(&hrxq->refcnt)); if (rte_atomic32_dec_and_test(&hrxq->refcnt)) { claim_zero(mlx5_glue->destroy_qp(hrxq->qp)); - mlx5_priv_ind_table_ibv_release(priv, hrxq->ind_table); + DEBUG("port %u delete QP %p: hash: 0x%" PRIx64 ", tunnel:" + " 0x%x, level: %u", + dev->data->port_id, (void *)hrxq, hrxq->hash_fields, + hrxq->tunnel, hrxq->rss_level); + mlx5_ind_table_ibv_release(dev, hrxq->ind_table); LIST_REMOVE(hrxq, next); rte_free(hrxq); return 0; } - claim_nonzero(mlx5_priv_ind_table_ibv_release(priv, hrxq->ind_table)); - return EBUSY; + claim_nonzero(mlx5_ind_table_ibv_release(dev, hrxq->ind_table)); + return 1; } /** * Verify the Rx Queue list is empty * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * - * @return the number of object not released. + * @return + *   The number of objects not released.
*/ int -mlx5_priv_hrxq_ibv_verify(struct priv *priv) +mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; struct mlx5_hrxq *hrxq; int ret = 0; LIST_FOREACH(hrxq, &priv->hrxqs, next) { - DEBUG("%p: Verbs Hash Rx queue %p still referenced", - (void *)priv, (void *)hrxq); + DRV_LOG(DEBUG, + "port %u Verbs hash Rx queue %p still referenced", + dev->data->port_id, (void *)hrxq); ++ret; } return ret; diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c index dc4ead93..52785946 100644 --- a/drivers/net/mlx5/mlx5_rxtx.c +++ b/drivers/net/mlx5/mlx5_rxtx.c @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2015 6WIND S.A. - * Copyright 2015 Mellanox. + * Copyright 2015 Mellanox Technologies, Ltd */ #include @@ -34,19 +34,29 @@ #include "mlx5_prm.h" static __rte_always_inline uint32_t -rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe); +rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe); static __rte_always_inline int mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe, uint16_t cqe_cnt, uint32_t *rss_hash); static __rte_always_inline uint32_t -rxq_cq_to_ol_flags(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe); +rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe); + +static __rte_always_inline void +rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt, + volatile struct mlx5_cqe *cqe, uint32_t rss_hash_res); + +static __rte_always_inline void +mprq_buf_replace(struct mlx5_rxq_data *rxq, uint16_t rq_idx); uint32_t mlx5_ptype_table[] __rte_cache_aligned = { [0xff] = RTE_PTYPE_ALL_MASK, /* Last entry for errored packet. */ }; +uint8_t mlx5_cksum_table[1 << 10] __rte_cache_aligned; +uint8_t mlx5_swp_types_table[1 << 10] __rte_cache_aligned; + /** * Build a table to translate Rx completion flags to packet type. 
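 * The table is indexed by bits taken from the CQE (tunnel flag plus inner and outer L3/L4 type bits); see rxq_cq_to_pkt_type() for the exact packing.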
* @@ -86,6 +96,14 @@ mlx5_set_ptype_table(void) RTE_PTYPE_L4_TCP; (*p)[0x06] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP; + (*p)[0x0d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_TCP; + (*p)[0x0e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_TCP; + (*p)[0x11] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_TCP; + (*p)[0x12] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_TCP; /* UDP */ (*p)[0x09] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP; @@ -104,17 +122,27 @@ mlx5_set_ptype_table(void) RTE_PTYPE_L4_TCP; (*p)[0x86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP; + (*p)[0x8d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_TCP; + (*p)[0x8e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_TCP; + (*p)[0x91] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_TCP; + (*p)[0x92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_TCP; (*p)[0x89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP; (*p)[0x8a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP; /* Tunneled - L3 */ + (*p)[0x40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN; (*p)[0x41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_NONFRAG; (*p)[0x42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_NONFRAG; + (*p)[0xc0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN; (*p)[0xc1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_NONFRAG; @@ -141,12 +169,36 @@ mlx5_set_ptype_table(void) (*p)[0x46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_TCP; + (*p)[0x4d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP; + (*p)[0x4e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP; + (*p)[0x51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP; + (*p)[0x52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP; (*p)[0xc5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_TCP; (*p)[0xc6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_TCP; + (*p)[0xcd] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP; + (*p)[0xce] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP; + (*p)[0xd1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP; + (*p)[0xd2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP; /* Tunneled - UDP */ (*p)[0x49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | @@ -162,6 +214,74 @@ mlx5_set_ptype_table(void) RTE_PTYPE_INNER_L4_UDP; } +/** + * Build a table to 
translate packet to checksum type of Verbs. + */ +void +mlx5_set_cksum_table(void) +{ + unsigned int i; + uint8_t v; + + /* + * The index should have: + * bit[0] = PKT_TX_TCP_SEG + * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM + * bit[4] = PKT_TX_IP_CKSUM + * bit[8] = PKT_TX_OUTER_IP_CKSUM + * bit[9] = tunnel + */ + for (i = 0; i < RTE_DIM(mlx5_cksum_table); ++i) { + v = 0; + if (i & (1 << 9)) { + /* Tunneled packet. */ + if (i & (1 << 8)) /* Outer IP. */ + v |= MLX5_ETH_WQE_L3_CSUM; + if (i & (1 << 4)) /* Inner IP. */ + v |= MLX5_ETH_WQE_L3_INNER_CSUM; + if (i & (3 << 2 | 1 << 0)) /* L4 or TSO. */ + v |= MLX5_ETH_WQE_L4_INNER_CSUM; + } else { + /* No tunnel. */ + if (i & (1 << 4)) /* IP. */ + v |= MLX5_ETH_WQE_L3_CSUM; + if (i & (3 << 2 | 1 << 0)) /* L4 or TSO. */ + v |= MLX5_ETH_WQE_L4_CSUM; + } + mlx5_cksum_table[i] = v; + } +} + +/** + * Build a table to translate packet type of mbuf to SWP type of Verbs. + */ +void +mlx5_set_swp_types_table(void) +{ + unsigned int i; + uint8_t v; + + /* + * The index should have: + * bit[0:1] = PKT_TX_L4_MASK + * bit[4] = PKT_TX_IPV6 + * bit[8] = PKT_TX_OUTER_IPV6 + * bit[9] = PKT_TX_OUTER_UDP + */ + for (i = 0; i < RTE_DIM(mlx5_swp_types_table); ++i) { + v = 0; + if (i & (1 << 8)) + v |= MLX5_ETH_WQE_L3_OUTER_IPV6; + if (i & (1 << 9)) + v |= MLX5_ETH_WQE_L4_OUTER_UDP; + if (i & (1 << 4)) + v |= MLX5_ETH_WQE_L3_INNER_IPV6; + if ((i & 3) == (PKT_TX_UDP_CKSUM >> 52)) + v |= MLX5_ETH_WQE_L4_INNER_UDP; + mlx5_swp_types_table[i] = v; + } +} + /** * Return the size of tailroom of WQ. * @@ -218,6 +338,60 @@ mlx5_copy_to_wq(void *dst, const void *src, size_t n, return ret; } +/** + * Inline TSO headers into WQE. + * + * @return + * 0 on success, negative errno value on failure. + */ +static int +inline_tso(struct mlx5_txq_data *txq, struct rte_mbuf *buf, + uint32_t *length, + uintptr_t *addr, + uint16_t *pkt_inline_sz, + uint8_t **raw, + uint16_t *max_wqe, + uint16_t *tso_segsz, + uint16_t *tso_header_sz) +{ + uintptr_t end = (uintptr_t)(((uintptr_t)txq->wqes) + + (1 << txq->wqe_n) * MLX5_WQE_SIZE); + unsigned int copy_b; + uint8_t vlan_sz = (buf->ol_flags & PKT_TX_VLAN_PKT) ? 4 : 0; + const uint8_t tunneled = txq->tunnel_en && (buf->ol_flags & + PKT_TX_TUNNEL_MASK); + uint16_t n_wqe; + + *tso_segsz = buf->tso_segsz; + *tso_header_sz = buf->l2_len + vlan_sz + buf->l3_len + buf->l4_len; + if (unlikely(*tso_segsz == 0 || *tso_header_sz == 0)) { + txq->stats.oerrors++; + return -EINVAL; + } + if (tunneled) + *tso_header_sz += buf->outer_l2_len + buf->outer_l3_len; + /* First seg must contain all TSO headers. */ + if (unlikely(*tso_header_sz > MLX5_MAX_TSO_HEADER) || + *tso_header_sz > DATA_LEN(buf)) { + txq->stats.oerrors++; + return -EINVAL; + } + copy_b = *tso_header_sz - *pkt_inline_sz; + if (!copy_b || ((end - (uintptr_t)*raw) < copy_b)) + return -EAGAIN; + n_wqe = (MLX5_WQE_DS(copy_b) - 1 + 3) / 4; + if (unlikely(*max_wqe < n_wqe)) + return -EINVAL; + *max_wqe -= n_wqe; + rte_memcpy((void *)*raw, (void *)*addr, copy_b); + *length -= copy_b; + *addr += copy_b; + copy_b = MLX5_WQE_DS(copy_b) * MLX5_WQE_DWORD_SIZE; + *pkt_inline_sz += copy_b; + *raw += copy_b; + return 0; +} + /** * DPDK callback to check the status of a tx descriptor. * @@ -335,7 +509,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) if (unlikely(!max_wqe)) return 0; do { - struct rte_mbuf *buf = NULL; + struct rte_mbuf *buf = *pkts; /* First_seg. 
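+ * First mbuf segment of the current packet; chained segments are walked later in the loop.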
*/ uint8_t *raw; volatile struct mlx5_wqe_v *wqe = NULL; volatile rte_v128u32_t *dseg = NULL; @@ -347,14 +521,15 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) uint16_t tso_header_sz = 0; uint16_t ehdr; uint8_t cs_flags; - uint64_t tso = 0; + uint8_t tso = txq->tso_en && (buf->ol_flags & PKT_TX_TCP_SEG); + uint32_t swp_offsets = 0; + uint8_t swp_types = 0; uint16_t tso_segsz = 0; #ifdef MLX5_PMD_SOFT_COUNTERS uint32_t total_length = 0; #endif + int ret; - /* first_seg */ - buf = *pkts; segs_n = buf->nb_segs; /* * Make sure there is enough room to store this packet and @@ -389,7 +564,8 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) if (pkts_n - i > 1) rte_prefetch0( rte_pktmbuf_mtod(*(pkts + 1), volatile void *)); - cs_flags = txq_ol_cksum_to_cs(txq, buf); + cs_flags = txq_ol_cksum_to_cs(buf); + txq_mbuf_to_swp(txq, buf, (uint8_t *)&swp_offsets, &swp_types); raw = ((uint8_t *)(uintptr_t)wqe) + 2 * MLX5_WQE_DWORD_SIZE; /* Replace the Ethernet type by the VLAN if necessary. */ if (buf->ol_flags & PKT_TX_VLAN_PKT) { @@ -415,54 +591,14 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) addr += pkt_inline_sz; } raw += MLX5_WQE_DWORD_SIZE; - tso = txq->tso_en && (buf->ol_flags & PKT_TX_TCP_SEG); if (tso) { - uintptr_t end = - (uintptr_t)(((uintptr_t)txq->wqes) + - (1 << txq->wqe_n) * MLX5_WQE_SIZE); - unsigned int copy_b; - uint8_t vlan_sz = - (buf->ol_flags & PKT_TX_VLAN_PKT) ? 4 : 0; - const uint64_t is_tunneled = - buf->ol_flags & (PKT_TX_TUNNEL_GRE | - PKT_TX_TUNNEL_VXLAN); - - tso_header_sz = buf->l2_len + vlan_sz + - buf->l3_len + buf->l4_len; - tso_segsz = buf->tso_segsz; - if (unlikely(tso_segsz == 0)) { - txq->stats.oerrors++; + ret = inline_tso(txq, buf, &length, + &addr, &pkt_inline_sz, + &raw, &max_wqe, + &tso_segsz, &tso_header_sz); + if (ret == -EINVAL) { break; - } - if (is_tunneled && txq->tunnel_en) { - tso_header_sz += buf->outer_l2_len + - buf->outer_l3_len; - cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM; - } else { - cs_flags |= MLX5_ETH_WQE_L4_CSUM; - } - if (unlikely(tso_header_sz > MLX5_MAX_TSO_HEADER)) { - txq->stats.oerrors++; - break; - } - copy_b = tso_header_sz - pkt_inline_sz; - /* First seg must contain all headers. */ - assert(copy_b <= length); - if (copy_b && ((end - (uintptr_t)raw) > copy_b)) { - uint16_t n = (MLX5_WQE_DS(copy_b) - 1 + 3) / 4; - - if (unlikely(max_wqe < n)) - break; - max_wqe -= n; - rte_memcpy((void *)raw, (void *)addr, copy_b); - addr += copy_b; - length -= copy_b; - /* Include padding for TSO header. */ - copy_b = MLX5_WQE_DS(copy_b) * - MLX5_WQE_DWORD_SIZE; - pkt_inline_sz += copy_b; - raw += copy_b; - } else { + } else if (ret == -EAGAIN) { /* NOP WQE. */ wqe->ctrl = (rte_v128u32_t){ rte_cpu_to_be_32(txq->wqe_ci << 8), @@ -507,7 +643,8 @@ pkt_inline: if (unlikely(max_wqe < n)) break; max_wqe -= n; - if (tso && !inl) { + if (tso) { + assert(inl == 0); inl = rte_cpu_to_be_32(copy_b | MLX5_INLINE_SEG); rte_memcpy((void *)raw, @@ -542,8 +679,17 @@ pkt_inline: } else if (!segs_n) { goto next_pkt; } else { - raw += copy_b; - inline_room -= copy_b; + /* + * Further inline the next segment only for + * non-TSO packets. + */ + if (!tso) { + raw += copy_b; + inline_room -= copy_b; + } else { + inline_room = 0; + } + /* Move to the next segment. 
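+ * For TSO the remaining payload is not inlined (inline_room was zeroed above); it is attached as pointer data segments instead.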
*/ --segs_n; buf = buf->next; assert(buf); @@ -633,8 +779,9 @@ next_pkt: 0, }; wqe->eseg = (rte_v128u32_t){ - 0, - cs_flags | (rte_cpu_to_be_16(tso_segsz) << 16), + swp_offsets, + cs_flags | (swp_types << 8) | + (rte_cpu_to_be_16(tso_segsz) << 16), 0, (ehdr << 16) | rte_cpu_to_be_16(tso_header_sz), }; @@ -647,8 +794,8 @@ next_pkt: 0, }; wqe->eseg = (rte_v128u32_t){ - 0, - cs_flags, + swp_offsets, + cs_flags | (swp_types << 8), 0, (ehdr << 16) | rte_cpu_to_be_16(pkt_inline_sz), }; @@ -820,7 +967,7 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) } max_elts -= segs_n; --pkts_n; - cs_flags = txq_ol_cksum_to_cs(txq, buf); + cs_flags = txq_ol_cksum_to_cs(buf); /* Retrieve packet information. */ length = PKT_LEN(buf); assert(length); @@ -1052,7 +1199,7 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts, * iteration. */ max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi); - cs_flags = txq_ol_cksum_to_cs(txq, buf); + cs_flags = txq_ol_cksum_to_cs(buf); /* Retrieve packet information. */ length = PKT_LEN(buf); /* Start new session if packet differs. */ @@ -1320,7 +1467,6 @@ txq_burst_empw(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, do { struct rte_mbuf *buf = *(pkts++); uintptr_t addr; - unsigned int n; unsigned int do_inline = 0; /* Whether inline is possible. */ uint32_t length; uint8_t cs_flags; @@ -1330,7 +1476,7 @@ txq_burst_empw(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, /* Make sure there is enough room to store this packet. */ if (max_elts - j == 0) break; - cs_flags = txq_ol_cksum_to_cs(txq, buf); + cs_flags = txq_ol_cksum_to_cs(buf); /* Retrieve packet information. */ length = PKT_LEN(buf); /* Start new session if: @@ -1382,7 +1528,7 @@ txq_burst_empw(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, (!txq->mpw_hdr_dseg || mpw.total_len >= MLX5_WQE_SIZE); } - if (do_inline) { + if (max_inline && do_inline) { /* Inline packet into WQE. */ unsigned int max; @@ -1440,11 +1586,8 @@ txq_burst_empw(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, ((uintptr_t)mpw.data.raw + inl_pad); (*txq->elts)[elts_head++ & elts_m] = buf; - addr = rte_pktmbuf_mtod(buf, uintptr_t); - for (n = 0; n * RTE_CACHE_LINE_SIZE < length; n++) - rte_prefetch2((void *)(addr + - n * RTE_CACHE_LINE_SIZE)); - addr = rte_cpu_to_be_64(addr); + addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf, + uintptr_t)); *dseg = (rte_v128u32_t) { rte_cpu_to_be_32(length), mlx5_tx_mb2mr(txq, buf), @@ -1541,6 +1684,8 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) /** * Translate RX completion flags to packet type. * + * @param[in] rxq + * Pointer to RX queue structure. * @param[in] cqe * Pointer to CQE. * @@ -1550,7 +1695,7 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) * Packet type for struct rte_mbuf. */ static inline uint32_t -rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe) +rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe) { uint8_t idx; uint8_t pinfo = cqe->pkt_info; @@ -1565,7 +1710,7 @@ rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe) * bit[7] = outer_l3_type */ idx = ((pinfo & 0x3) << 6) | ((ptype & 0xfc00) >> 10); - return mlx5_ptype_table[idx]; + return mlx5_ptype_table[idx] | rxq->tunnel * !!(idx & (1 << 6)); } /** @@ -1688,8 +1833,6 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe, /** * Translate RX completion flags to offload flags. * - * @param[in] rxq - * Pointer to RX queue structure. * @param[in] cqe * Pointer to CQE. 
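rxq_cq_to_pkt_type() above folds tunnel information in without branching: rxq->tunnel holds a precomputed RTE_PTYPE_TUNNEL_* word (zero when no tunnel flow is configured on the queue) and bit 6 of the table index flags a tunneled completion, so a multiply by a 0/1 predicate replaces a conditional in the hot path. A minimal sketch of the idiom:

#include <stdint.h>

static inline uint32_t
example_pkt_type(const uint32_t *ptype_table, uint8_t idx, uint32_t tunnel)
{
	/* '!!' collapses the tested bit to 0 or 1; the multiply then
	 * selects either 0 or the full tunnel ptype word. */
	return ptype_table[idx] | tunnel * !!(idx & (1 << 6));
}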
* @@ -1697,7 +1840,7 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe, * Offload flags (ol_flags) for struct rte_mbuf. */ static inline uint32_t -rxq_cq_to_ol_flags(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe) +rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe) { uint32_t ol_flags = 0; uint16_t flags = rte_be_to_cpu_16(cqe->hdr_type_etc); @@ -1709,17 +1852,55 @@ rxq_cq_to_ol_flags(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe) TRANSPOSE(flags, MLX5_CQE_RX_L4_HDR_VALID, PKT_RX_L4_CKSUM_GOOD); - if ((cqe->pkt_info & MLX5_CQE_RX_TUNNEL_PACKET) && (rxq->csum_l2tun)) - ol_flags |= - TRANSPOSE(flags, - MLX5_CQE_RX_L3_HDR_VALID, - PKT_RX_IP_CKSUM_GOOD) | - TRANSPOSE(flags, - MLX5_CQE_RX_L4_HDR_VALID, - PKT_RX_L4_CKSUM_GOOD); return ol_flags; } +/** + * Fill in mbuf fields from RX completion flags. + * Note that pkt->ol_flags should be initialized outside of this function. + * + * @param rxq + * Pointer to RX queue. + * @param pkt + * mbuf to fill. + * @param cqe + * CQE to process. + * @param rss_hash_res + * Packet RSS Hash result. + */ +static inline void +rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt, + volatile struct mlx5_cqe *cqe, uint32_t rss_hash_res) +{ + /* Update packet information. */ + pkt->packet_type = rxq_cq_to_pkt_type(rxq, cqe); + if (rss_hash_res && rxq->rss_hash) { + pkt->hash.rss = rss_hash_res; + pkt->ol_flags |= PKT_RX_RSS_HASH; + } + if (rxq->mark && MLX5_FLOW_MARK_IS_VALID(cqe->sop_drop_qpn)) { + pkt->ol_flags |= PKT_RX_FDIR; + if (cqe->sop_drop_qpn != + rte_cpu_to_be_32(MLX5_FLOW_MARK_DEFAULT)) { + uint32_t mark = cqe->sop_drop_qpn; + + pkt->ol_flags |= PKT_RX_FDIR_ID; + pkt->hash.fdir.hi = mlx5_flow_mark_get(mark); + } + } + if (rxq->csum) + pkt->ol_flags |= rxq_cq_to_ol_flags(cqe); + if (rxq->vlan_strip && + (cqe->hdr_type_etc & rte_cpu_to_be_16(MLX5_CQE_VLAN_STRIPPED))) { + pkt->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED; + pkt->vlan_tci = rte_be_to_cpu_16(cqe->vlan_info); + } + if (rxq->hw_timestamp) { + pkt->timestamp = rte_be_to_cpu_64(cqe->timestamp); + pkt->ol_flags |= PKT_RX_TIMESTAMP; + } +} + /** * DPDK callback for RX. * @@ -1750,7 +1931,8 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) while (pkts_n) { unsigned int idx = rq_ci & wqe_cnt; - volatile struct mlx5_wqe_data_seg *wqe = &(*rxq->wqes)[idx]; + volatile struct mlx5_wqe_data_seg *wqe = + &((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[idx]; struct rte_mbuf *rep = (*rxq->elts)[idx]; uint32_t rss_hash_res = 0; @@ -1796,40 +1978,8 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) } pkt = seg; assert(len >= (rxq->crc_present << 2)); - /* Update packet information. 
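rxq_cq_to_ol_flags() above depends on the driver's TRANSPOSE() idiom to move a flag from its CQE bit position to its mbuf ol_flags position without branching. A plausible definition, shown only to make the call sites readable (the authoritative macro lives in mlx5_utils.h):

/* Moves the bit selected by 'from' in 'val' to the position of 'to' by
 * dividing or multiplying by the power-of-two ratio of the two masks.
 * Both 'from' and 'to' are assumed to be single-bit masks. */
#define EXAMPLE_TRANSPOSE(val, from, to) \
	(((from) >= (to)) ? \
	 (((val) & (from)) / ((from) / (to))) : \
	 (((val) & (from)) * ((to) / (from))))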
*/ - pkt->packet_type = rxq_cq_to_pkt_type(cqe); pkt->ol_flags = 0; - if (rss_hash_res && rxq->rss_hash) { - pkt->hash.rss = rss_hash_res; - pkt->ol_flags = PKT_RX_RSS_HASH; - } - if (rxq->mark && - MLX5_FLOW_MARK_IS_VALID(cqe->sop_drop_qpn)) { - pkt->ol_flags |= PKT_RX_FDIR; - if (cqe->sop_drop_qpn != - rte_cpu_to_be_32(MLX5_FLOW_MARK_DEFAULT)) { - uint32_t mark = cqe->sop_drop_qpn; - - pkt->ol_flags |= PKT_RX_FDIR_ID; - pkt->hash.fdir.hi = - mlx5_flow_mark_get(mark); - } - } - if (rxq->csum | rxq->csum_l2tun) - pkt->ol_flags |= rxq_cq_to_ol_flags(rxq, cqe); - if (rxq->vlan_strip && - (cqe->hdr_type_etc & - rte_cpu_to_be_16(MLX5_CQE_VLAN_STRIPPED))) { - pkt->ol_flags |= PKT_RX_VLAN | - PKT_RX_VLAN_STRIPPED; - pkt->vlan_tci = - rte_be_to_cpu_16(cqe->vlan_info); - } - if (rxq->hw_timestamp) { - pkt->timestamp = - rte_be_to_cpu_64(cqe->timestamp); - pkt->ol_flags |= PKT_RX_TIMESTAMP; - } + rxq_cq_to_mbuf(rxq, pkt, cqe, rss_hash_res); if (rxq->crc_present) len -= ETHER_CRC_LEN; PKT_LEN(pkt) = len; @@ -1845,6 +1995,9 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) * changes. */ wqe->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, uintptr_t)); + /* If there's only one MR, no need to replace LKey in WQE. */ + if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1)) + wqe->lkey = mlx5_rx_mb2mr(rxq, rep); if (len > DATA_LEN(seg)) { len -= DATA_LEN(seg); ++NB_SEGS(pkt); @@ -1882,6 +2035,236 @@ skip: return i; } +void +mlx5_mprq_buf_free_cb(void *addr __rte_unused, void *opaque) +{ + struct mlx5_mprq_buf *buf = opaque; + + if (rte_atomic16_read(&buf->refcnt) == 1) { + rte_mempool_put(buf->mp, buf); + } else if (rte_atomic16_add_return(&buf->refcnt, -1) == 0) { + rte_atomic16_set(&buf->refcnt, 1); + rte_mempool_put(buf->mp, buf); + } +} + +void +mlx5_mprq_buf_free(struct mlx5_mprq_buf *buf) +{ + mlx5_mprq_buf_free_cb(NULL, buf); +} + +static inline void +mprq_buf_replace(struct mlx5_rxq_data *rxq, uint16_t rq_idx) +{ + struct mlx5_mprq_buf *rep = rxq->mprq_repl; + volatile struct mlx5_wqe_data_seg *wqe = + &((volatile struct mlx5_wqe_mprq *)rxq->wqes)[rq_idx].dseg; + void *addr; + + assert(rep != NULL); + /* Replace MPRQ buf. */ + (*rxq->mprq_bufs)[rq_idx] = rep; + /* Replace WQE. */ + addr = mlx5_mprq_buf_addr(rep); + wqe->addr = rte_cpu_to_be_64((uintptr_t)addr); + /* If there's only one MR, no need to replace LKey in WQE. */ + if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1)) + wqe->lkey = mlx5_rx_addr2mr(rxq, (uintptr_t)addr); + /* Stash a mbuf for next replacement. */ + if (likely(!rte_mempool_get(rxq->mprq_mp, (void **)&rep))) + rxq->mprq_repl = rep; + else + rxq->mprq_repl = NULL; +} + +/** + * DPDK callback for RX with Multi-Packet RQ support. + * + * @param dpdk_rxq + * Generic pointer to RX queue structure. + * @param[out] pkts + * Array to store received packets. + * @param pkts_n + * Maximum number of packets in array. + * + * @return + * Number of packets successfully received (<= pkts_n). 
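mlx5_mprq_buf_free_cb() above encodes a small ownership protocol for Multi-Packet RQ buffers: the RQ's own reference keeps refcnt at 1, every stride handed out as an external mbuf adds one, and whoever drops the count to zero resets it to 1 before returning the chunk to its mempool, so the buffer re-enters the pool already in the owned state. A hypothetical lifetime walk-through, not a real mlx5 call sequence:

#include <rte_atomic.h>
#include "mlx5_rxtx.h"

static void
example_mprq_lifetime(struct mlx5_mprq_buf *buf)
{
	rte_atomic16_set(&buf->refcnt, 1);        /* owned by the RQ alone */
	rte_atomic16_add_return(&buf->refcnt, 1); /* one stride attached: 2 */
	mlx5_mprq_buf_free(buf);                  /* RQ replaces it: 2 -> 1 */
	/* Freeing the attached mbuf later fires mlx5_mprq_buf_free_cb(),
	 * which sees refcnt == 1 and puts the chunk back into buf->mp. */
}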
+ */
+uint16_t
+mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
+{
+	struct mlx5_rxq_data *rxq = dpdk_rxq;
+	const unsigned int strd_n = 1 << rxq->strd_num_n;
+	const unsigned int strd_sz = 1 << rxq->strd_sz_n;
+	const unsigned int strd_shift =
+		MLX5_MPRQ_STRIDE_SHIFT_BYTE * rxq->strd_shift_en;
+	const unsigned int cq_mask = (1 << rxq->cqe_n) - 1;
+	const unsigned int wq_mask = (1 << rxq->elts_n) - 1;
+	volatile struct mlx5_cqe *cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
+	unsigned int i = 0;
+	uint16_t rq_ci = rxq->rq_ci;
+	uint16_t strd_idx = rxq->strd_ci;
+	struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];
+
+	while (i < pkts_n) {
+		struct rte_mbuf *pkt;
+		void *addr;
+		int ret;
+		unsigned int len;
+		uint16_t consumed_strd;
+		uint32_t offset;
+		uint32_t byte_cnt;
+		uint32_t rss_hash_res = 0;
+
+		if (strd_idx == strd_n) {
+			/* Replace WQE only if the buffer is still in use. */
+			if (rte_atomic16_read(&buf->refcnt) > 1) {
+				mprq_buf_replace(rxq, rq_ci & wq_mask);
+				/* Release the old buffer. */
+				mlx5_mprq_buf_free(buf);
+			} else if (unlikely(rxq->mprq_repl == NULL)) {
+				struct mlx5_mprq_buf *rep;
+
+				/*
+				 * The MPRQ mempool has run out of buffers and
+				 * packets are being copied regardless of
+				 * their size. Retry the allocation to get
+				 * back to normal operation.
+				 */
+				if (!rte_mempool_get(rxq->mprq_mp,
+						     (void **)&rep))
+					rxq->mprq_repl = rep;
+			}
+			/* Advance to the next WQE. */
+			strd_idx = 0;
+			++rq_ci;
+			buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];
+		}
+		cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
+		ret = mlx5_rx_poll_len(rxq, cqe, cq_mask, &rss_hash_res);
+		if (!ret)
+			break;
+		if (unlikely(ret == -1)) {
+			/* RX error, packet is likely too large. */
+			++rxq->stats.idropped;
+			continue;
+		}
+		byte_cnt = ret;
+		consumed_strd = (byte_cnt & MLX5_MPRQ_STRIDE_NUM_MASK) >>
+				MLX5_MPRQ_STRIDE_NUM_SHIFT;
+		assert(consumed_strd);
+		/* Calculate offset before adding up stride index. */
+		offset = strd_idx * strd_sz + strd_shift;
+		strd_idx += consumed_strd;
+		if (byte_cnt & MLX5_MPRQ_FILLER_MASK)
+			continue;
+		/*
+		 * Currently configured to receive a single packet per stride.
+		 * But if the MTU is adjusted through the kernel interface,
+		 * the device could consume multiple strides without raising
+		 * an error. In this case, the packet should be dropped
+		 * because it is bigger than max_rx_pkt_len.
+		 */
+		if (unlikely(consumed_strd > 1)) {
+			++rxq->stats.idropped;
+			continue;
+		}
+		pkt = rte_pktmbuf_alloc(rxq->mp);
+		if (unlikely(pkt == NULL)) {
+			++rxq->stats.rx_nombuf;
+			break;
+		}
+		len = (byte_cnt & MLX5_MPRQ_LEN_MASK) >> MLX5_MPRQ_LEN_SHIFT;
+		assert((int)len >= (rxq->crc_present << 2));
+		if (rxq->crc_present)
+			len -= ETHER_CRC_LEN;
+		addr = RTE_PTR_ADD(mlx5_mprq_buf_addr(buf), offset);
+		/* Initialize the offload flag. */
+		pkt->ol_flags = 0;
+		/*
+		 * Copy the packet to the target mbuf if:
+		 * - the packet is smaller than mprq_max_memcpy_len, or
+		 * - the mempool for Multi-Packet RQ is out of buffers.
+		 */
+		if (len <= rxq->mprq_max_memcpy_len || rxq->mprq_repl == NULL) {
+			/*
+			 * When the packet is copied because buffers ran out,
+			 * it must fit in the target mbuf.
+			 */
+			if (unlikely(rte_pktmbuf_tailroom(pkt) < len)) {
+				rte_pktmbuf_free_seg(pkt);
+				++rxq->stats.idropped;
+				continue;
+			}
+			rte_memcpy(rte_pktmbuf_mtod(pkt, void *), addr, len);
+		} else {
+			rte_iova_t buf_iova;
+			struct rte_mbuf_ext_shared_info *shinfo;
+			uint16_t buf_len = consumed_strd * strd_sz;
+
+			/* Increment the refcnt of the whole chunk.
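mlx5_rx_poll_len() hands back the CQE byte_cnt word untouched for MPRQ completions, and the loop above slices the stride count, the byte count and the filler flag out of it. A decoding sketch; the concrete layout shown (16-bit length at the bottom, stride count above it, filler in the top bit) is an assumption consistent with how the MLX5_MPRQ_* masks are used here, and the authoritative values live in mlx5_prm.h:

#include <stdint.h>

struct example_mprq_cqe_fields {
	uint32_t len;     /* packet byte count */
	uint32_t strides; /* strides consumed by this packet */
	int filler;       /* completion carries no packet data */
};

static inline struct example_mprq_cqe_fields
example_decode_byte_cnt(uint32_t byte_cnt)
{
	return (struct example_mprq_cqe_fields){
		.len = byte_cnt & 0x0000ffffu,
		.strides = (byte_cnt & 0x3fff0000u) >> 16,
		.filler = !!(byte_cnt & 0x80000000u),
	};
}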
*/ + rte_atomic16_add_return(&buf->refcnt, 1); + assert((uint16_t)rte_atomic16_read(&buf->refcnt) <= + strd_n + 1); + addr = RTE_PTR_SUB(addr, RTE_PKTMBUF_HEADROOM); + /* + * MLX5 device doesn't use iova but it is necessary in a + * case where the Rx packet is transmitted via a + * different PMD. + */ + buf_iova = rte_mempool_virt2iova(buf) + + RTE_PTR_DIFF(addr, buf); + shinfo = rte_pktmbuf_ext_shinfo_init_helper(addr, + &buf_len, mlx5_mprq_buf_free_cb, buf); + /* + * EXT_ATTACHED_MBUF will be set to pkt->ol_flags when + * attaching the stride to mbuf and more offload flags + * will be added below by calling rxq_cq_to_mbuf(). + * Other fields will be overwritten. + */ + rte_pktmbuf_attach_extbuf(pkt, addr, buf_iova, buf_len, + shinfo); + rte_pktmbuf_reset_headroom(pkt); + assert(pkt->ol_flags == EXT_ATTACHED_MBUF); + /* + * Prevent potential overflow due to MTU change through + * kernel interface. + */ + if (unlikely(rte_pktmbuf_tailroom(pkt) < len)) { + rte_pktmbuf_free_seg(pkt); + ++rxq->stats.idropped; + continue; + } + } + rxq_cq_to_mbuf(rxq, pkt, cqe, rss_hash_res); + PKT_LEN(pkt) = len; + DATA_LEN(pkt) = len; + PORT(pkt) = rxq->port_id; +#ifdef MLX5_PMD_SOFT_COUNTERS + /* Increment bytes counter. */ + rxq->stats.ibytes += PKT_LEN(pkt); +#endif + /* Return packet. */ + *(pkts++) = pkt; + ++i; + } + /* Update the consumer indexes. */ + rxq->strd_ci = strd_idx; + rte_cio_wmb(); + *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci); + if (rq_ci != rxq->rq_ci) { + rxq->rq_ci = rq_ci; + rte_cio_wmb(); + *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci); + } +#ifdef MLX5_PMD_SOFT_COUNTERS + /* Increment packets counter. */ + rxq->stats.ipackets += i; +#endif + return i; +} + /** * Dummy DPDK callback for TX. * @@ -1899,11 +2282,10 @@ skip: * Number of packets successfully transmitted (<= pkts_n). */ uint16_t -removed_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) +removed_tx_burst(void *dpdk_txq __rte_unused, + struct rte_mbuf **pkts __rte_unused, + uint16_t pkts_n __rte_unused) { - (void)dpdk_txq; - (void)pkts; - (void)pkts_n; return 0; } @@ -1924,11 +2306,10 @@ removed_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) * Number of packets successfully received (<= pkts_n). 
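The zero-copy branch above is an instance of the external-buffer mbuf API introduced in DPDK 18.05. Stripped of the MPRQ specifics, the general shape is sketched below; my_free_cb, opaque and the error policy are placeholders, not mlx5 code:

#include <rte_mbuf.h>

static struct rte_mbuf *
example_attach_extbuf(struct rte_mempool *mb_pool, void *addr,
		      rte_iova_t iova, uint16_t buf_len, void *opaque,
		      rte_mbuf_extbuf_free_callback_t my_free_cb)
{
	struct rte_mbuf_ext_shared_info *shinfo;
	struct rte_mbuf *m = rte_pktmbuf_alloc(mb_pool);

	if (m == NULL)
		return NULL;
	/* The helper carves the shared info out of the tail of the
	 * region and shrinks buf_len accordingly. */
	shinfo = rte_pktmbuf_ext_shinfo_init_helper(addr, &buf_len,
						    my_free_cb, opaque);
	if (shinfo == NULL) {
		rte_pktmbuf_free(m);
		return NULL;
	}
	rte_pktmbuf_attach_extbuf(m, addr, iova, buf_len, shinfo);
	rte_pktmbuf_reset_headroom(m);
	return m;
}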
*/ uint16_t -removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) +removed_rx_burst(void *dpdk_txq __rte_unused, + struct rte_mbuf **pkts __rte_unused, + uint16_t pkts_n __rte_unused) { - (void)dpdk_rxq; - (void)pkts; - (void)pkts_n; return 0; } @@ -1940,58 +2321,49 @@ removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) */ uint16_t __attribute__((weak)) -mlx5_tx_burst_raw_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) +mlx5_tx_burst_raw_vec(void *dpdk_txq __rte_unused, + struct rte_mbuf **pkts __rte_unused, + uint16_t pkts_n __rte_unused) { - (void)dpdk_txq; - (void)pkts; - (void)pkts_n; return 0; } uint16_t __attribute__((weak)) -mlx5_tx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) +mlx5_tx_burst_vec(void *dpdk_txq __rte_unused, + struct rte_mbuf **pkts __rte_unused, + uint16_t pkts_n __rte_unused) { - (void)dpdk_txq; - (void)pkts; - (void)pkts_n; return 0; } uint16_t __attribute__((weak)) -mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) +mlx5_rx_burst_vec(void *dpdk_txq __rte_unused, + struct rte_mbuf **pkts __rte_unused, + uint16_t pkts_n __rte_unused) { - (void)dpdk_rxq; - (void)pkts; - (void)pkts_n; return 0; } int __attribute__((weak)) -priv_check_raw_vec_tx_support(struct priv *priv, struct rte_eth_dev *dev) +mlx5_check_raw_vec_tx_support(struct rte_eth_dev *dev __rte_unused) { - (void)priv; - (void)dev; return -ENOTSUP; } int __attribute__((weak)) -priv_check_vec_tx_support(struct priv *priv, struct rte_eth_dev *dev) +mlx5_check_vec_tx_support(struct rte_eth_dev *dev __rte_unused) { - (void)priv; - (void)dev; return -ENOTSUP; } int __attribute__((weak)) -rxq_check_vec_support(struct mlx5_rxq_data *rxq) +mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq __rte_unused) { - (void)rxq; return -ENOTSUP; } int __attribute__((weak)) -priv_check_vec_rx_support(struct priv *priv) +mlx5_check_vec_rx_support(struct rte_eth_dev *dev __rte_unused) { - (void)priv; return -ENOTSUP; } diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h index d7e89055..f53bb43c 100644 --- a/drivers/net/mlx5/mlx5_rxtx.h +++ b/drivers/net/mlx5/mlx5_rxtx.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2015 6WIND S.A. - * Copyright 2015 Mellanox. + * Copyright 2015 Mellanox Technologies, Ltd */ #ifndef RTE_PMD_MLX5_RXTX_H_ @@ -29,6 +29,7 @@ #include "mlx5_utils.h" #include "mlx5.h" +#include "mlx5_mr.h" #include "mlx5_autoconf.h" #include "mlx5_defs.h" #include "mlx5_prm.h" @@ -54,17 +55,6 @@ struct mlx5_txq_stats { struct priv; -/* Memory region queue object. */ -struct mlx5_mr { - LIST_ENTRY(mlx5_mr) next; /**< Pointer to the next element. */ - rte_atomic32_t refcnt; /*<< Reference counter. */ - uint32_t lkey; /*<< rte_cpu_to_be_32(mr->lkey) */ - uintptr_t start; /* Start address of MR */ - uintptr_t end; /* End address of MR */ - struct ibv_mr *mr; /*<< Memory Region. */ - struct rte_mempool *mp; /*<< Memory Pool. */ -}; - /* Compressed CQE context. */ struct rxq_zip { uint16_t ai; /* Array index. */ @@ -74,10 +64,19 @@ struct rxq_zip { uint32_t cqe_cnt; /* Number of CQEs. */ }; +/* Multi-Packet RQ buffer header. */ +struct mlx5_mprq_buf { + struct rte_mempool *mp; + rte_atomic16_t refcnt; /* Atomically accessed refcnt. */ + uint8_t pad[RTE_PKTMBUF_HEADROOM]; /* Headroom for the first packet. */ +} __rte_cache_aligned; + +/* Get pointer to the first stride. */ +#define mlx5_mprq_buf_addr(ptr) ((ptr) + 1) + /* RX queue descriptor. 
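The mlx5_mprq_buf_addr() macro above leans on pointer arithmetic over the header type: (ptr) + 1 is the first byte past struct mlx5_mprq_buf, so each mempool object is the header, whose pad[] keeps RTE_PKTMBUF_HEADROOM in front of the first packet, followed directly by the stride array. The equivalent open-coded address computation:

#include <stddef.h>
#include "mlx5_rxtx.h"

/* Equivalent to mlx5_mprq_buf_addr(buf): the first stride starts
 * exactly sizeof(struct mlx5_mprq_buf) bytes into the object. */
static inline void *
example_first_stride(struct mlx5_mprq_buf *buf)
{
	return (char *)buf + sizeof(*buf);
}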
*/ struct mlx5_rxq_data { unsigned int csum:1; /* Enable checksum offloading. */ - unsigned int csum_l2tun:1; /* Same for L2 tunnels. */ unsigned int hw_timestamp:1; /* Enable HW timestamp. */ unsigned int vlan_strip:1; /* Enable VLAN stripping. */ unsigned int crc_present:1; /* CRC must be subtracted. */ @@ -86,24 +85,37 @@ struct mlx5_rxq_data { unsigned int elts_n:4; /* Log 2 of Mbufs. */ unsigned int rss_hash:1; /* RSS hash result is enabled. */ unsigned int mark:1; /* Marked flow available on the queue. */ - unsigned int :15; /* Remaining bits. */ + unsigned int strd_num_n:5; /* Log 2 of the number of stride. */ + unsigned int strd_sz_n:4; /* Log 2 of stride size. */ + unsigned int strd_shift_en:1; /* Enable 2bytes shift on a stride. */ + unsigned int :6; /* Remaining bits. */ volatile uint32_t *rq_db; volatile uint32_t *cq_db; uint16_t port_id; uint16_t rq_ci; + uint16_t strd_ci; /* Stride index in a WQE for Multi-Packet RQ. */ uint16_t rq_pi; uint16_t cq_ci; - volatile struct mlx5_wqe_data_seg(*wqes)[]; + struct mlx5_mr_ctrl mr_ctrl; /* MR control descriptor. */ + uint16_t mprq_max_memcpy_len; /* Maximum size of packet to memcpy. */ + volatile void *wqes; volatile struct mlx5_cqe(*cqes)[]; struct rxq_zip zip; /* Compressed context. */ - struct rte_mbuf *(*elts)[]; + RTE_STD_C11 + union { + struct rte_mbuf *(*elts)[]; + struct mlx5_mprq_buf *(*mprq_bufs)[]; + }; struct rte_mempool *mp; + struct rte_mempool *mprq_mp; /* Mempool for Multi-Packet RQ. */ + struct mlx5_mprq_buf *mprq_repl; /* Stashed mbuf for replenish. */ struct mlx5_rxq_stats stats; uint64_t mbuf_initializer; /* Default rearm_data for vectorized Rx. */ struct rte_mbuf fake_mbuf; /* elts padding for vectorized Rx. */ void *cq_uar; /* CQ user access region. */ uint32_t cqn; /* CQ number. */ uint8_t cq_arm_sn; /* CQ arm seq number. */ + uint32_t tunnel; /* Tunnel information. */ } __rte_cache_aligned; /* Verbs Rx queue elements. */ @@ -114,18 +126,19 @@ struct mlx5_rxq_ibv { struct ibv_cq *cq; /* Completion Queue. */ struct ibv_wq *wq; /* Work Queue. */ struct ibv_comp_channel *channel; - struct mlx5_mr *mr; /* Memory Region (for mp). */ }; /* RX queue control descriptor. */ struct mlx5_rxq_ctrl { LIST_ENTRY(mlx5_rxq_ctrl) next; /* Pointer to the next element. */ rte_atomic32_t refcnt; /* Reference counter. */ - struct priv *priv; /* Back pointer to private data. */ struct mlx5_rxq_ibv *ibv; /* Verbs elements. */ + struct priv *priv; /* Back pointer to private data. */ struct mlx5_rxq_data rxq; /* Data path structure. */ unsigned int socket; /* CPU socket ID for allocations. */ + uint32_t tunnel_types[16]; /* Tunnel type counter. */ unsigned int irq:1; /* Whether IRQ is enabled. */ + uint16_t idx; /* Queue index. */ }; /* Indirection table. */ @@ -133,7 +146,7 @@ struct mlx5_ind_table_ibv { LIST_ENTRY(mlx5_ind_table_ibv) next; /* Pointer to the next element. */ rte_atomic32_t refcnt; /* Reference counter. */ struct ibv_rwq_ind_table *ind_table; /**< Indirection table. */ - uint16_t queues_n; /**< Number of queues in the list. */ + uint32_t queues_n; /**< Number of queues in the list. */ uint16_t queues[]; /**< Queue list. */ }; @@ -144,7 +157,9 @@ struct mlx5_hrxq { struct mlx5_ind_table_ibv *ind_table; /* Indirection table. */ struct ibv_qp *qp; /* Verbs queue pair. */ uint64_t hash_fields; /* Verbs Hash fields. */ - uint8_t rss_key_len; /* Hash key length in bytes. */ + uint32_t tunnel; /* Tunnel type. */ + uint32_t rss_level; /* RSS on tunnel level. */ + uint32_t rss_key_len; /* Hash key length in bytes. 
*/ uint8_t rss_key[]; /* Hash key. */ }; @@ -167,18 +182,18 @@ struct mlx5_txq_data { uint16_t tso_en:1; /* When set hardware TSO is enabled. */ uint16_t tunnel_en:1; /* When set TX offload for tunneled packets are supported. */ + uint16_t swp_en:1; /* Whether SW parser is enabled. */ uint16_t mpw_hdr_dseg:1; /* Enable DSEGs in the title WQEBB. */ uint16_t max_inline; /* Multiple of RTE_CACHE_LINE_SIZE to inline. */ uint16_t inline_max_packet_sz; /* Max packet size for inlining. */ - uint16_t mr_cache_idx; /* Index of last hit entry. */ uint32_t qp_num_8s; /* QP number shifted by 8. */ uint64_t offloads; /* Offloads for Tx Queue. */ + struct mlx5_mr_ctrl mr_ctrl; /* MR control descriptor. */ volatile struct mlx5_cqe (*cqes)[]; /* Completion queue. */ volatile void *wqes; /* Work queue (use volatile to write into). */ volatile uint32_t *qp_db; /* Work queue doorbell. */ volatile uint32_t *cq_db; /* Completion queue doorbell. */ volatile void *bf_reg; /* Blueflame register remapped. */ - struct mlx5_mr *mp2mr[MLX5_PMD_TX_MP_CACHE]; /* MR translation table. */ struct rte_mbuf *(*elts)[]; /* TX elements. */ struct mlx5_txq_stats stats; /* TX queue counters. */ } __rte_cache_aligned; @@ -187,6 +202,7 @@ struct mlx5_txq_data { struct mlx5_txq_ibv { LIST_ENTRY(mlx5_txq_ibv) next; /* Pointer to the next element. */ rte_atomic32_t refcnt; /* Reference counter. */ + struct mlx5_txq_ctrl *txq_ctrl; /* Pointer to the control queue. */ struct ibv_cq *cq; /* Completion Queue. */ struct ibv_qp *qp; /* Queue Pair. */ }; @@ -195,14 +211,15 @@ struct mlx5_txq_ibv { struct mlx5_txq_ctrl { LIST_ENTRY(mlx5_txq_ctrl) next; /* Pointer to the next element. */ rte_atomic32_t refcnt; /* Reference counter. */ - struct priv *priv; /* Back pointer to private data. */ unsigned int socket; /* CPU socket ID for allocations. */ unsigned int max_inline_data; /* Max inline data. */ unsigned int max_tso_header; /* Max TSO header size. */ struct mlx5_txq_ibv *ibv; /* Verbs queue object. */ + struct priv *priv; /* Back pointer to private data. */ struct mlx5_txq_data txq; /* Data path structure. */ off_t uar_mmap_offset; /* UAR mmap offset for non-primary process. */ volatile void *bf_reg_orig; /* Blueflame register from verbs. */ + uint16_t idx; /* Queue index. 
*/ }; /* mlx5_rxq.c */ @@ -210,97 +227,126 @@ struct mlx5_txq_ctrl { extern uint8_t rss_hash_default_key[]; extern const size_t rss_hash_default_key_len; -void mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *); -int mlx5_rx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int, - const struct rte_eth_rxconf *, struct rte_mempool *); -void mlx5_rx_queue_release(void *); -int priv_rx_intr_vec_enable(struct priv *priv); -void priv_rx_intr_vec_disable(struct priv *priv); +int mlx5_check_mprq_support(struct rte_eth_dev *dev); +int mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq); +int mlx5_mprq_enabled(struct rte_eth_dev *dev); +int mlx5_mprq_free_mp(struct rte_eth_dev *dev); +int mlx5_mprq_alloc_mp(struct rte_eth_dev *dev); +void mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl); +int mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, + unsigned int socket, const struct rte_eth_rxconf *conf, + struct rte_mempool *mp); +void mlx5_rx_queue_release(void *dpdk_rxq); +int mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev); +void mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev); int mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id); int mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id); -struct mlx5_rxq_ibv *mlx5_priv_rxq_ibv_new(struct priv *, uint16_t); -struct mlx5_rxq_ibv *mlx5_priv_rxq_ibv_get(struct priv *, uint16_t); -int mlx5_priv_rxq_ibv_release(struct priv *, struct mlx5_rxq_ibv *); -int mlx5_priv_rxq_ibv_releasable(struct priv *, struct mlx5_rxq_ibv *); -int mlx5_priv_rxq_ibv_verify(struct priv *); -struct mlx5_rxq_ctrl *mlx5_priv_rxq_new(struct priv *, uint16_t, - uint16_t, unsigned int, - const struct rte_eth_rxconf *, - struct rte_mempool *); -struct mlx5_rxq_ctrl *mlx5_priv_rxq_get(struct priv *, uint16_t); -int mlx5_priv_rxq_release(struct priv *, uint16_t); -int mlx5_priv_rxq_releasable(struct priv *, uint16_t); -int mlx5_priv_rxq_verify(struct priv *); -int rxq_alloc_elts(struct mlx5_rxq_ctrl *); -struct mlx5_ind_table_ibv *mlx5_priv_ind_table_ibv_new(struct priv *, - uint16_t [], - uint16_t); -struct mlx5_ind_table_ibv *mlx5_priv_ind_table_ibv_get(struct priv *, - uint16_t [], - uint16_t); -int mlx5_priv_ind_table_ibv_release(struct priv *, struct mlx5_ind_table_ibv *); -int mlx5_priv_ind_table_ibv_verify(struct priv *); -struct mlx5_hrxq *mlx5_priv_hrxq_new(struct priv *, uint8_t *, uint8_t, - uint64_t, uint16_t [], uint16_t); -struct mlx5_hrxq *mlx5_priv_hrxq_get(struct priv *, uint8_t *, uint8_t, - uint64_t, uint16_t [], uint16_t); -int mlx5_priv_hrxq_release(struct priv *, struct mlx5_hrxq *); -int mlx5_priv_hrxq_ibv_verify(struct priv *); -uint64_t mlx5_priv_get_rx_port_offloads(struct priv *); -uint64_t mlx5_priv_get_rx_queue_offloads(struct priv *); +struct mlx5_rxq_ibv *mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx); +struct mlx5_rxq_ibv *mlx5_rxq_ibv_get(struct rte_eth_dev *dev, uint16_t idx); +int mlx5_rxq_ibv_release(struct mlx5_rxq_ibv *rxq_ibv); +int mlx5_rxq_ibv_releasable(struct mlx5_rxq_ibv *rxq_ibv); +int mlx5_rxq_ibv_verify(struct rte_eth_dev *dev); +struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, + uint16_t desc, unsigned int socket, + const struct rte_eth_rxconf *conf, + struct rte_mempool *mp); +struct mlx5_rxq_ctrl *mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx); +int mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx); +int mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx); +int mlx5_rxq_verify(struct rte_eth_dev *dev); +int rxq_alloc_elts(struct 
mlx5_rxq_ctrl *rxq_ctrl); +int rxq_alloc_mprq_buf(struct mlx5_rxq_ctrl *rxq_ctrl); +struct mlx5_ind_table_ibv *mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, + const uint16_t *queues, + uint32_t queues_n); +struct mlx5_ind_table_ibv *mlx5_ind_table_ibv_get(struct rte_eth_dev *dev, + const uint16_t *queues, + uint32_t queues_n); +int mlx5_ind_table_ibv_release(struct rte_eth_dev *dev, + struct mlx5_ind_table_ibv *ind_tbl); +int mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev); +struct mlx5_hrxq *mlx5_hrxq_new(struct rte_eth_dev *dev, + const uint8_t *rss_key, uint32_t rss_key_len, + uint64_t hash_fields, + const uint16_t *queues, uint32_t queues_n, + uint32_t tunnel, uint32_t rss_level); +struct mlx5_hrxq *mlx5_hrxq_get(struct rte_eth_dev *dev, + const uint8_t *rss_key, uint32_t rss_key_len, + uint64_t hash_fields, + const uint16_t *queues, uint32_t queues_n, + uint32_t tunnel, uint32_t rss_level); +int mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hxrq); +int mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev); +uint64_t mlx5_get_rx_port_offloads(void); +uint64_t mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev); /* mlx5_txq.c */ -int mlx5_tx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int, - const struct rte_eth_txconf *); -void mlx5_tx_queue_release(void *); -int priv_tx_uar_remap(struct priv *priv, int fd); -struct mlx5_txq_ibv *mlx5_priv_txq_ibv_new(struct priv *, uint16_t); -struct mlx5_txq_ibv *mlx5_priv_txq_ibv_get(struct priv *, uint16_t); -int mlx5_priv_txq_ibv_release(struct priv *, struct mlx5_txq_ibv *); -int mlx5_priv_txq_ibv_releasable(struct priv *, struct mlx5_txq_ibv *); -int mlx5_priv_txq_ibv_verify(struct priv *); -struct mlx5_txq_ctrl *mlx5_priv_txq_new(struct priv *, uint16_t, - uint16_t, unsigned int, - const struct rte_eth_txconf *); -struct mlx5_txq_ctrl *mlx5_priv_txq_get(struct priv *, uint16_t); -int mlx5_priv_txq_release(struct priv *, uint16_t); -int mlx5_priv_txq_releasable(struct priv *, uint16_t); -int mlx5_priv_txq_verify(struct priv *); -void txq_alloc_elts(struct mlx5_txq_ctrl *); -uint64_t mlx5_priv_get_tx_port_offloads(struct priv *); +int mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, + unsigned int socket, const struct rte_eth_txconf *conf); +void mlx5_tx_queue_release(void *dpdk_txq); +int mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd); +struct mlx5_txq_ibv *mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx); +struct mlx5_txq_ibv *mlx5_txq_ibv_get(struct rte_eth_dev *dev, uint16_t idx); +int mlx5_txq_ibv_release(struct mlx5_txq_ibv *txq_ibv); +int mlx5_txq_ibv_releasable(struct mlx5_txq_ibv *txq_ibv); +int mlx5_txq_ibv_verify(struct rte_eth_dev *dev); +struct mlx5_txq_ctrl *mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, + uint16_t desc, unsigned int socket, + const struct rte_eth_txconf *conf); +struct mlx5_txq_ctrl *mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx); +int mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx); +int mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx); +int mlx5_txq_verify(struct rte_eth_dev *dev); +void txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl); +uint64_t mlx5_get_tx_port_offloads(struct rte_eth_dev *dev); /* mlx5_rxtx.c */ extern uint32_t mlx5_ptype_table[]; +extern uint8_t mlx5_cksum_table[]; +extern uint8_t mlx5_swp_types_table[]; void mlx5_set_ptype_table(void); -uint16_t mlx5_tx_burst(void *, struct rte_mbuf **, uint16_t); -uint16_t mlx5_tx_burst_mpw(void *, struct rte_mbuf **, uint16_t); -uint16_t 
mlx5_tx_burst_mpw_inline(void *, struct rte_mbuf **, uint16_t); -uint16_t mlx5_tx_burst_empw(void *, struct rte_mbuf **, uint16_t); -uint16_t mlx5_rx_burst(void *, struct rte_mbuf **, uint16_t); -uint16_t removed_tx_burst(void *, struct rte_mbuf **, uint16_t); -uint16_t removed_rx_burst(void *, struct rte_mbuf **, uint16_t); -int mlx5_rx_descriptor_status(void *, uint16_t); -int mlx5_tx_descriptor_status(void *, uint16_t); +void mlx5_set_cksum_table(void); +void mlx5_set_swp_types_table(void); +uint16_t mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, + uint16_t pkts_n); +uint16_t mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, + uint16_t pkts_n); +uint16_t mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts, + uint16_t pkts_n); +uint16_t mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, + uint16_t pkts_n); +uint16_t mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n); +void mlx5_mprq_buf_free_cb(void *addr, void *opaque); +void mlx5_mprq_buf_free(struct mlx5_mprq_buf *buf); +uint16_t mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, + uint16_t pkts_n); +uint16_t removed_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, + uint16_t pkts_n); +uint16_t removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, + uint16_t pkts_n); +int mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset); +int mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset); /* Vectorized version of mlx5_rxtx.c */ -int priv_check_raw_vec_tx_support(struct priv *, struct rte_eth_dev *); -int priv_check_vec_tx_support(struct priv *, struct rte_eth_dev *); -int rxq_check_vec_support(struct mlx5_rxq_data *); -int priv_check_vec_rx_support(struct priv *); -uint16_t mlx5_tx_burst_raw_vec(void *, struct rte_mbuf **, uint16_t); -uint16_t mlx5_tx_burst_vec(void *, struct rte_mbuf **, uint16_t); -uint16_t mlx5_rx_burst_vec(void *, struct rte_mbuf **, uint16_t); +int mlx5_check_raw_vec_tx_support(struct rte_eth_dev *dev); +int mlx5_check_vec_tx_support(struct rte_eth_dev *dev); +int mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq_data); +int mlx5_check_vec_rx_support(struct rte_eth_dev *dev); +uint16_t mlx5_tx_burst_raw_vec(void *dpdk_txq, struct rte_mbuf **pkts, + uint16_t pkts_n); +uint16_t mlx5_tx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts, + uint16_t pkts_n); +uint16_t mlx5_rx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts, + uint16_t pkts_n); /* mlx5_mr.c */ -void mlx5_mp2mr_iter(struct rte_mempool *, void *); -struct mlx5_mr *priv_txq_mp2mr_reg(struct priv *priv, struct mlx5_txq_data *, - struct rte_mempool *, unsigned int); -struct mlx5_mr *mlx5_txq_mp2mr_reg(struct mlx5_txq_data *, struct rte_mempool *, - unsigned int); +void mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl); +uint32_t mlx5_rx_addr2mr_bh(struct mlx5_rxq_data *rxq, uintptr_t addr); +uint32_t mlx5_tx_addr2mr_bh(struct mlx5_txq_data *txq, uintptr_t addr); #ifndef NDEBUG /** @@ -363,9 +409,10 @@ check_cqe(volatile struct mlx5_cqe *cqe, (syndrome == MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR)) return 0; if (!check_cqe_seen(cqe)) { - ERROR("unexpected CQE error %u (0x%02x)" - " syndrome 0x%02x", - op_code, op_code, syndrome); + DRV_LOG(ERR, + "unexpected CQE error %u (0x%02x) syndrome" + " 0x%02x", + op_code, op_code, syndrome); rte_hexdump(stderr, "MLX5 Error CQE:", (const void *)((uintptr_t)err_cqe), sizeof(*err_cqe)); @@ -374,8 +421,8 @@ check_cqe(volatile struct mlx5_cqe *cqe, } else if ((op_code != MLX5_CQE_RESP_SEND) && (op_code != MLX5_CQE_REQ)) { if (!check_cqe_seen(cqe)) 
{ - ERROR("unexpected CQE opcode %u (0x%02x)", - op_code, op_code); + DRV_LOG(ERR, "unexpected CQE opcode %u (0x%02x)", + op_code, op_code); rte_hexdump(stderr, "MLX5 CQE:", (const void *)((uintptr_t)cqe), sizeof(*cqe)); @@ -435,7 +482,7 @@ mlx5_tx_complete(struct mlx5_txq_data *txq) if ((MLX5_CQE_OPCODE(cqe->op_own) == MLX5_CQE_RESP_ERR) || (MLX5_CQE_OPCODE(cqe->op_own) == MLX5_CQE_REQ_ERR)) { if (!check_cqe_seen(cqe)) { - ERROR("unexpected error CQE, TX stopped"); + DRV_LOG(ERR, "unexpected error CQE, Tx stopped"); rte_hexdump(stderr, "MLX5 TXQ:", (const void *)((uintptr_t)txq->wqes), ((1 << txq->wqe_n) * @@ -487,77 +534,65 @@ mlx5_tx_complete(struct mlx5_txq_data *txq) } /** - * Get Memory Pool (MP) from mbuf. If mbuf is indirect, the pool from which - * the cloned mbuf is allocated is returned instead. + * Query LKey from a packet buffer for Rx. No need to flush local caches for Rx + * as mempool is pre-configured and static. * - * @param buf - * Pointer to mbuf. + * @param rxq + * Pointer to Rx queue structure. + * @param addr + * Address to search. * * @return - * Memory pool where data is located for given mbuf. + * Searched LKey on success, UINT32_MAX on no match. */ -static struct rte_mempool * -mlx5_tx_mb2mp(struct rte_mbuf *buf) +static __rte_always_inline uint32_t +mlx5_rx_addr2mr(struct mlx5_rxq_data *rxq, uintptr_t addr) { - if (unlikely(RTE_MBUF_INDIRECT(buf))) - return rte_mbuf_from_indirect(buf)->pool; - return buf->pool; + struct mlx5_mr_ctrl *mr_ctrl = &rxq->mr_ctrl; + uint32_t lkey; + + /* Linear search on MR cache array. */ + lkey = mlx5_mr_lookup_cache(mr_ctrl->cache, &mr_ctrl->mru, + MLX5_MR_CACHE_N, addr); + if (likely(lkey != UINT32_MAX)) + return lkey; + /* Take slower bottom-half (Binary Search) on miss. */ + return mlx5_rx_addr2mr_bh(rxq, addr); } +#define mlx5_rx_mb2mr(rxq, mb) mlx5_rx_addr2mr(rxq, (uintptr_t)((mb)->buf_addr)) + /** - * Get Memory Region (MR) <-> rte_mbuf association from txq->mp2mr[]. - * Add MP to txq->mp2mr[] if it's not registered yet. If mp2mr[] is full, - * remove an entry first. + * Query LKey from a packet buffer for Tx. If not found, add the mempool. * * @param txq - * Pointer to TX queue structure. - * @param[in] mp - * Memory Pool for which a Memory Region lkey must be returned. + * Pointer to Tx queue structure. + * @param addr + * Address to search. * * @return - * mr->lkey on success, (uint32_t)-1 on failure. + * Searched LKey on success, UINT32_MAX on no match. */ static __rte_always_inline uint32_t -mlx5_tx_mb2mr(struct mlx5_txq_data *txq, struct rte_mbuf *mb) +mlx5_tx_addr2mr(struct mlx5_txq_data *txq, uintptr_t addr) { - uint16_t i = txq->mr_cache_idx; - uintptr_t addr = rte_pktmbuf_mtod(mb, uintptr_t); - struct mlx5_mr *mr; - - assert(i < RTE_DIM(txq->mp2mr)); - if (likely(txq->mp2mr[i]->start <= addr && txq->mp2mr[i]->end > addr)) - return txq->mp2mr[i]->lkey; - for (i = 0; (i != RTE_DIM(txq->mp2mr)); ++i) { - if (unlikely(txq->mp2mr[i] == NULL || - txq->mp2mr[i]->mr == NULL)) { - /* Unknown MP, add a new MR for it. */ - break; - } - if (txq->mp2mr[i]->start <= addr && - txq->mp2mr[i]->end > addr) { - assert(txq->mp2mr[i]->lkey != (uint32_t)-1); - txq->mr_cache_idx = i; - return txq->mp2mr[i]->lkey; - } - } - mr = mlx5_txq_mp2mr_reg(txq, mlx5_tx_mb2mp(mb), i); - /* - * Request the reference to use in this queue, the original one is - * kept by the control plane. - */ - if (mr) { - rte_atomic32_inc(&mr->refcnt); - txq->mr_cache_idx = i >= RTE_DIM(txq->mp2mr) ? 
i - 1 : i;
-		return mr->lkey;
-	} else {
-		struct rte_mempool *mp = mlx5_tx_mb2mp(mb);
-
-		WARN("Failed to register mempool 0x%p(%s)",
-		     (void *)mp, mp->name);
-	}
-	return (uint32_t)-1;
+	struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
+	uint32_t lkey;
+
+	/* Check generation bit to see if there's any change on existing MRs. */
+	if (unlikely(*mr_ctrl->dev_gen_ptr != mr_ctrl->cur_gen))
+		mlx5_mr_flush_local_cache(mr_ctrl);
+	/* Linear search on MR cache array. */
+	lkey = mlx5_mr_lookup_cache(mr_ctrl->cache, &mr_ctrl->mru,
+				    MLX5_MR_CACHE_N, addr);
+	if (likely(lkey != UINT32_MAX))
+		return lkey;
+	/* Take slower bottom-half (binary search) on miss. */
+	return mlx5_tx_addr2mr_bh(txq, addr);
 }
+#define mlx5_tx_mb2mr(txq, mb) mlx5_tx_addr2mr(txq, (uintptr_t)((mb)->buf_addr))
+
 /**
  * Ring TX queue doorbell and flush the update if requested.
  *
@@ -599,38 +634,100 @@ mlx5_tx_dbrec(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe)
 }
 
 /**
- * Convert the Checksum offloads to Verbs.
+ * Convert mbuf to Verbs SWP (software parser) types and offsets. The TSO,
+ * VLAN and tunnel states are derived from buf->ol_flags, so they are not
+ * passed as separate parameters.
  *
  * @param txq_data
  *   Pointer to the Tx queue.
  * @param buf
  *   Pointer to the mbuf.
+ * @param offsets
+ *   Pointer to the SWP header offsets.
+ * @param swp_types
+ *   Pointer to the SWP header types.
+ */
+static __rte_always_inline void
+txq_mbuf_to_swp(struct mlx5_txq_data *txq, struct rte_mbuf *buf,
+		uint8_t *offsets, uint8_t *swp_types)
+{
+	const uint64_t vlan = buf->ol_flags & PKT_TX_VLAN_PKT;
+	const uint64_t tunnel = buf->ol_flags & PKT_TX_TUNNEL_MASK;
+	const uint64_t tso = buf->ol_flags & PKT_TX_TCP_SEG;
+	const uint64_t csum_flags = buf->ol_flags & PKT_TX_L4_MASK;
+	const uint64_t inner_ip =
+		buf->ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6);
+	const uint64_t ol_flags_mask = PKT_TX_L4_MASK | PKT_TX_IPV6 |
+				       PKT_TX_OUTER_IPV6;
+	uint16_t idx;
+	uint16_t off;
+
+	if (likely(!txq->swp_en || (tunnel != PKT_TX_TUNNEL_UDP &&
+				    tunnel != PKT_TX_TUNNEL_IP)))
+		return;
+	/*
+	 * The index should have:
+	 * bit[0:1] = PKT_TX_L4_MASK
+	 * bit[4] = PKT_TX_IPV6
+	 * bit[8] = PKT_TX_OUTER_IPV6
+	 * bit[9] = PKT_TX_OUTER_UDP
+	 */
+	idx = (buf->ol_flags & ol_flags_mask) >> 52;
+	if (tunnel == PKT_TX_TUNNEL_UDP)
+		idx |= 1 << 9;
+	*swp_types = mlx5_swp_types_table[idx];
+	/*
+	 * Set offsets for SW parser. Since ConnectX-5, SW parser just
+	 * complements HW parser. SW parser starts to engage only if HW parser
+	 * can't reach a header. For the older devices, HW parser will not
+	 * kick in if any of the SWP offsets is set. Therefore, all of the L3
+	 * offsets should be set regardless of HW offload.
+	 */
+	off = buf->outer_l2_len + (vlan ? sizeof(struct vlan_hdr) : 0);
+	offsets[1] = off >> 1; /* Outer L3 offset. */
+	off += buf->outer_l3_len;
+	if (tunnel == PKT_TX_TUNNEL_UDP)
+		offsets[0] = off >> 1; /* Outer L4 offset. */
+	if (inner_ip) {
+		off += buf->l2_len;
+		offsets[3] = off >> 1; /* Inner L3 offset. */
+		if (csum_flags == PKT_TX_TCP_CKSUM || tso ||
+		    csum_flags == PKT_TX_UDP_CKSUM) {
+			off += buf->l3_len;
+			offsets[2] = off >> 1; /* Inner L4 offset. */
+		}
+	}
+}
+
+/**
+ * Convert the Checksum offloads to Verbs.
+ *
+ * @param buf
+ *   Pointer to the mbuf.
  *
  * @return
- *   the converted cs_flags.
+ *   Converted checksum flags.
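Because the software-parser offsets above are programmed in 2-byte units (hence the off >> 1), a worked example may help. Assuming a plain UDP tunnel over IPv4 (PKT_TX_TUNNEL_UDP) with the usual DPDK tunnel-Tx length conventions, outer_l2_len = 14, outer_l3_len = 20, l2_len = outer UDP 8 + inner Ethernet 14 = 22 and l3_len = 20; all values are illustrative:

/* Header byte offsets and the stored half-word values:
 *   outer L3 at byte 14 -> offsets[1] = 7
 *   outer L4 at byte 34 -> offsets[0] = 17  (PKT_TX_TUNNEL_UDP only)
 *   inner L3 at byte 56 -> offsets[3] = 28
 *   inner L4 at byte 76 -> offsets[2] = 38  (L4 checksum or TSO only)
 */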
*/ static __rte_always_inline uint8_t -txq_ol_cksum_to_cs(struct mlx5_txq_data *txq_data, struct rte_mbuf *buf) +txq_ol_cksum_to_cs(struct rte_mbuf *buf) { - uint8_t cs_flags = 0; - - /* Should we enable HW CKSUM offload */ - if (buf->ol_flags & - (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | - PKT_TX_OUTER_IP_CKSUM)) { - if (txq_data->tunnel_en && - (buf->ol_flags & - (PKT_TX_TUNNEL_GRE | PKT_TX_TUNNEL_VXLAN))) { - cs_flags = MLX5_ETH_WQE_L3_INNER_CSUM | - MLX5_ETH_WQE_L4_INNER_CSUM; - if (buf->ol_flags & PKT_TX_OUTER_IP_CKSUM) - cs_flags |= MLX5_ETH_WQE_L3_CSUM; - } else { - cs_flags = MLX5_ETH_WQE_L3_CSUM | - MLX5_ETH_WQE_L4_CSUM; - } - } - return cs_flags; + uint32_t idx; + uint8_t is_tunnel = !!(buf->ol_flags & PKT_TX_TUNNEL_MASK); + const uint64_t ol_flags_mask = PKT_TX_TCP_SEG | PKT_TX_L4_MASK | + PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM; + + /* + * The index should have: + * bit[0] = PKT_TX_TCP_SEG + * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM + * bit[4] = PKT_TX_IP_CKSUM + * bit[8] = PKT_TX_OUTER_IP_CKSUM + * bit[9] = tunnel + */ + idx = ((buf->ol_flags & ol_flags_mask) >> 50) | (!!is_tunnel << 9); + return mlx5_cksum_table[idx]; } /** diff --git a/drivers/net/mlx5/mlx5_rxtx_vec.c b/drivers/net/mlx5/mlx5_rxtx_vec.c index b66c2916..0a4aed8f 100644 --- a/drivers/net/mlx5/mlx5_rxtx_vec.c +++ b/drivers/net/mlx5/mlx5_rxtx_vec.c @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2017 6WIND S.A. - * Copyright 2017 Mellanox. + * Copyright 2017 Mellanox Technologies, Ltd */ #include @@ -42,8 +42,6 @@ /** * Count the number of packets having same ol_flags and calculate cs_flags. * - * @param txq - * Pointer to TX queue structure. * @param pkts * Pointer to array of packets. * @param pkts_n @@ -55,8 +53,7 @@ * Number of packets having same ol_flags. */ static inline unsigned int -txq_calc_offload(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, - uint16_t pkts_n, uint8_t *cs_flags) +txq_calc_offload(struct rte_mbuf **pkts, uint16_t pkts_n, uint8_t *cs_flags) { unsigned int pos; const uint64_t ol_mask = @@ -70,7 +67,7 @@ txq_calc_offload(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, for (pos = 1; pos < pkts_n; ++pos) if ((pkts[pos]->ol_flags ^ pkts[0]->ol_flags) & ol_mask) break; - *cs_flags = txq_ol_cksum_to_cs(txq, pkts[0]); + *cs_flags = txq_ol_cksum_to_cs(pkts[0]); return pos; } @@ -141,7 +138,7 @@ mlx5_tx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) if (txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS) n = txq_count_contig_single_seg(&pkts[nb_tx], n); if (txq->offloads & MLX5_VEC_TX_CKSUM_OFFLOAD_CAP) - n = txq_calc_offload(txq, &pkts[nb_tx], n, &cs_flags); + n = txq_calc_offload(&pkts[nb_tx], n, &cs_flags); ret = txq_burst_v(txq, &pkts[nb_tx], n, cs_flags); nb_tx += ret; if (!ret) @@ -223,17 +220,14 @@ mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) /** * Check Tx queue flags are set for raw vectorized Tx. * - * @param priv - * Pointer to private structure. * @param dev - * Pointer to rte_eth_dev structure. + * Pointer to Ethernet device. * * @return * 1 if supported, negative errno value if not. */ int __attribute__((cold)) -priv_check_raw_vec_tx_support(__rte_unused struct priv *priv, - struct rte_eth_dev *dev) +mlx5_check_raw_vec_tx_support(struct rte_eth_dev *dev) { uint64_t offloads = dev->data->dev_conf.txmode.offloads; @@ -246,17 +240,16 @@ priv_check_raw_vec_tx_support(__rte_unused struct priv *priv, /** * Check a device can support vectorized TX. * - * @param priv - * Pointer to private structure. 
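The table-driven txq_ol_cksum_to_cs() above replaces the old branch ladder with one shift, one OR and one load. A worked index, again assuming the 18.05 ol_flags bit positions, for a tunneled packet requesting inner TCP checksum, inner IP checksum and outer IP checksum:

/* (ol_flags & ol_flags_mask) >> 50 gives:
 *   PKT_TX_TCP_CKSUM      (bit 52) -> idx bit 2 (0x004)
 *   PKT_TX_IP_CKSUM       (bit 54) -> idx bit 4 (0x010)
 *   PKT_TX_OUTER_IP_CKSUM (bit 58) -> idx bit 8 (0x100)
 * and the tunnel predicate adds bit 9 (0x200), so idx = 0x314.
 * mlx5_set_cksum_table() filled that slot with MLX5_ETH_WQE_L3_CSUM |
 * MLX5_ETH_WQE_L3_INNER_CSUM | MLX5_ETH_WQE_L4_INNER_CSUM. */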
* @param dev - * Pointer to rte_eth_dev structure. + * Pointer to Ethernet device. * * @return * 1 if supported, negative errno value if not. */ int __attribute__((cold)) -priv_check_vec_tx_support(struct priv *priv, struct rte_eth_dev *dev) +mlx5_check_vec_tx_support(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; uint64_t offloads = dev->data->dev_conf.txmode.offloads; if (!priv->config.tx_vec_en || @@ -277,11 +270,13 @@ priv_check_vec_tx_support(struct priv *priv, struct rte_eth_dev *dev) * 1 if supported, negative errno value if not. */ int __attribute__((cold)) -rxq_check_vec_support(struct mlx5_rxq_data *rxq) +mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq) { struct mlx5_rxq_ctrl *ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq); + if (mlx5_mprq_enabled(ETH_DEV(ctrl->priv))) + return -ENOTSUP; if (!ctrl->priv->config.rx_vec_en || rxq->sges_n != 0) return -ENOTSUP; return 1; @@ -290,26 +285,29 @@ rxq_check_vec_support(struct mlx5_rxq_data *rxq) /** * Check a device can support vectorized RX. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * * @return * 1 if supported, negative errno value if not. */ int __attribute__((cold)) -priv_check_vec_rx_support(struct priv *priv) +mlx5_check_vec_rx_support(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; uint16_t i; if (!priv->config.rx_vec_en) return -ENOTSUP; + if (mlx5_mprq_enabled(dev)) + return -ENOTSUP; /* All the configured queues should support. */ for (i = 0; i < priv->rxqs_n; ++i) { struct mlx5_rxq_data *rxq = (*priv->rxqs)[i]; if (!rxq) continue; - if (rxq_check_vec_support(rxq) < 0) + if (mlx5_rxq_check_vec_support(rxq) < 0) break; } if (i != priv->rxqs_n) diff --git a/drivers/net/mlx5/mlx5_rxtx_vec.h b/drivers/net/mlx5/mlx5_rxtx_vec.h index 44856bbf..598dc751 100644 --- a/drivers/net/mlx5/mlx5_rxtx_vec.h +++ b/drivers/net/mlx5/mlx5_rxtx_vec.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2017 6WIND S.A. - * Copyright 2017 Mellanox. + * Copyright 2017 Mellanox Technologies, Ltd */ #ifndef RTE_PMD_MLX5_RXTX_VEC_H_ @@ -87,7 +87,8 @@ mlx5_rx_replenish_bulk_mbuf(struct mlx5_rxq_data *rxq, uint16_t n) const uint16_t q_mask = q_n - 1; uint16_t elts_idx = rxq->rq_ci & q_mask; struct rte_mbuf **elts = &(*rxq->elts)[elts_idx]; - volatile struct mlx5_wqe_data_seg *wq = &(*rxq->wqes)[elts_idx]; + volatile struct mlx5_wqe_data_seg *wq = + &((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[elts_idx]; unsigned int i; assert(n >= MLX5_VPMD_RXQ_RPLNSH_THRESH); @@ -99,9 +100,13 @@ mlx5_rx_replenish_bulk_mbuf(struct mlx5_rxq_data *rxq, uint16_t n) rxq->stats.rx_nombuf += n; return; } - for (i = 0; i < n; ++i) + for (i = 0; i < n; ++i) { wq[i].addr = rte_cpu_to_be_64((uintptr_t)elts[i]->buf_addr + RTE_PKTMBUF_HEADROOM); + /* If there's only one MR, no need to replace LKey in WQE. */ + if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1)) + wq[i].lkey = mlx5_rx_mb2mr(rxq, elts[i]); + } rxq->rq_ci += n; /* Prevent overflowing into consumed mbufs. */ elts_idx = rxq->rq_ci & q_mask; diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h index bbe1818e..71a5eaf2 100644 --- a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h +++ b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2017 6WIND S.A. - * Copyright 2017 Mellanox. 
+ * Copyright 2017 Mellanox Technologies, Ltd */ #ifndef RTE_PMD_MLX5_RXTX_VEC_NEON_H_ @@ -142,7 +142,7 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, break; wqe = &((volatile struct mlx5_wqe64 *) txq->wqes)[wqe_ci & wq_mask].hdr; - cs_flags = txq_ol_cksum_to_cs(txq, buf); + cs_flags = txq_ol_cksum_to_cs(buf); /* Title WQEBB pointer. */ t_wqe = (uint8x16_t *)wqe; dseg = (uint8_t *)(wqe + 1); @@ -167,8 +167,8 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, vst1q_u8((void *)t_wqe, ctrl); /* Fill ESEG in the header. */ vst1q_u16((void *)(t_wqe + 1), - (uint16x8_t) { 0, 0, cs_flags, rte_cpu_to_be_16(len), - 0, 0, 0, 0 }); + ((uint16x8_t) { 0, 0, cs_flags, rte_cpu_to_be_16(len), + 0, 0, 0, 0 })); txq->wqe_ci = wqe_ci; } if (!n) @@ -300,10 +300,10 @@ txq_burst_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, uint16_t pkts_n, vst1q_u8((void *)t_wqe, ctrl); /* Fill ESEG in the header. */ vst1q_u8((void *)(t_wqe + 1), - (uint8x16_t) { 0, 0, 0, 0, - cs_flags, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0 }); + ((uint8x16_t) { 0, 0, 0, 0, + cs_flags, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0 })); #ifdef MLX5_PMD_SOFT_COUNTERS txq->stats.opackets += pkts_n; #endif @@ -551,6 +551,7 @@ rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq, const uint64x1_t mbuf_init = vld1_u64(&rxq->mbuf_initializer); const uint64x1_t r32_mask = vcreate_u64(0xffffffff); uint64x2_t rearm0, rearm1, rearm2, rearm3; + uint8_t pt_idx0, pt_idx1, pt_idx2, pt_idx3; if (rxq->mark) { const uint32x4_t ft_def = vdupq_n_u32(MLX5_FLOW_MARK_DEFAULT); @@ -583,14 +584,18 @@ rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq, ptype = vshrn_n_u32(ptype_info, 10); /* Errored packets will have RTE_PTYPE_ALL_MASK. */ ptype = vorr_u16(ptype, op_err); - pkts[0]->packet_type = - mlx5_ptype_table[vget_lane_u8(vreinterpret_u8_u16(ptype), 6)]; - pkts[1]->packet_type = - mlx5_ptype_table[vget_lane_u8(vreinterpret_u8_u16(ptype), 4)]; - pkts[2]->packet_type = - mlx5_ptype_table[vget_lane_u8(vreinterpret_u8_u16(ptype), 2)]; - pkts[3]->packet_type = - mlx5_ptype_table[vget_lane_u8(vreinterpret_u8_u16(ptype), 0)]; + pt_idx0 = vget_lane_u8(vreinterpret_u8_u16(ptype), 6); + pt_idx1 = vget_lane_u8(vreinterpret_u8_u16(ptype), 4); + pt_idx2 = vget_lane_u8(vreinterpret_u8_u16(ptype), 2); + pt_idx3 = vget_lane_u8(vreinterpret_u8_u16(ptype), 0); + pkts[0]->packet_type = mlx5_ptype_table[pt_idx0] | + !!(pt_idx0 & (1 << 6)) * rxq->tunnel; + pkts[1]->packet_type = mlx5_ptype_table[pt_idx1] | + !!(pt_idx1 & (1 << 6)) * rxq->tunnel; + pkts[2]->packet_type = mlx5_ptype_table[pt_idx2] | + !!(pt_idx2 & (1 << 6)) * rxq->tunnel; + pkts[3]->packet_type = mlx5_ptype_table[pt_idx3] | + !!(pt_idx3 & (1 << 6)) * rxq->tunnel; /* Fill flags for checksum and VLAN. */ pinfo = vandq_u32(ptype_info, ptype_ol_mask); pinfo = vreinterpretq_u32_u8( diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h index c088bcb5..3e985d61 100644 --- a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h +++ b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2017 6WIND S.A. - * Copyright 2017 Mellanox. + * Copyright 2017 Mellanox Technologies, Ltd */ #ifndef RTE_PMD_MLX5_RXTX_VEC_SSE_H_ @@ -144,7 +144,7 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, } wqe = &((volatile struct mlx5_wqe64 *) txq->wqes)[wqe_ci & wq_mask].hdr; - cs_flags = txq_ol_cksum_to_cs(txq, buf); + cs_flags = txq_ol_cksum_to_cs(buf); /* Title WQEBB pointer. 
*/ t_wqe = (__m128i *)wqe; dseg = (__m128i *)(wqe + 1); @@ -542,6 +542,7 @@ rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq, __m128i cqes[4], const __m128i mbuf_init = _mm_loadl_epi64((__m128i *)&rxq->mbuf_initializer); __m128i rearm0, rearm1, rearm2, rearm3; + uint8_t pt_idx0, pt_idx1, pt_idx2, pt_idx3; /* Extract pkt_info field. */ pinfo0 = _mm_unpacklo_epi32(cqes[0], cqes[1]); @@ -595,10 +596,18 @@ rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq, __m128i cqes[4], /* Errored packets will have RTE_PTYPE_ALL_MASK. */ op_err = _mm_srli_epi16(op_err, 8); ptype = _mm_or_si128(ptype, op_err); - pkts[0]->packet_type = mlx5_ptype_table[_mm_extract_epi8(ptype, 0)]; - pkts[1]->packet_type = mlx5_ptype_table[_mm_extract_epi8(ptype, 2)]; - pkts[2]->packet_type = mlx5_ptype_table[_mm_extract_epi8(ptype, 4)]; - pkts[3]->packet_type = mlx5_ptype_table[_mm_extract_epi8(ptype, 6)]; + pt_idx0 = _mm_extract_epi8(ptype, 0); + pt_idx1 = _mm_extract_epi8(ptype, 2); + pt_idx2 = _mm_extract_epi8(ptype, 4); + pt_idx3 = _mm_extract_epi8(ptype, 6); + pkts[0]->packet_type = mlx5_ptype_table[pt_idx0] | + !!(pt_idx0 & (1 << 6)) * rxq->tunnel; + pkts[1]->packet_type = mlx5_ptype_table[pt_idx1] | + !!(pt_idx1 & (1 << 6)) * rxq->tunnel; + pkts[2]->packet_type = mlx5_ptype_table[pt_idx2] | + !!(pt_idx2 & (1 << 6)) * rxq->tunnel; + pkts[3]->packet_type = mlx5_ptype_table[pt_idx3] | + !!(pt_idx3 & (1 << 6)) * rxq->tunnel; /* Fill flags for checksum and VLAN. */ pinfo = _mm_and_si128(pinfo, ptype_ol_mask); pinfo = _mm_shuffle_epi8(cv_flag_sel, pinfo); diff --git a/drivers/net/mlx5/mlx5_socket.c b/drivers/net/mlx5/mlx5_socket.c index 61c1a4a5..99297d5c 100644 --- a/drivers/net/mlx5/mlx5_socket.c +++ b/drivers/net/mlx5/mlx5_socket.c @@ -1,5 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2016 6WIND S.A. + * Copyright 2016 Mellanox Technologies, Ltd */ #define _GNU_SOURCE @@ -18,21 +19,21 @@ /** * Initialise the socket to communicate with the secondary process * - * @param[in] priv - * Pointer to private structure. + * @param[in] dev + * Pointer to Ethernet device. * * @return - * 0 on success, errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ int -priv_socket_init(struct priv *priv) +mlx5_socket_init(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; struct sockaddr_un sun = { .sun_family = AF_UNIX, }; int ret; int flags; - struct stat file_stat; /* * Initialise the socket to communicate with the secondary @@ -40,70 +41,77 @@ priv_socket_init(struct priv *priv) */ ret = socket(AF_UNIX, SOCK_STREAM, 0); if (ret < 0) { - WARN("secondary process not supported: %s", strerror(errno)); - return ret; + rte_errno = errno; + DRV_LOG(WARNING, "port %u secondary process not supported: %s", + dev->data->port_id, strerror(errno)); + goto error; } priv->primary_socket = ret; flags = fcntl(priv->primary_socket, F_GETFL, 0); - if (flags == -1) - goto out; + if (flags == -1) { + rte_errno = errno; + goto error; + } ret = fcntl(priv->primary_socket, F_SETFL, flags | O_NONBLOCK); - if (ret < 0) - goto out; + if (ret < 0) { + rte_errno = errno; + goto error; + } snprintf(sun.sun_path, sizeof(sun.sun_path), "/var/tmp/%s_%d", MLX5_DRIVER_NAME, priv->primary_socket); - ret = stat(sun.sun_path, &file_stat); - if (!ret) - claim_zero(remove(sun.sun_path)); + remove(sun.sun_path); ret = bind(priv->primary_socket, (const struct sockaddr *)&sun, sizeof(sun)); if (ret < 0) { - WARN("cannot bind socket, secondary process not supported: %s", - strerror(errno)); + rte_errno = errno; + DRV_LOG(WARNING, + "port %u cannot bind socket, secondary process not" + " supported: %s", + dev->data->port_id, strerror(errno)); goto close; } ret = listen(priv->primary_socket, 0); if (ret < 0) { - WARN("Secondary process not supported: %s", strerror(errno)); + rte_errno = errno; + DRV_LOG(WARNING, "port %u secondary process not supported: %s", + dev->data->port_id, strerror(errno)); goto close; } - return ret; + return 0; close: remove(sun.sun_path); -out: +error: claim_zero(close(priv->primary_socket)); priv->primary_socket = 0; - return -(ret); + return -rte_errno; } /** * Un-Initialise the socket to communicate with the secondary process * - * @param[in] priv - * Pointer to private structure. - * - * @return - * 0 on success, errno value on failure. + * @param[in] dev */ -int -priv_socket_uninit(struct priv *priv) +void +mlx5_socket_uninit(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; + MKSTR(path, "/var/tmp/%s_%d", MLX5_DRIVER_NAME, priv->primary_socket); claim_zero(close(priv->primary_socket)); priv->primary_socket = 0; claim_zero(remove(path)); - return 0; } /** * Handle socket interrupts. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. */ void -priv_socket_handle(struct priv *priv) +mlx5_socket_handle(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; int conn_sock; int ret = 0; struct cmsghdr *cmsg = NULL; @@ -125,25 +133,30 @@ priv_socket_handle(struct priv *priv) /* Accept the connection from the client. 
*/ conn_sock = accept(priv->primary_socket, NULL, NULL); if (conn_sock < 0) { - WARN("connection failed: %s", strerror(errno)); + DRV_LOG(WARNING, "port %u connection failed: %s", + dev->data->port_id, strerror(errno)); return; } ret = setsockopt(conn_sock, SOL_SOCKET, SO_PASSCRED, &(int){1}, sizeof(int)); if (ret < 0) { - WARN("cannot change socket options"); - goto out; + ret = errno; + DRV_LOG(WARNING, "port %u cannot change socket options: %s", + dev->data->port_id, strerror(ret)); + goto error; } ret = recvmsg(conn_sock, &msg, MSG_WAITALL); if (ret < 0) { - WARN("received an empty message: %s", strerror(errno)); - goto out; + ret = errno; + DRV_LOG(WARNING, "port %u received an empty message: %s", + dev->data->port_id, strerror(ret)); + goto error; } /* Expect to receive credentials only. */ cmsg = CMSG_FIRSTHDR(&msg); if (cmsg == NULL) { - WARN("no message"); - goto out; + DRV_LOG(WARNING, "port %u no message", dev->data->port_id); + goto error; } if ((cmsg->cmsg_type == SCM_CREDENTIALS) && (cmsg->cmsg_len >= sizeof(*cred))) { @@ -152,14 +165,16 @@ priv_socket_handle(struct priv *priv) } cmsg = CMSG_NXTHDR(&msg, cmsg); if (cmsg != NULL) { - WARN("Message wrongly formatted"); - goto out; + DRV_LOG(WARNING, "port %u message wrongly formatted", + dev->data->port_id); + goto error; } /* Make sure all the ancillary data was received and valid. */ if ((cred == NULL) || (cred->uid != getuid()) || (cred->gid != getgid())) { - WARN("wrong credentials"); - goto out; + DRV_LOG(WARNING, "port %u wrong credentials", + dev->data->port_id); + goto error; } /* Set-up the ancillary data. */ cmsg = CMSG_FIRSTHDR(&msg); @@ -171,27 +186,29 @@ priv_socket_handle(struct priv *priv) *fd = priv->ctx->cmd_fd; ret = sendmsg(conn_sock, &msg, 0); if (ret < 0) - WARN("cannot send response"); -out: + DRV_LOG(WARNING, "port %u cannot send response", + dev->data->port_id); +error: close(conn_sock); } /** * Connect to the primary process. * - * @param[in] priv - * Pointer to private structure. + * @param[in] dev + * Pointer to Ethernet device. * * @return - * fd on success, negative errno value on failure. + * fd on success, negative errno value otherwise and rte_errno is set.
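/*
 * The handler above validates the peer's SCM_CREDENTIALS and replies with
 * the Verbs command file descriptor as ancillary data. A minimal
 * self-contained sketch of that reply step: send one fd over a connected
 * AF_UNIX socket with SCM_RIGHTS. send_fd() is a hypothetical helper for
 * illustration; the driver builds the equivalent msghdr inline.
 */
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

static int
send_fd(int sock, int fd)
{
	char buf = 0;
	struct iovec iov = { .iov_base = &buf, .iov_len = 1 };
	union {
		struct cmsghdr hdr;
		char room[CMSG_SPACE(sizeof(int))];
	} control;
	struct msghdr msg;
	struct cmsghdr *cmsg;

	memset(&control, 0, sizeof(control));
	memset(&msg, 0, sizeof(msg));
	msg.msg_iov = &iov; /* At least one byte of real payload. */
	msg.msg_iovlen = 1;
	msg.msg_control = &control;
	msg.msg_controllen = sizeof(control);
	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_RIGHTS; /* Kernel duplicates the fd. */
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
	memcpy(CMSG_DATA(cmsg), &fd, sizeof(int));
	return sendmsg(sock, &msg, 0) < 0 ? -1 : 0;
}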
*/ int -priv_socket_connect(struct priv *priv) +mlx5_socket_connect(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; struct sockaddr_un sun = { .sun_family = AF_UNIX, }; - int socket_fd; + int socket_fd = -1; int *fd = NULL; int ret; struct ucred *cred; @@ -211,57 +228,75 @@ priv_socket_connect(struct priv *priv) ret = socket(AF_UNIX, SOCK_STREAM, 0); if (ret < 0) { - WARN("cannot connect to primary"); - return ret; + rte_errno = errno; + DRV_LOG(WARNING, "port %u cannot connect to primary", + dev->data->port_id); + goto error; } socket_fd = ret; snprintf(sun.sun_path, sizeof(sun.sun_path), "/var/tmp/%s_%d", MLX5_DRIVER_NAME, priv->primary_socket); ret = connect(socket_fd, (const struct sockaddr *)&sun, sizeof(sun)); if (ret < 0) { - WARN("cannot connect to primary"); - goto out; + rte_errno = errno; + DRV_LOG(WARNING, "port %u cannot connect to primary", + dev->data->port_id); + goto error; } cmsg = CMSG_FIRSTHDR(&msg); if (cmsg == NULL) { - DEBUG("cannot get first message"); - goto out; + rte_errno = EINVAL; + DRV_LOG(DEBUG, "port %u cannot get first message", + dev->data->port_id); + goto error; } cmsg->cmsg_level = SOL_SOCKET; cmsg->cmsg_type = SCM_CREDENTIALS; cmsg->cmsg_len = CMSG_LEN(sizeof(*cred)); cred = (struct ucred *)CMSG_DATA(cmsg); if (cred == NULL) { - DEBUG("no credentials received"); - goto out; + rte_errno = EINVAL; + DRV_LOG(DEBUG, "port %u no credentials received", + dev->data->port_id); + goto error; } cred->pid = getpid(); cred->uid = getuid(); cred->gid = getgid(); ret = sendmsg(socket_fd, &msg, MSG_DONTWAIT); if (ret < 0) { - WARN("cannot send credentials to primary: %s", - strerror(errno)); - goto out; + rte_errno = errno; + DRV_LOG(WARNING, + "port %u cannot send credentials to primary: %s", + dev->data->port_id, strerror(errno)); + goto error; } ret = recvmsg(socket_fd, &msg, MSG_WAITALL); if (ret <= 0) { - WARN("no message from primary: %s", strerror(errno)); - goto out; + rte_errno = errno; + DRV_LOG(WARNING, "port %u no message from primary: %s", + dev->data->port_id, strerror(errno)); + goto error; } cmsg = CMSG_FIRSTHDR(&msg); if (cmsg == NULL) { - WARN("No file descriptor received"); - goto out; + rte_errno = EINVAL; + DRV_LOG(WARNING, "port %u no file descriptor received", + dev->data->port_id); + goto error; } fd = (int *)CMSG_DATA(cmsg); - if (*fd <= 0) { - WARN("no file descriptor received: %s", strerror(errno)); - ret = *fd; - goto out; + if (*fd < 0) { + DRV_LOG(WARNING, "port %u no file descriptor received: %s", + dev->data->port_id, strerror(errno)); + rte_errno = EINVAL; + goto error; } ret = *fd; -out: close(socket_fd); return ret; +error: + if (socket_fd != -1) + close(socket_fd); + return -rte_errno; } diff --git a/drivers/net/mlx5/mlx5_stats.c b/drivers/net/mlx5/mlx5_stats.c index 378472a7..875dd102 100644 --- a/drivers/net/mlx5/mlx5_stats.c +++ b/drivers/net/mlx5/mlx5_stats.c @@ -1,10 +1,13 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2015 6WIND S.A. - * Copyright 2015 Mellanox. + * Copyright 2015 Mellanox Technologies, Ltd */ +#include #include #include +#include +#include #include #include @@ -19,6 +22,7 @@ struct mlx5_counter_ctrl { char dpdk_name[RTE_ETH_XSTATS_NAME_SIZE]; /* Name of the counter on the device table. */ char ctr_name[RTE_ETH_XSTATS_NAME_SIZE]; + uint32_t ib:1; /**< Nonzero for IB counters.
*/ }; static const struct mlx5_counter_ctrl mlx5_counters_init[] = { @@ -93,6 +97,7 @@ static const struct mlx5_counter_ctrl mlx5_counters_init[] = { { .dpdk_name = "rx_out_of_buffer", .ctr_name = "out_of_buffer", + .ib = 1, }, { .dpdk_name = "tx_packets_phy", @@ -117,39 +122,56 @@ static const unsigned int xstats_n = RTE_DIM(mlx5_counters_init); /** * Read device counters table. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param[out] stats * Counters table output buffer. * * @return - * 0 on success and stats is filled, negative on error. + * 0 on success and stats is filled, negative errno value otherwise and + * rte_errno is set. */ static int -priv_read_dev_counters(struct priv *priv, uint64_t *stats) +mlx5_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats) { + struct priv *priv = dev->data->dev_private; struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; unsigned int i; struct ifreq ifr; unsigned int stats_sz = xstats_ctrl->stats_n * sizeof(uint64_t); unsigned char et_stat_buf[sizeof(struct ethtool_stats) + stats_sz]; struct ethtool_stats *et_stats = (struct ethtool_stats *)et_stat_buf; + int ret; et_stats->cmd = ETHTOOL_GSTATS; et_stats->n_stats = xstats_ctrl->stats_n; ifr.ifr_data = (caddr_t)et_stats; - if (priv_ifreq(priv, SIOCETHTOOL, &ifr) != 0) { - WARN("unable to read statistic values from device"); - return -1; + ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); + if (ret) { + DRV_LOG(WARNING, + "port %u unable to read statistic values from device", + dev->data->port_id); + return ret; } for (i = 0; i != xstats_n; ++i) { - if (priv_is_ib_cntr(mlx5_counters_init[i].ctr_name)) - priv_get_cntr_sysfs(priv, - mlx5_counters_init[i].ctr_name, - &stats[i]); - else + if (mlx5_counters_init[i].ib) { + FILE *file; + MKSTR(path, "%s/ports/1/hw_counters/%s", + priv->ibdev_path, + mlx5_counters_init[i].ctr_name); + + file = fopen(path, "rb"); + if (file) { + int n = fscanf(file, "%" SCNu64, &stats[i]); + + fclose(file); + if (n != 1) + stats[i] = 0; + } + } else { stats[i] = (uint64_t) et_stats->data[xstats_ctrl->dev_table_idx[i]]; + } } return 0; } @@ -157,22 +179,26 @@ priv_read_dev_counters(struct priv *priv, uint64_t *stats) /** * Query the number of statistics provided by ETHTOOL. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * * @return - * Number of statistics on success, -1 on error. + * Number of statistics on success, negative errno value otherwise and + * rte_errno is set. */ static int -priv_ethtool_get_stats_n(struct priv *priv) { +mlx5_ethtool_get_stats_n(struct rte_eth_dev *dev) { struct ethtool_drvinfo drvinfo; struct ifreq ifr; + int ret; drvinfo.cmd = ETHTOOL_GDRVINFO; ifr.ifr_data = (caddr_t)&drvinfo; - if (priv_ifreq(priv, SIOCETHTOOL, &ifr) != 0) { - WARN("unable to query number of statistics"); - return -1; + ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); + if (ret) { + DRV_LOG(WARNING, "port %u unable to query number of statistics", + dev->data->port_id); + return ret; } return drvinfo.n_stats; } @@ -180,12 +206,13 @@ priv_ethtool_get_stats_n(struct priv *priv) { /** * Init the structures to read device counters. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. 
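/*
 * mlx5_read_dev_counters() above pulls IB counters such as out_of_buffer
 * from sysfs files under <ibdev_path>/ports/1/hw_counters/. A minimal
 * sketch of that read, assuming the path is already built (the driver
 * uses its MKSTR() macro for that): parse a single 64-bit value, return
 * 0 on success and -1 on open or parse failure.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static int
read_sysfs_counter(const char *path, uint64_t *value)
{
	FILE *file = fopen(path, "rb");
	int n;

	if (file == NULL)
		return -1;
	n = fscanf(file, "%" SCNu64, value); /* Single decimal counter. */
	fclose(file);
	return n == 1 ? 0 : -1;
}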
*/ void -priv_xstats_init(struct priv *priv) +mlx5_xstats_init(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; unsigned int i; unsigned int j; @@ -193,12 +220,15 @@ priv_xstats_init(struct priv *priv) struct ethtool_gstrings *strings = NULL; unsigned int dev_stats_n; unsigned int str_sz; + int ret; - dev_stats_n = priv_ethtool_get_stats_n(priv); - if (dev_stats_n < 1) { - WARN("no extended statistics available"); + ret = mlx5_ethtool_get_stats_n(dev); + if (ret < 0) { + DRV_LOG(WARNING, "port %u no extended statistics available", + dev->data->port_id); return; } + dev_stats_n = ret; xstats_ctrl->stats_n = dev_stats_n; /* Allocate memory to grab stat names and values. */ str_sz = dev_stats_n * ETH_GSTRING_LEN; @@ -206,15 +236,18 @@ priv_xstats_init(struct priv *priv) rte_malloc("xstats_strings", str_sz + sizeof(struct ethtool_gstrings), 0); if (!strings) { - WARN("unable to allocate memory for xstats"); + DRV_LOG(WARNING, "port %u unable to allocate memory for xstats", + dev->data->port_id); return; } strings->cmd = ETHTOOL_GSTRINGS; strings->string_set = ETH_SS_STATS; strings->len = dev_stats_n; ifr.ifr_data = (caddr_t)strings; - if (priv_ifreq(priv, SIOCETHTOOL, &ifr) != 0) { - WARN("unable to get statistic names"); + ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); + if (ret) { + DRV_LOG(WARNING, "port %u unable to get statistic names", + dev->data->port_id); goto free; } for (j = 0; j != xstats_n; ++j) @@ -232,68 +265,67 @@ priv_xstats_init(struct priv *priv) } } for (j = 0; j != xstats_n; ++j) { - if (priv_is_ib_cntr(mlx5_counters_init[j].ctr_name)) + if (mlx5_counters_init[j].ib) continue; if (xstats_ctrl->dev_table_idx[j] >= dev_stats_n) { - WARN("counter \"%s\" is not recognized", - mlx5_counters_init[j].dpdk_name); + DRV_LOG(WARNING, + "port %u counter \"%s\" is not recognized", + dev->data->port_id, + mlx5_counters_init[j].dpdk_name); goto free; } } /* Copy to base at first time. */ assert(xstats_n <= MLX5_MAX_XSTATS); - priv_read_dev_counters(priv, xstats_ctrl->base); + ret = mlx5_read_dev_counters(dev, xstats_ctrl->base); + if (ret) + DRV_LOG(ERR, "port %u cannot read device counters: %s", + dev->data->port_id, strerror(rte_errno)); free: rte_free(strings); } /** - * Get device extended statistics. + * DPDK callback to get extended device statistics. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param[out] stats * Pointer to rte extended stats table. + * @param n + * The size of the stats table. * * @return * Number of extended stats on success and stats is filled, - * negative on error. + * negative on error and rte_errno is set. */ -static int -priv_xstats_get(struct priv *priv, struct rte_eth_xstat *stats) +int +mlx5_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats, + unsigned int n) { - struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; + struct priv *priv = dev->data->dev_private; unsigned int i; - unsigned int n = xstats_n; uint64_t counters[n]; - if (priv_read_dev_counters(priv, counters) < 0) - return -1; - for (i = 0; i != xstats_n; ++i) { - stats[i].id = i; - stats[i].value = (counters[i] - xstats_ctrl->base[i]); - } - return n; -} - -/** - * Reset device extended statistics. - * - * @param priv - * Pointer to private structure. 
- */ -static void -priv_xstats_reset(struct priv *priv) -{ - struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; - unsigned int i; - unsigned int n = xstats_n; - uint64_t counters[n]; + if (n >= xstats_n && stats) { + struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; + int stats_n; + int ret; - if (priv_read_dev_counters(priv, counters) < 0) - return; - for (i = 0; i != n; ++i) - xstats_ctrl->base[i] = counters[i]; + stats_n = mlx5_ethtool_get_stats_n(dev); + if (stats_n < 0) + return stats_n; + if (xstats_ctrl->stats_n != stats_n) + mlx5_xstats_init(dev); + ret = mlx5_read_dev_counters(dev, counters); + if (ret) + return ret; + for (i = 0; i != xstats_n; ++i) { + stats[i].id = i; + stats[i].value = (counters[i] - xstats_ctrl->base[i]); + } + } + return xstats_n; } /** @@ -303,6 +335,10 @@ priv_xstats_reset(struct priv *priv) * Pointer to Ethernet device structure. * @param[out] stats * Stats structure output buffer. + * + * @return + * 0 on success and stats is filled, negative errno value otherwise and + * rte_errno is set. */ int mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) @@ -312,7 +348,6 @@ mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) unsigned int i; unsigned int idx; - priv_lock(priv); /* Add software counters. */ for (i = 0; (i != priv->rxqs_n); ++i) { struct mlx5_rxq_data *rxq = (*priv->rxqs)[i]; @@ -358,7 +393,6 @@ mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) /* FIXME: retrieve and add hardware counters. */ #endif *stats = tmp; - priv_unlock(priv); return 0; } @@ -375,7 +409,6 @@ mlx5_stats_reset(struct rte_eth_dev *dev) unsigned int i; unsigned int idx; - priv_lock(priv); for (i = 0; (i != priv->rxqs_n); ++i) { if ((*priv->rxqs)[i] == NULL) continue; @@ -393,45 +426,6 @@ mlx5_stats_reset(struct rte_eth_dev *dev) #ifndef MLX5_PMD_SOFT_COUNTERS /* FIXME: reset hardware counters. */ #endif - priv_unlock(priv); -} - -/** - * DPDK callback to get extended device statistics. - * - * @param dev - * Pointer to Ethernet device structure. - * @param[out] stats - * Stats table output buffer. - * @param n - * The size of the stats table. - * - * @return - * Number of xstats on success, negative on failure. 
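/*
 * The reworked mlx5_xstats_get() above follows the standard ethdev
 * contract: it returns xstats_n when the caller's table is too small (or
 * NULL), and fills the table otherwise. Sketch of the resulting two-call
 * pattern an application uses through the public API; port_id is assumed
 * to refer to a started port, error handling trimmed for brevity.
 */
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <rte_ethdev.h>

static void
dump_xstats(uint16_t port_id)
{
	struct rte_eth_xstat *stats;
	int n;
	int i;

	n = rte_eth_xstats_get(port_id, NULL, 0); /* First call sizes. */
	if (n <= 0)
		return;
	stats = malloc(sizeof(*stats) * n);
	if (stats == NULL)
		return;
	n = rte_eth_xstats_get(port_id, stats, n); /* Second call fills. */
	for (i = 0; i < n; ++i)
		printf("xstat[%" PRIu64 "] = %" PRIu64 "\n",
		       stats[i].id, stats[i].value);
	free(stats);
}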
- */ -int -mlx5_xstats_get(struct rte_eth_dev *dev, - struct rte_eth_xstat *stats, unsigned int n) -{ - struct priv *priv = dev->data->dev_private; - int ret = xstats_n; - - if (n >= xstats_n && stats) { - struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; - int stats_n; - - priv_lock(priv); - stats_n = priv_ethtool_get_stats_n(priv); - if (stats_n < 0) { - priv_unlock(priv); - return -1; - } - if (xstats_ctrl->stats_n != stats_n) - priv_xstats_init(priv); - ret = priv_xstats_get(priv, stats); - priv_unlock(priv); - } - return ret; } /** @@ -446,16 +440,27 @@ mlx5_xstats_reset(struct rte_eth_dev *dev) struct priv *priv = dev->data->dev_private; struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; int stats_n; + unsigned int i; + unsigned int n = xstats_n; + uint64_t counters[n]; + int ret; - priv_lock(priv); - stats_n = priv_ethtool_get_stats_n(priv); - if (stats_n < 0) - goto unlock; + stats_n = mlx5_ethtool_get_stats_n(dev); + if (stats_n < 0) { + DRV_LOG(ERR, "port %u cannot get stats: %s", dev->data->port_id, + strerror(-stats_n)); + return; + } if (xstats_ctrl->stats_n != stats_n) - priv_xstats_init(priv); - priv_xstats_reset(priv); -unlock: - priv_unlock(priv); + mlx5_xstats_init(dev); + ret = mlx5_read_dev_counters(dev, counters); + if (ret) { + DRV_LOG(ERR, "port %u cannot read device counters: %s", + dev->data->port_id, strerror(rte_errno)); + return; + } + for (i = 0; i != n; ++i) + xstats_ctrl->base[i] = counters[i]; } /** @@ -472,21 +477,18 @@ unlock: * Number of xstats names. */ int -mlx5_xstats_get_names(struct rte_eth_dev *dev, - struct rte_eth_xstat_name *xstats_names, unsigned int n) +mlx5_xstats_get_names(struct rte_eth_dev *dev __rte_unused, + struct rte_eth_xstat_name *xstats_names, unsigned int n) { - struct priv *priv = dev->data->dev_private; unsigned int i; if (n >= xstats_n && xstats_names) { - priv_lock(priv); for (i = 0; i != xstats_n; ++i) { strncpy(xstats_names[i].name, mlx5_counters_init[i].dpdk_name, RTE_ETH_XSTATS_NAME_SIZE); xstats_names[i].name[RTE_ETH_XSTATS_NAME_SIZE - 1] = 0; } - priv_unlock(priv); } return xstats_n; } diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c index f5711a99..3e7c0a90 100644 --- a/drivers/net/mlx5/mlx5_trigger.c +++ b/drivers/net/mlx5/mlx5_trigger.c @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2015 6WIND S.A. - * Copyright 2015 Mellanox. + * Copyright 2015 Mellanox Technologies, Ltd */ #include @@ -14,83 +14,125 @@ #include "mlx5_rxtx.h" #include "mlx5_utils.h" +/** + * Stop traffic on Tx queues. + * + * @param dev + * Pointer to Ethernet device structure. + */ static void -priv_txq_stop(struct priv *priv) +mlx5_txq_stop(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; unsigned int i; for (i = 0; i != priv->txqs_n; ++i) - mlx5_priv_txq_release(priv, i); + mlx5_txq_release(dev, i); } +/** + * Start traffic on Tx queues. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ static int -priv_txq_start(struct priv *priv) +mlx5_txq_start(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; unsigned int i; - int ret = 0; + int ret; /* Add memory regions to Tx queues. 
*/ for (i = 0; i != priv->txqs_n; ++i) { - unsigned int idx = 0; - struct mlx5_mr *mr; - struct mlx5_txq_ctrl *txq_ctrl = mlx5_priv_txq_get(priv, i); + struct mlx5_txq_ctrl *txq_ctrl = mlx5_txq_get(dev, i); if (!txq_ctrl) continue; - LIST_FOREACH(mr, &priv->mr, next) { - priv_txq_mp2mr_reg(priv, &txq_ctrl->txq, mr->mp, idx++); - if (idx == MLX5_PMD_TX_MP_CACHE) - break; - } txq_alloc_elts(txq_ctrl); - txq_ctrl->ibv = mlx5_priv_txq_ibv_new(priv, i); + txq_ctrl->ibv = mlx5_txq_ibv_new(dev, i); if (!txq_ctrl->ibv) { - ret = ENOMEM; + rte_errno = ENOMEM; goto error; } } - ret = priv_tx_uar_remap(priv, priv->ctx->cmd_fd); + ret = mlx5_tx_uar_remap(dev, priv->ctx->cmd_fd); if (ret) goto error; - return ret; + return 0; error: - priv_txq_stop(priv); - return ret; + ret = rte_errno; /* Save rte_errno before cleanup. */ + mlx5_txq_stop(dev); + rte_errno = ret; /* Restore rte_errno. */ + return -rte_errno; } +/** + * Stop traffic on Rx queues. + * + * @param dev + * Pointer to Ethernet device structure. + */ static void -priv_rxq_stop(struct priv *priv) +mlx5_rxq_stop(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; unsigned int i; for (i = 0; i != priv->rxqs_n; ++i) - mlx5_priv_rxq_release(priv, i); + mlx5_rxq_release(dev, i); } +/** + * Start traffic on Rx queues. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ static int -priv_rxq_start(struct priv *priv) +mlx5_rxq_start(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; unsigned int i; int ret = 0; + /* Allocate/reuse/resize mempool for Multi-Packet RQ. */ + if (mlx5_mprq_alloc_mp(dev)) + goto error; for (i = 0; i != priv->rxqs_n; ++i) { - struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_priv_rxq_get(priv, i); + struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_get(dev, i); + struct rte_mempool *mp; if (!rxq_ctrl) continue; + /* Pre-register Rx mempool. */ + mp = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ? + rxq_ctrl->rxq.mprq_mp : rxq_ctrl->rxq.mp; + DRV_LOG(DEBUG, + "port %u Rx queue %u registering" + " mp %s having %u chunks", + dev->data->port_id, rxq_ctrl->idx, + mp->name, mp->nb_mem_chunks); + mlx5_mr_update_mp(dev, &rxq_ctrl->rxq.mr_ctrl, mp); ret = rxq_alloc_elts(rxq_ctrl); if (ret) goto error; - rxq_ctrl->ibv = mlx5_priv_rxq_ibv_new(priv, i); - if (!rxq_ctrl->ibv) { - ret = ENOMEM; + rxq_ctrl->ibv = mlx5_rxq_ibv_new(dev, i); + if (!rxq_ctrl->ibv) goto error; - } } - return -ret; + return 0; error: - priv_rxq_stop(priv); - return -ret; + ret = rte_errno; /* Save rte_errno before cleanup. */ + mlx5_rxq_stop(dev); + rte_errno = ret; /* Restore rte_errno. */ + return -rte_errno; } /** @@ -102,68 +144,64 @@ error: * Pointer to Ethernet device structure. * * @return - * 0 on success, negative errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
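/*
 * The error paths in mlx5_txq_start()/mlx5_rxq_start() above, and in
 * mlx5_dev_start() below, all share one idiom: rollback helpers may
 * themselves clobber rte_errno, so the original value is saved before
 * cleanup and restored afterwards. Generic sketch of the idiom with a
 * hypothetical cleanup() callback:
 */
#include <rte_errno.h>

static int
fail_with_cleanup(void (*cleanup)(void))
{
	int ret = rte_errno; /* Save rte_errno before cleanup. */

	cleanup(); /* May call APIs that overwrite rte_errno. */
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}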
*/ int mlx5_dev_start(struct rte_eth_dev *dev) { struct priv *priv = dev->data->dev_private; - struct mlx5_mr *mr = NULL; - int err; + int ret; dev->data->dev_started = 1; - priv_lock(priv); - err = priv_flow_create_drop_queue(priv); - if (err) { - ERROR("%p: Drop queue allocation failed: %s", - (void *)dev, strerror(err)); + DRV_LOG(DEBUG, "port %u allocating and configuring hash Rx queues", + dev->data->port_id); + ret = mlx5_txq_start(dev); + if (ret) { + DRV_LOG(ERR, "port %u Tx queue allocation failed: %s", + dev->data->port_id, strerror(rte_errno)); goto error; } - DEBUG("%p: allocating and configuring hash RX queues", (void *)dev); - rte_mempool_walk(mlx5_mp2mr_iter, priv); - err = priv_txq_start(priv); - if (err) { - ERROR("%p: TXQ allocation failed: %s", - (void *)dev, strerror(err)); + ret = mlx5_rxq_start(dev); + if (ret) { + DRV_LOG(ERR, "port %u Rx queue allocation failed: %s", + dev->data->port_id, strerror(rte_errno)); goto error; } - err = priv_rxq_start(priv); - if (err) { - ERROR("%p: RXQ allocation failed: %s", - (void *)dev, strerror(err)); + if (rte_log_get_level(mlx5_logtype) == RTE_LOG_DEBUG) + mlx5_mr_dump_dev(dev); + ret = mlx5_rx_intr_vec_enable(dev); + if (ret) { + DRV_LOG(ERR, "port %u Rx interrupt vector creation failed", + dev->data->port_id); goto error; } - err = priv_rx_intr_vec_enable(priv); - if (err) { - ERROR("%p: RX interrupt vector creation failed", - (void *)priv); + mlx5_xstats_init(dev); + ret = mlx5_traffic_enable(dev); + if (ret) { + DRV_LOG(DEBUG, "port %u failed to set defaults flows", + dev->data->port_id); goto error; } - priv_xstats_init(priv); - /* Update link status and Tx/Rx callbacks for the first time. */ - memset(&dev->data->dev_link, 0, sizeof(struct rte_eth_link)); - INFO("Forcing port %u link to be up", dev->data->port_id); - err = priv_force_link_status_change(priv, ETH_LINK_UP); - if (err) { - DEBUG("Failed to set port %u link to be up", - dev->data->port_id); + ret = mlx5_flow_start(dev, &priv->flows); + if (ret) { + DRV_LOG(DEBUG, "port %u failed to set flows", + dev->data->port_id); goto error; } - priv_dev_interrupt_handler_install(priv, dev); - priv_unlock(priv); + dev->tx_pkt_burst = mlx5_select_tx_function(dev); + dev->rx_pkt_burst = mlx5_select_rx_function(dev); + mlx5_dev_interrupt_handler_install(dev); return 0; error: + ret = rte_errno; /* Save rte_errno before cleanup. */ /* Rollback. */ dev->data->dev_started = 0; - for (mr = LIST_FIRST(&priv->mr); mr; mr = LIST_FIRST(&priv->mr)) - priv_mr_release(priv, mr); - priv_flow_stop(priv, &priv->flows); - priv_dev_traffic_disable(priv, dev); - priv_txq_stop(priv); - priv_rxq_stop(priv); - priv_flow_delete_drop_queue(priv); - priv_unlock(priv); - return err; + mlx5_flow_stop(dev, &priv->flows); + mlx5_traffic_disable(dev); + mlx5_txq_stop(dev); + mlx5_rxq_stop(dev); + rte_errno = ret; /* Restore rte_errno. */ + return -rte_errno; } /** @@ -178,42 +216,38 @@ void mlx5_dev_stop(struct rte_eth_dev *dev) { struct priv *priv = dev->data->dev_private; - struct mlx5_mr *mr; - priv_lock(priv); dev->data->dev_started = 0; /* Prevent crashes when queues are still in use. 
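/*
 * Sketch of the quiescing pattern used just below in mlx5_dev_stop():
 * the burst callbacks are swapped to no-op stubs, a write barrier
 * publishes the new pointers, and the stop path then waits long enough
 * for bursts already running on other lcores to drain before queues are
 * torn down. The stub mirrors the driver's removed_rx_burst and
 * removed_tx_burst helpers (both burst signatures are identical).
 */
#include <unistd.h>
#include <rte_atomic.h>
#include <rte_common.h>
#include <rte_ethdev_driver.h>

static uint16_t
removed_burst_stub(void *queue __rte_unused,
		   struct rte_mbuf **pkts __rte_unused,
		   uint16_t pkts_n __rte_unused)
{
	return 0; /* Report nothing received or transmitted. */
}

static void
quiesce_datapath(struct rte_eth_dev *dev, unsigned int rxqs_n)
{
	dev->rx_pkt_burst = removed_burst_stub;
	dev->tx_pkt_burst = removed_burst_stub;
	rte_wmb(); /* Make the stubs visible to every lcore. */
	usleep(1000 * rxqs_n); /* Crude drain delay, as in the code above. */
}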
*/ dev->rx_pkt_burst = removed_rx_burst; dev->tx_pkt_burst = removed_tx_burst; rte_wmb(); usleep(1000 * priv->rxqs_n); - DEBUG("%p: cleaning up and destroying hash RX queues", (void *)dev); - priv_flow_stop(priv, &priv->flows); - priv_dev_traffic_disable(priv, dev); - priv_rx_intr_vec_disable(priv); - priv_dev_interrupt_handler_uninstall(priv, dev); - priv_txq_stop(priv); - priv_rxq_stop(priv); - for (mr = LIST_FIRST(&priv->mr); mr; mr = LIST_FIRST(&priv->mr)) - priv_mr_release(priv, mr); - priv_flow_delete_drop_queue(priv); - priv_unlock(priv); + DRV_LOG(DEBUG, "port %u cleaning up and destroying hash Rx queues", + dev->data->port_id); + mlx5_flow_stop(dev, &priv->flows); + mlx5_traffic_disable(dev); + mlx5_rx_intr_vec_disable(dev); + mlx5_dev_interrupt_handler_uninstall(dev); + mlx5_txq_stop(dev); + mlx5_rxq_stop(dev); } /** * Enable traffic flows configured by control plane * - * @param priv + * @param dev * Pointer to Ethernet device private data. * @param dev * Pointer to Ethernet device structure. * * @return - * 0 on success. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int -priv_dev_traffic_enable(struct priv *priv, struct rte_eth_dev *dev) +mlx5_traffic_enable(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; struct rte_flow_item_eth bcast = { .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff", }; @@ -246,8 +280,9 @@ priv_dev_traffic_enable(struct priv *priv, struct rte_eth_dev *dev) .type = 0, }; - claim_zero(mlx5_ctrl_flow(dev, &promisc, &promisc)); - return 0; + ret = mlx5_ctrl_flow(dev, &promisc, &promisc); + if (ret) + goto error; } if (dev->data->all_multicast) { struct rte_flow_item_eth multicast = { @@ -256,7 +291,9 @@ priv_dev_traffic_enable(struct priv *priv, struct rte_eth_dev *dev) .type = 0, }; - claim_zero(mlx5_ctrl_flow(dev, &multicast, &multicast)); + ret = mlx5_ctrl_flow(dev, &multicast, &multicast); + if (ret) + goto error; } else { /* Add broadcast/multicast flows. */ for (i = 0; i != vlan_filter_n; ++i) { @@ -316,74 +353,49 @@ priv_dev_traffic_enable(struct priv *priv, struct rte_eth_dev *dev) goto error; } if (!vlan_filter_n) { - ret = mlx5_ctrl_flow(dev, &unicast, - &unicast_mask); + ret = mlx5_ctrl_flow(dev, &unicast, &unicast_mask); if (ret) goto error; } } return 0; error: - return rte_errno; + ret = rte_errno; /* Save rte_errno before cleanup. */ + mlx5_flow_list_flush(dev, &priv->ctrl_flows); + rte_errno = ret; /* Restore rte_errno. */ + return -rte_errno; } /** * Disable traffic flows configured by control plane * - * @param priv - * Pointer to Ethernet device private data. * @param dev - * Pointer to Ethernet device structure. - * - * @return - * 0 on success. - */ -int -priv_dev_traffic_disable(struct priv *priv, struct rte_eth_dev *dev) -{ - (void)dev; - priv_flow_flush(priv, &priv->ctrl_flows); - return 0; -} - -/** - * Restart traffic flows configured by control plane - * - * @param priv * Pointer to Ethernet device private data. - * @param dev - * Pointer to Ethernet device structure. - * - * @return - * 0 on success. */ -int -priv_dev_traffic_restart(struct priv *priv, struct rte_eth_dev *dev) +void +mlx5_traffic_disable(struct rte_eth_dev *dev) { - if (dev->data->dev_started) { - priv_dev_traffic_disable(priv, dev); - priv_dev_traffic_enable(priv, dev); - } - return 0; + struct priv *priv = dev->data->dev_private; + + mlx5_flow_list_flush(dev, &priv->ctrl_flows); } /** * Restart traffic flows configured by control plane * * @param dev - * Pointer to Ethernet device structure. 
+ * Pointer to Ethernet device private data. * * @return - * 0 on success. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_traffic_restart(struct rte_eth_dev *dev) { - struct priv *priv = dev->data->dev_private; - - priv_lock(priv); - priv_dev_traffic_restart(priv, dev); - priv_unlock(priv); + if (dev->data->dev_started) { + mlx5_traffic_disable(dev); + return mlx5_traffic_enable(dev); + } return 0; } diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c index ed1c713e..691ea071 100644 --- a/drivers/net/mlx5/mlx5_txq.c +++ b/drivers/net/mlx5/mlx5_txq.c @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2015 6WIND S.A. - * Copyright 2015 Mellanox. + * Copyright 2015 Mellanox Technologies, Ltd */ #include @@ -47,7 +47,8 @@ txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl) for (i = 0; (i != elts_n); ++i) (*txq_ctrl->txq.elts)[i] = NULL; - DEBUG("%p: allocated and configured %u WRs", (void *)txq_ctrl, elts_n); + DRV_LOG(DEBUG, "port %u Tx queue %u allocated and configured %u WRs", + PORT_ID(txq_ctrl->priv), txq_ctrl->idx, elts_n); txq_ctrl->txq.elts_head = 0; txq_ctrl->txq.elts_tail = 0; txq_ctrl->txq.elts_comp = 0; @@ -68,7 +69,8 @@ txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl) uint16_t elts_tail = txq_ctrl->txq.elts_tail; struct rte_mbuf *(*elts)[elts_n] = txq_ctrl->txq.elts; - DEBUG("%p: freeing WRs", (void *)txq_ctrl); + DRV_LOG(DEBUG, "port %u Tx queue %u freeing WRs", + PORT_ID(txq_ctrl->priv), txq_ctrl->idx); txq_ctrl->txq.elts_head = 0; txq_ctrl->txq.elts_tail = 0; txq_ctrl->txq.elts_comp = 0; @@ -91,15 +93,16 @@ txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl) /** * Returns the per-port supported offloads. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * * @return * Supported Tx offloads. */ uint64_t -mlx5_priv_get_tx_port_offloads(struct priv *priv) +mlx5_get_tx_port_offloads(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; uint64_t offloads = (DEV_TX_OFFLOAD_MULTI_SEGS | DEV_TX_OFFLOAD_VLAN_INSERT); struct mlx5_dev_config *config = &priv->config; @@ -116,35 +119,13 @@ mlx5_priv_get_tx_port_offloads(struct priv *priv) if (config->tso) offloads |= (DEV_TX_OFFLOAD_VXLAN_TNL_TSO | DEV_TX_OFFLOAD_GRE_TNL_TSO); + if (config->swp) + offloads |= (DEV_TX_OFFLOAD_IP_TNL_TSO | + DEV_TX_OFFLOAD_UDP_TNL_TSO); } return offloads; } -/** - * Checks if the per-queue offload configuration is valid. - * - * @param priv - * Pointer to private structure. - * @param offloads - * Per-queue offloads configuration. - * - * @return - * 1 if the configuration is valid, 0 otherwise. - */ -static int -priv_is_tx_queue_offloads_allowed(struct priv *priv, uint64_t offloads) -{ - uint64_t port_offloads = priv->dev->data->dev_conf.txmode.offloads; - uint64_t port_supp_offloads = mlx5_priv_get_tx_port_offloads(priv); - - /* There are no Tx offloads which are per queue. */ - if ((offloads & port_supp_offloads) != offloads) - return 0; - if ((port_offloads ^ offloads) & port_supp_offloads) - return 0; - return 1; -} - /** * DPDK callback to configure a TX queue. * @@ -160,7 +141,7 @@ priv_is_tx_queue_offloads_allowed(struct priv *priv, uint64_t offloads) * Thresholds parameters. * * @return - * 0 on success, negative errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
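/*
 * mlx5_tx_queue_setup() below rounds the descriptor count up to a power
 * of two with desc = 1 << log2above(desc), log2above() being the
 * ceil(log2(v)) helper from mlx5_utils.h. A plain-loop sketch of the
 * same rounding, with a worked example: desc = 300 yields 512, while
 * desc = 512 stays 512.
 */
static unsigned int
next_pow2(unsigned int desc)
{
	unsigned int v = 1;

	while (v < desc)
		v <<= 1; /* Double until v >= desc. */
	return v; /* Matches 1 << log2above(desc) for desc >= 1. */
}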
*/ int mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, @@ -170,64 +151,47 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, struct mlx5_txq_data *txq = (*priv->txqs)[idx]; struct mlx5_txq_ctrl *txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq); - int ret = 0; - priv_lock(priv); - /* - * Don't verify port offloads for application which - * use the old API. - */ - if (!!(conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) && - !priv_is_tx_queue_offloads_allowed(priv, conf->offloads)) { - ret = ENOTSUP; - ERROR("%p: Tx queue offloads 0x%" PRIx64 " don't match port " - "offloads 0x%" PRIx64 " or supported offloads 0x%" PRIx64, - (void *)dev, conf->offloads, - dev->data->dev_conf.txmode.offloads, - mlx5_priv_get_tx_port_offloads(priv)); - goto out; - } if (desc <= MLX5_TX_COMP_THRESH) { - WARN("%p: number of descriptors requested for TX queue %u" - " must be higher than MLX5_TX_COMP_THRESH, using" - " %u instead of %u", - (void *)dev, idx, MLX5_TX_COMP_THRESH + 1, desc); + DRV_LOG(WARNING, + "port %u number of descriptors requested for Tx queue" + " %u must be higher than MLX5_TX_COMP_THRESH, using %u" + " instead of %u", + dev->data->port_id, idx, MLX5_TX_COMP_THRESH + 1, desc); desc = MLX5_TX_COMP_THRESH + 1; } if (!rte_is_power_of_2(desc)) { desc = 1 << log2above(desc); - WARN("%p: increased number of descriptors in TX queue %u" - " to the next power of two (%d)", - (void *)dev, idx, desc); + DRV_LOG(WARNING, + "port %u increased number of descriptors in Tx queue" + " %u to the next power of two (%d)", + dev->data->port_id, idx, desc); } - DEBUG("%p: configuring queue %u for %u descriptors", - (void *)dev, idx, desc); + DRV_LOG(DEBUG, "port %u configuring queue %u for %u descriptors", + dev->data->port_id, idx, desc); if (idx >= priv->txqs_n) { - ERROR("%p: queue index out of range (%u >= %u)", - (void *)dev, idx, priv->txqs_n); - priv_unlock(priv); - return -EOVERFLOW; + DRV_LOG(ERR, "port %u Tx queue index out of range (%u >= %u)", + dev->data->port_id, idx, priv->txqs_n); + rte_errno = EOVERFLOW; + return -rte_errno; } - if (!mlx5_priv_txq_releasable(priv, idx)) { - ret = EBUSY; - ERROR("%p: unable to release queue index %u", - (void *)dev, idx); - goto out; + if (!mlx5_txq_releasable(dev, idx)) { + rte_errno = EBUSY; + DRV_LOG(ERR, "port %u unable to release queue index %u", + dev->data->port_id, idx); + return -rte_errno; } - mlx5_priv_txq_release(priv, idx); - txq_ctrl = mlx5_priv_txq_new(priv, idx, desc, socket, conf); + mlx5_txq_release(dev, idx); + txq_ctrl = mlx5_txq_new(dev, idx, desc, socket, conf); if (!txq_ctrl) { - ERROR("%p: unable to allocate queue index %u", - (void *)dev, idx); - ret = ENOMEM; - goto out; + DRV_LOG(ERR, "port %u unable to allocate queue index %u", + dev->data->port_id, idx); + return -rte_errno; } - DEBUG("%p: adding TX queue %p to list", - (void *)dev, (void *)txq_ctrl); + DRV_LOG(DEBUG, "port %u adding Tx queue %u to list", + dev->data->port_id, idx); (*priv->txqs)[idx] = &txq_ctrl->txq; -out: - priv_unlock(priv); - return -ret; + return 0; } /** @@ -248,15 +212,13 @@ mlx5_tx_queue_release(void *dpdk_txq) return; txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq); priv = txq_ctrl->priv; - priv_lock(priv); for (i = 0; (i != priv->txqs_n); ++i) if ((*priv->txqs)[i] == txq) { - DEBUG("%p: removing TX queue %p from list", - (void *)priv->dev, (void *)txq_ctrl); - mlx5_priv_txq_release(priv, i); + mlx5_txq_release(ETH_DEV(priv), i); + DRV_LOG(DEBUG, "port %u removing Tx queue %u from list", + PORT_ID(priv), 
txq_ctrl->idx); break; } - priv_unlock(priv); } @@ -265,17 +227,18 @@ mlx5_tx_queue_release(void *dpdk_txq) * Both primary and secondary process do mmap to make UAR address * aligned. * - * @param[in] priv - * Pointer to private structure. + * @param[in] dev + * Pointer to Ethernet device. * @param fd * Verbs file descriptor to map UAR pages. * * @return - * 0 on success, errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int -priv_tx_uar_remap(struct priv *priv, int fd) +mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd) { + struct priv *priv = dev->data->dev_private; unsigned int i, j; uintptr_t pages[priv->txqs_n]; unsigned int pages_n = 0; @@ -287,7 +250,6 @@ priv_tx_uar_remap(struct priv *priv, int fd) struct mlx5_txq_ctrl *txq_ctrl; int already_mapped; size_t page_size = sysconf(_SC_PAGESIZE); - int r; memset(pages, 0, priv->txqs_n * sizeof(uintptr_t)); /* @@ -300,6 +262,7 @@ priv_tx_uar_remap(struct priv *priv, int fd) continue; txq = (*priv->txqs)[i]; txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq); + assert(txq_ctrl->idx == (uint16_t)i); /* UAR addr form verbs used to find dup and offset in page. */ uar_va = (uintptr_t)txq_ctrl->bf_reg_orig; off = uar_va & (page_size - 1); /* offset in page. */ @@ -324,10 +287,12 @@ priv_tx_uar_remap(struct priv *priv, int fd) txq_ctrl->uar_mmap_offset); if (ret != addr) { /* fixed mmap have to return same address */ - ERROR("call to mmap failed on UAR for txq %d\n", - i); - r = ENXIO; - return r; + DRV_LOG(ERR, + "port %u call to mmap failed on UAR" + " for txq %u", + dev->data->port_id, txq_ctrl->idx); + rte_errno = ENXIO; + return -rte_errno; } } if (rte_eal_process_type() == RTE_PROC_PRIMARY) /* save once */ @@ -361,17 +326,18 @@ is_empw_burst_func(eth_tx_burst_t tx_pkt_burst) /** * Create the Tx queue Verbs object. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param idx * Queue index in DPDK Rx queue array * * @return - * The Verbs object initialised if it can be created. + * The Verbs object initialised, NULL otherwise and rte_errno is set. */ -struct mlx5_txq_ibv* -mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx) +struct mlx5_txq_ibv * +mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx) { + struct priv *priv = dev->data->dev_private; struct mlx5_txq_data *txq_data = (*priv->txqs)[idx]; struct mlx5_txq_ctrl *txq_ctrl = container_of(txq_data, struct mlx5_txq_ctrl, txq); @@ -388,18 +354,20 @@ mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx) struct mlx5dv_cq cq_info; struct mlx5dv_obj obj; const int desc = 1 << txq_data->elts_n; - eth_tx_burst_t tx_pkt_burst = priv_select_tx_function(priv, priv->dev); + eth_tx_burst_t tx_pkt_burst = mlx5_select_tx_function(dev); int ret = 0; assert(txq_data); priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_TX_QUEUE; priv->verbs_alloc_ctx.obj = txq_ctrl; if (mlx5_getenv_int("MLX5_ENABLE_CQE_COMPRESSION")) { - ERROR("MLX5_ENABLE_CQE_COMPRESSION must never be set"); - goto error; + DRV_LOG(ERR, + "port %u MLX5_ENABLE_CQE_COMPRESSION must never be set", + dev->data->port_id); + rte_errno = EINVAL; + return NULL; } memset(&tmpl, 0, sizeof(struct mlx5_txq_ibv)); - /* MRs will be registered in mp2mr[] later. 
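/*
 * Sketch of the MAP_FIXED check in mlx5_tx_uar_remap() above: a fixed
 * mapping must land exactly at the requested address, so any other
 * return value (including MAP_FAILED) is treated as an error. addr,
 * page_size and uar_offset stand in for the per-queue UAR page values
 * the driver computes from the Verbs file descriptor.
 */
#include <sys/mman.h>
#include <sys/types.h>

static int
remap_uar_page(void *addr, size_t page_size, int fd, off_t uar_offset)
{
	void *ret = mmap(addr, page_size, PROT_WRITE,
			 MAP_FIXED | MAP_SHARED, fd, uar_offset);

	if (ret != addr)
		return -1; /* Fixed mapping did not land at addr. */
	return 0;
}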
*/ attr.cq = (struct ibv_cq_init_attr_ex){ .comp_mask = 0, }; @@ -409,7 +377,9 @@ mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx) cqe_n += MLX5_TX_COMP_THRESH_INLINE_DIV; tmpl.cq = mlx5_glue->create_cq(priv->ctx, cqe_n, NULL, NULL, 0); if (tmpl.cq == NULL) { - ERROR("%p: CQ creation failure", (void *)txq_ctrl); + DRV_LOG(ERR, "port %u Tx queue %u CQ creation failure", + dev->data->port_id, idx); + rte_errno = errno; goto error; } attr.init = (struct ibv_qp_init_attr_ex){ @@ -450,7 +420,9 @@ mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx) } tmpl.qp = mlx5_glue->create_qp_ex(priv->ctx, &attr.init); if (tmpl.qp == NULL) { - ERROR("%p: QP creation failure", (void *)txq_ctrl); + DRV_LOG(ERR, "port %u Tx queue %u QP creation failure", + dev->data->port_id, idx); + rte_errno = errno; goto error; } attr.mod = (struct ibv_qp_attr){ @@ -462,7 +434,10 @@ mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx) ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod, (IBV_QP_STATE | IBV_QP_PORT)); if (ret) { - ERROR("%p: QP state to IBV_QPS_INIT failed", (void *)txq_ctrl); + DRV_LOG(ERR, + "port %u Tx queue %u QP state to IBV_QPS_INIT failed", + dev->data->port_id, idx); + rte_errno = errno; goto error; } attr.mod = (struct ibv_qp_attr){ @@ -470,19 +445,27 @@ mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx) }; ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE); if (ret) { - ERROR("%p: QP state to IBV_QPS_RTR failed", (void *)txq_ctrl); + DRV_LOG(ERR, + "port %u Tx queue %u QP state to IBV_QPS_RTR failed", + dev->data->port_id, idx); + rte_errno = errno; goto error; } attr.mod.qp_state = IBV_QPS_RTS; ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE); if (ret) { - ERROR("%p: QP state to IBV_QPS_RTS failed", (void *)txq_ctrl); + DRV_LOG(ERR, + "port %u Tx queue %u QP state to IBV_QPS_RTS failed", + dev->data->port_id, idx); + rte_errno = errno; goto error; } txq_ibv = rte_calloc_socket(__func__, 1, sizeof(struct mlx5_txq_ibv), 0, txq_ctrl->socket); if (!txq_ibv) { - ERROR("%p: cannot allocate memory", (void *)txq_ctrl); + DRV_LOG(ERR, "port %u Tx queue %u cannot allocate memory", + dev->data->port_id, idx); + rte_errno = ENOMEM; goto error; } obj.cq.in = tmpl.cq; @@ -490,11 +473,16 @@ mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx) obj.qp.in = tmpl.qp; obj.qp.out = &qp; ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_QP); - if (ret != 0) + if (ret != 0) { + rte_errno = errno; goto error; + } if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) { - ERROR("Wrong MLX5_CQE_SIZE environment variable value: " - "it should be set to %u", RTE_CACHE_LINE_SIZE); + DRV_LOG(ERR, + "port %u wrong MLX5_CQE_SIZE environment variable" + " value: it should be set to %u", + dev->data->port_id, RTE_CACHE_LINE_SIZE); + rte_errno = EINVAL; goto error; } txq_data->cqe_n = log2above(cq_info.cqe_cnt); @@ -519,37 +507,45 @@ mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx) if (qp.comp_mask & MLX5DV_QP_MASK_UAR_MMAP_OFFSET) { txq_ctrl->uar_mmap_offset = qp.uar_mmap_offset; } else { - ERROR("Failed to retrieve UAR info, invalid libmlx5.so version"); + DRV_LOG(ERR, + "port %u failed to retrieve UAR info, invalid" + " libmlx5.so", + dev->data->port_id); + rte_errno = EINVAL; goto error; } - DEBUG("%p: Verbs Tx queue %p: refcnt %d", (void *)priv, - (void *)txq_ibv, rte_atomic32_read(&txq_ibv->refcnt)); + DRV_LOG(DEBUG, "port %u Verbs Tx queue %u: refcnt %d", + dev->data->port_id, idx, rte_atomic32_read(&txq_ibv->refcnt)); LIST_INSERT_HEAD(&priv->txqsibv, txq_ibv, next); + 
txq_ibv->txq_ctrl = txq_ctrl; priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE; return txq_ibv; error: + ret = rte_errno; /* Save rte_errno before cleanup. */ if (tmpl.cq) claim_zero(mlx5_glue->destroy_cq(tmpl.cq)); if (tmpl.qp) claim_zero(mlx5_glue->destroy_qp(tmpl.qp)); priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE; + rte_errno = ret; /* Restore rte_errno. */ return NULL; } /** * Get an Tx queue Verbs object. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param idx * Queue index in DPDK Rx queue array * * @return * The Verbs object if it exists. */ -struct mlx5_txq_ibv* -mlx5_priv_txq_ibv_get(struct priv *priv, uint16_t idx) +struct mlx5_txq_ibv * +mlx5_txq_ibv_get(struct rte_eth_dev *dev, uint16_t idx) { + struct priv *priv = dev->data->dev_private; struct mlx5_txq_ctrl *txq_ctrl; if (idx >= priv->txqs_n) @@ -559,8 +555,8 @@ mlx5_priv_txq_ibv_get(struct priv *priv, uint16_t idx) txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq); if (txq_ctrl->ibv) { rte_atomic32_inc(&txq_ctrl->ibv->refcnt); - DEBUG("%p: Verbs Tx queue %p: refcnt %d", (void *)priv, - (void *)txq_ctrl->ibv, + DRV_LOG(DEBUG, "port %u Verbs Tx queue %u: refcnt %d", + dev->data->port_id, txq_ctrl->idx, rte_atomic32_read(&txq_ctrl->ibv->refcnt)); } return txq_ctrl->ibv; @@ -569,21 +565,19 @@ mlx5_priv_txq_ibv_get(struct priv *priv, uint16_t idx) /** * Release an Tx verbs queue object. * - * @param priv - * Pointer to private structure. * @param txq_ibv * Verbs Tx queue object. * * @return - * 0 on success, errno on failure. + * 1 while a reference on it exists, 0 when freed. */ int -mlx5_priv_txq_ibv_release(struct priv *priv, struct mlx5_txq_ibv *txq_ibv) +mlx5_txq_ibv_release(struct mlx5_txq_ibv *txq_ibv) { - (void)priv; assert(txq_ibv); - DEBUG("%p: Verbs Tx queue %p: refcnt %d", (void *)priv, - (void *)txq_ibv, rte_atomic32_read(&txq_ibv->refcnt)); + DRV_LOG(DEBUG, "port %u Verbs Tx queue %u: refcnt %d", + PORT_ID(txq_ibv->txq_ctrl->priv), + txq_ibv->txq_ctrl->idx, rte_atomic32_read(&txq_ibv->refcnt)); if (rte_atomic32_dec_and_test(&txq_ibv->refcnt)) { claim_zero(mlx5_glue->destroy_qp(txq_ibv->qp)); claim_zero(mlx5_glue->destroy_cq(txq_ibv->cq)); @@ -591,21 +585,18 @@ mlx5_priv_txq_ibv_release(struct priv *priv, struct mlx5_txq_ibv *txq_ibv) rte_free(txq_ibv); return 0; } - return EBUSY; + return 1; } /** * Return true if a single reference exists on the object. * - * @param priv - * Pointer to private structure. * @param txq_ibv * Verbs Tx queue object. */ int -mlx5_priv_txq_ibv_releasable(struct priv *priv, struct mlx5_txq_ibv *txq_ibv) +mlx5_txq_ibv_releasable(struct mlx5_txq_ibv *txq_ibv) { - (void)priv; assert(txq_ibv); return (rte_atomic32_read(&txq_ibv->refcnt) == 1); } @@ -613,20 +604,22 @@ mlx5_priv_txq_ibv_releasable(struct priv *priv, struct mlx5_txq_ibv *txq_ibv) /** * Verify the Verbs Tx queue list is empty * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * - * @return the number of object not released. + * @return + * The number of object not released. 
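/*
 * The release functions reworked above share one convention: the caller
 * drops a reference, the object is destroyed only when the last
 * reference goes away, and the return value is 1 while references
 * remain, 0 once freed. Generic sketch of the convention with a
 * hypothetical object type and destroy() callback:
 */
#include <rte_atomic.h>

struct refobj {
	rte_atomic32_t refcnt;
};

static int
refobj_release(struct refobj *obj, void (*destroy)(struct refobj *))
{
	if (rte_atomic32_dec_and_test(&obj->refcnt)) {
		destroy(obj); /* Last reference: free the object. */
		return 0;
	}
	return 1; /* References still exist. */
}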
*/ int -mlx5_priv_txq_ibv_verify(struct priv *priv) +mlx5_txq_ibv_verify(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; int ret = 0; struct mlx5_txq_ibv *txq_ibv; LIST_FOREACH(txq_ibv, &priv->txqsibv, next) { - DEBUG("%p: Verbs Tx queue %p still referenced", (void *)priv, - (void *)txq_ibv); + DRV_LOG(DEBUG, "port %u Verbs Tx queue %u still referenced", + dev->data->port_id, txq_ibv->txq_ctrl->idx); ++ret; } return ret; @@ -649,9 +642,14 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl) unsigned int txq_inline; unsigned int txqs_inline; unsigned int inline_max_packet_sz; - eth_tx_burst_t tx_pkt_burst = priv_select_tx_function(priv, priv->dev); + eth_tx_burst_t tx_pkt_burst = + mlx5_select_tx_function(ETH_DEV(priv)); int is_empw_func = is_empw_burst_func(tx_pkt_burst); - int tso = !!(txq_ctrl->txq.offloads & DEV_TX_OFFLOAD_TCP_TSO); + int tso = !!(txq_ctrl->txq.offloads & (DEV_TX_OFFLOAD_TCP_TSO | + DEV_TX_OFFLOAD_VXLAN_TNL_TSO | + DEV_TX_OFFLOAD_GRE_TNL_TSO | + DEV_TX_OFFLOAD_IP_TNL_TSO | + DEV_TX_OFFLOAD_UDP_TNL_TSO)); txq_inline = (config->txq_inline == MLX5_ARG_UNSET) ? 0 : config->txq_inline; @@ -685,18 +683,6 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl) inline_max_packet_sz) + (RTE_CACHE_LINE_SIZE - 1)) / RTE_CACHE_LINE_SIZE) * RTE_CACHE_LINE_SIZE; - } else if (tso) { - int inline_diff = txq_ctrl->txq.max_inline - - max_tso_inline; - - /* - * Adjust inline value as Verbs aggregates - * tso_inline and txq_inline fields. - */ - txq_ctrl->max_inline_data = inline_diff > 0 ? - inline_diff * - RTE_CACHE_LINE_SIZE : - 0; } else { txq_ctrl->max_inline_data = txq_ctrl->txq.max_inline * RTE_CACHE_LINE_SIZE; @@ -716,9 +702,10 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl) max_inline = max_inline - (max_inline % RTE_CACHE_LINE_SIZE); - WARN("txq inline is too large (%d) setting it to " - "the maximum possible: %d\n", - txq_inline, max_inline); + DRV_LOG(WARNING, + "port %u txq inline is too large (%d) setting" + " it to the maximum possible: %d\n", + PORT_ID(priv), txq_inline, max_inline); txq_ctrl->txq.max_inline = max_inline / RTE_CACHE_LINE_SIZE; } @@ -730,13 +717,17 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl) txq_ctrl->txq.tso_en = 1; } txq_ctrl->txq.tunnel_en = config->tunnel_en; + txq_ctrl->txq.swp_en = ((DEV_TX_OFFLOAD_IP_TNL_TSO | + DEV_TX_OFFLOAD_UDP_TNL_TSO | + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) & + txq_ctrl->txq.offloads) && config->swp; } /** * Create a DPDK Tx queue. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param idx * TX queue index. * @param desc @@ -747,76 +738,80 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl) * Thresholds parameters. * * @return - * A DPDK queue object on success. + * A DPDK queue object on success, NULL otherwise and rte_errno is set. */ -struct mlx5_txq_ctrl* -mlx5_priv_txq_new(struct priv *priv, uint16_t idx, uint16_t desc, - unsigned int socket, - const struct rte_eth_txconf *conf) +struct mlx5_txq_ctrl * +mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, + unsigned int socket, const struct rte_eth_txconf *conf) { + struct priv *priv = dev->data->dev_private; struct mlx5_txq_ctrl *tmpl; tmpl = rte_calloc_socket("TXQ", 1, sizeof(*tmpl) + desc * sizeof(struct rte_mbuf *), 0, socket); - if (!tmpl) + if (!tmpl) { + rte_errno = ENOMEM; return NULL; + } + if (mlx5_mr_btree_init(&tmpl->txq.mr_ctrl.cache_bh, + MLX5_MR_BTREE_CACHE_N, socket)) { + /* rte_errno is already set. 
*/ + goto error; + } + /* Save pointer of global generation number to check memory event. */ + tmpl->txq.mr_ctrl.dev_gen_ptr = &priv->mr.dev_gen; assert(desc > MLX5_TX_COMP_THRESH); - tmpl->txq.offloads = conf->offloads; + tmpl->txq.offloads = conf->offloads | + dev->data->dev_conf.txmode.offloads; tmpl->priv = priv; tmpl->socket = socket; tmpl->txq.elts_n = log2above(desc); + tmpl->idx = idx; txq_set_params(tmpl); - /* MRs will be registered in mp2mr[] later. */ - DEBUG("priv->device_attr.max_qp_wr is %d", - priv->device_attr.orig_attr.max_qp_wr); - DEBUG("priv->device_attr.max_sge is %d", - priv->device_attr.orig_attr.max_sge); + DRV_LOG(DEBUG, "port %u priv->device_attr.max_qp_wr is %d", + dev->data->port_id, priv->device_attr.orig_attr.max_qp_wr); + DRV_LOG(DEBUG, "port %u priv->device_attr.max_sge is %d", + dev->data->port_id, priv->device_attr.orig_attr.max_sge); tmpl->txq.elts = (struct rte_mbuf *(*)[1 << tmpl->txq.elts_n])(tmpl + 1); tmpl->txq.stats.idx = idx; rte_atomic32_inc(&tmpl->refcnt); - DEBUG("%p: Tx queue %p: refcnt %d", (void *)priv, - (void *)tmpl, rte_atomic32_read(&tmpl->refcnt)); + DRV_LOG(DEBUG, "port %u Tx queue %u: refcnt %d", dev->data->port_id, + idx, rte_atomic32_read(&tmpl->refcnt)); LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next); return tmpl; +error: + rte_free(tmpl); + return NULL; } /** * Get a Tx queue. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param idx * TX queue index. * * @return * A pointer to the queue if it exists. */ -struct mlx5_txq_ctrl* -mlx5_priv_txq_get(struct priv *priv, uint16_t idx) +struct mlx5_txq_ctrl * +mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx) { + struct priv *priv = dev->data->dev_private; struct mlx5_txq_ctrl *ctrl = NULL; if ((*priv->txqs)[idx]) { ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq); - unsigned int i; - - mlx5_priv_txq_ibv_get(priv, idx); - for (i = 0; i != MLX5_PMD_TX_MP_CACHE; ++i) { - struct mlx5_mr *mr = NULL; - - (void)mr; - if (ctrl->txq.mp2mr[i]) { - mr = priv_mr_get(priv, ctrl->txq.mp2mr[i]->mp); - assert(mr); - } - } + mlx5_txq_ibv_get(dev, idx); rte_atomic32_inc(&ctrl->refcnt); - DEBUG("%p: Tx queue %p: refcnt %d", (void *)priv, - (void *)ctrl, rte_atomic32_read(&ctrl->refcnt)); + DRV_LOG(DEBUG, "port %u Tx queue %u refcnt %d", + dev->data->port_id, + ctrl->idx, rte_atomic32_read(&ctrl->refcnt)); } return ctrl; } @@ -824,57 +819,47 @@ mlx5_priv_txq_get(struct priv *priv, uint16_t idx) /** * Release a Tx queue. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param idx * TX queue index. * * @return - * 0 on success, errno on failure. + * 1 while a reference on it exists, 0 when freed. 
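/*
 * Sketch of the offload merge in mlx5_txq_new() above: under the new
 * offload API the queue operates with the union of the per-queue request
 * and the port-wide Tx configuration, so e.g. a port-level
 * DEV_TX_OFFLOAD_TCP_TSO plus a queue-level DEV_TX_OFFLOAD_VLAN_INSERT
 * both end up enabled on the queue.
 */
#include <stdint.h>

static uint64_t
effective_txq_offloads(uint64_t queue_offloads, uint64_t port_offloads)
{
	return queue_offloads | port_offloads;
}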
*/ int -mlx5_priv_txq_release(struct priv *priv, uint16_t idx) +mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx) { - unsigned int i; + struct priv *priv = dev->data->dev_private; struct mlx5_txq_ctrl *txq; size_t page_size = sysconf(_SC_PAGESIZE); if (!(*priv->txqs)[idx]) return 0; txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq); - DEBUG("%p: Tx queue %p: refcnt %d", (void *)priv, - (void *)txq, rte_atomic32_read(&txq->refcnt)); - if (txq->ibv) { - int ret; - - ret = mlx5_priv_txq_ibv_release(priv, txq->ibv); - if (!ret) - txq->ibv = NULL; - } - for (i = 0; i != MLX5_PMD_TX_MP_CACHE; ++i) { - if (txq->txq.mp2mr[i]) { - priv_mr_release(priv, txq->txq.mp2mr[i]); - txq->txq.mp2mr[i] = NULL; - } - } + DRV_LOG(DEBUG, "port %u Tx queue %u: refcnt %d", dev->data->port_id, + txq->idx, rte_atomic32_read(&txq->refcnt)); + if (txq->ibv && !mlx5_txq_ibv_release(txq->ibv)) + txq->ibv = NULL; if (priv->uar_base) munmap((void *)RTE_ALIGN_FLOOR((uintptr_t)txq->txq.bf_reg, page_size), page_size); if (rte_atomic32_dec_and_test(&txq->refcnt)) { txq_free_elts(txq); + mlx5_mr_btree_free(&txq->txq.mr_ctrl.cache_bh); LIST_REMOVE(txq, next); rte_free(txq); (*priv->txqs)[idx] = NULL; return 0; } - return EBUSY; + return 1; } /** * Verify if the queue can be released. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param idx * TX queue index. * @@ -882,8 +867,9 @@ mlx5_priv_txq_release(struct priv *priv, uint16_t idx) * 1 if the queue can be released. */ int -mlx5_priv_txq_releasable(struct priv *priv, uint16_t idx) +mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx) { + struct priv *priv = dev->data->dev_private; struct mlx5_txq_ctrl *txq; if (!(*priv->txqs)[idx]) @@ -895,20 +881,22 @@ mlx5_priv_txq_releasable(struct priv *priv, uint16_t idx) /** * Verify the Tx Queue list is empty * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * - * @return the number of object not released. + * @return + * The number of object not released. */ int -mlx5_priv_txq_verify(struct priv *priv) +mlx5_txq_verify(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; struct mlx5_txq_ctrl *txq; int ret = 0; LIST_FOREACH(txq, &priv->txqsctrl, next) { - DEBUG("%p: Tx Queue %p still referenced", (void *)priv, - (void *)txq); + DRV_LOG(DEBUG, "port %u Tx queue %u still referenced", + dev->data->port_id, txq->idx); ++ret; } return ret; diff --git a/drivers/net/mlx5/mlx5_utils.h b/drivers/net/mlx5/mlx5_utils.h index e1bfb9cd..886f60e6 100644 --- a/drivers/net/mlx5/mlx5_utils.h +++ b/drivers/net/mlx5/mlx5_utils.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2015 6WIND S.A. - * Copyright 2015 Mellanox. + * Copyright 2015 Mellanox Technologies, Ltd */ #ifndef RTE_PMD_MLX5_UTILS_H_ @@ -61,14 +61,21 @@ pmd_drv_log_basename(const char *s) return s; } +extern int mlx5_logtype; + +#define PMD_DRV_LOG___(level, ...) \ + rte_log(RTE_LOG_ ## level, \ + mlx5_logtype, \ + RTE_FMT(MLX5_DRIVER_NAME ": " \ + RTE_FMT_HEAD(__VA_ARGS__,), \ + RTE_FMT_TAIL(__VA_ARGS__,))) + /* * When debugging is enabled (NDEBUG not defined), file, line and function * information replace the driver name (MLX5_DRIVER_NAME) in log messages. */ #ifndef NDEBUG -#define PMD_DRV_LOG___(level, ...) \ - ERRNO_SAFE(RTE_LOG(level, PMD, __VA_ARGS__)) #define PMD_DRV_LOG__(level, ...) \ PMD_DRV_LOG___(level, "%s:%u: %s(): " __VA_ARGS__) #define PMD_DRV_LOG_(level, s, ...) 
\ @@ -80,9 +87,6 @@ pmd_drv_log_basename(const char *s) __VA_ARGS__) #else /* NDEBUG */ - -#define PMD_DRV_LOG___(level, ...) \ - ERRNO_SAFE(RTE_LOG(level, PMD, MLX5_DRIVER_NAME ": " __VA_ARGS__)) #define PMD_DRV_LOG__(level, ...) \ PMD_DRV_LOG___(level, __VA_ARGS__) #define PMD_DRV_LOG_(level, s, ...) \ @@ -91,18 +95,15 @@ pmd_drv_log_basename(const char *s) #endif /* NDEBUG */ /* Generic printf()-like logging macro with automatic line feed. */ -#define PMD_DRV_LOG(level, ...) \ +#define DRV_LOG(level, ...) \ PMD_DRV_LOG_(level, \ __VA_ARGS__ PMD_DRV_LOG_STRIP PMD_DRV_LOG_OPAREN, \ PMD_DRV_LOG_CPAREN) -/* - * Like assert(), DEBUG() becomes a no-op and claim_zero() does not perform - * any check when debugging is disabled. - */ +/* claim_zero() does not perform any check when debugging is disabled. */ #ifndef NDEBUG -#define DEBUG(...) PMD_DRV_LOG(DEBUG, __VA_ARGS__) +#define DEBUG(...) DRV_LOG(DEBUG, __VA_ARGS__) #define claim_zero(...) assert((__VA_ARGS__) == 0) #define claim_nonzero(...) assert((__VA_ARGS__) != 0) @@ -114,9 +115,9 @@ pmd_drv_log_basename(const char *s) #endif /* NDEBUG */ -#define INFO(...) PMD_DRV_LOG(INFO, __VA_ARGS__) -#define WARN(...) PMD_DRV_LOG(WARNING, __VA_ARGS__) -#define ERROR(...) PMD_DRV_LOG(ERR, __VA_ARGS__) +#define INFO(...) DRV_LOG(INFO, __VA_ARGS__) +#define WARN(...) DRV_LOG(WARNING, __VA_ARGS__) +#define ERROR(...) DRV_LOG(ERR, __VA_ARGS__) /* Convenience macros for accessing mbuf fields. */ #define NEXT(m) ((m)->next) diff --git a/drivers/net/mlx5/mlx5_vlan.c b/drivers/net/mlx5/mlx5_vlan.c index 75c34562..c91d08be 100644 --- a/drivers/net/mlx5/mlx5_vlan.c +++ b/drivers/net/mlx5/mlx5_vlan.c @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2015 6WIND S.A. - * Copyright 2015 Mellanox. + * Copyright 2015 Mellanox Technologies, Ltd */ #include @@ -8,6 +8,12 @@ #include #include +/* + * Not needed by this file; included to work around the lack of off_t + * definition for mlx5dv.h with unpatched rdma-core versions. + */ +#include + /* Verbs headers do not support -pedantic. */ #ifdef PEDANTIC #pragma GCC diagnostic ignored "-Wpedantic" @@ -37,26 +43,24 @@ * Toggle filter. * * @return - * 0 on success, negative errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) { struct priv *priv = dev->data->dev_private; unsigned int i; - int ret = 0; - priv_lock(priv); - DEBUG("%p: %s VLAN filter ID %" PRIu16, - (void *)dev, (on ? "enable" : "disable"), vlan_id); + DRV_LOG(DEBUG, "port %u %s VLAN filter ID %" PRIu16, + dev->data->port_id, (on ? "enable" : "disable"), vlan_id); assert(priv->vlan_filter_n <= RTE_DIM(priv->vlan_filter)); for (i = 0; (i != priv->vlan_filter_n); ++i) if (priv->vlan_filter[i] == vlan_id) break; /* Check if there's room for another VLAN filter. */ if (i == RTE_DIM(priv->vlan_filter)) { - ret = -ENOMEM; - goto out; + rte_errno = ENOMEM; + return -rte_errno; } if (i < priv->vlan_filter_n) { assert(priv->vlan_filter_n != 0); @@ -79,37 +83,49 @@ mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) priv->vlan_filter[priv->vlan_filter_n] = vlan_id; ++priv->vlan_filter_n; } - if (dev->data->dev_started) - priv_dev_traffic_restart(priv, dev); out: - priv_unlock(priv); - return ret; + if (dev->data->dev_started) + return mlx5_traffic_restart(dev); + return 0; } /** - * Set/reset VLAN stripping for a specific queue. + * Callback to set/reset VLAN stripping for a specific queue. 
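/*
 * The reworked macros above route all messages through rte_log() with a
 * driver-private mlx5_logtype. Sketch of the registration that backs
 * them, assumed to run once at constructor time; the log type name and
 * default level follow the usual PMD convention and are assumptions
 * here, not taken from this hunk.
 */
#include <rte_eal.h>
#include <rte_log.h>

int mlx5_logtype;

RTE_INIT(mlx5_log_init);
static void
mlx5_log_init(void)
{
	mlx5_logtype = rte_log_register("pmd.net.mlx5");
	if (mlx5_logtype >= 0)
		rte_log_set_level(mlx5_logtype, RTE_LOG_NOTICE);
}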
 /**
- * Set/reset VLAN stripping for a specific queue.
+ * Callback to set/reset VLAN stripping for a specific queue.
  *
- * @param priv
- *   Pointer to private structure.
- * @param idx
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param queue
  *   RX queue index.
  * @param on
  *   Enable/disable VLAN stripping.
  */
-static void
-priv_vlan_strip_queue_set(struct priv *priv, uint16_t idx, int on)
+void
+mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
 {
-	struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
+	struct priv *priv = dev->data->dev_private;
+	struct mlx5_rxq_data *rxq = (*priv->rxqs)[queue];
 	struct mlx5_rxq_ctrl *rxq_ctrl =
 		container_of(rxq, struct mlx5_rxq_ctrl, rxq);
 	struct ibv_wq_attr mod;
 	uint16_t vlan_offloads =
 		(on ? IBV_WQ_FLAGS_CVLAN_STRIPPING : 0) |
 		0;
-	int err;
+	int ret;
 
-	DEBUG("set VLAN offloads 0x%x for port %d queue %d",
-	      vlan_offloads, rxq->port_id, idx);
+	/* Validate hw support */
+	if (!priv->config.hw_vlan_strip) {
+		DRV_LOG(ERR, "port %u VLAN stripping is not supported",
+			dev->data->port_id);
+		return;
+	}
+	/* Validate queue number */
+	if (queue >= priv->rxqs_n) {
+		DRV_LOG(ERR, "port %u VLAN stripping, invalid queue number %d",
+			dev->data->port_id, queue);
+		return;
+	}
+	DRV_LOG(DEBUG, "port %u set VLAN offloads 0x%x for port %u queue %d",
+		dev->data->port_id, vlan_offloads, rxq->port_id, queue);
 	if (!rxq_ctrl->ibv) {
 		/* Update related bits in RX queue. */
 		rxq->vlan_strip = !!on;
@@ -120,50 +136,16 @@ priv_vlan_strip_queue_set(struct priv *priv, uint16_t idx, int on)
 		.flags_mask = IBV_WQ_FLAGS_CVLAN_STRIPPING,
 		.flags = vlan_offloads,
 	};
-
-	err = mlx5_glue->modify_wq(rxq_ctrl->ibv->wq, &mod);
-	if (err) {
-		ERROR("%p: failed to modified stripping mode: %s",
-		      (void *)priv, strerror(err));
+	ret = mlx5_glue->modify_wq(rxq_ctrl->ibv->wq, &mod);
+	if (ret) {
+		DRV_LOG(ERR, "port %u failed to modify stripping mode: %s",
+			dev->data->port_id, strerror(rte_errno));
 		return;
 	}
-
 	/* Update related bits in RX queue. */
 	rxq->vlan_strip = !!on;
 }
 
-/**
- * Callback to set/reset VLAN stripping for a specific queue.
- *
- * @param dev
- *   Pointer to Ethernet device structure.
- * @param queue
- *   RX queue index.
- * @param on
- *   Enable/disable VLAN stripping.
- */
-void
-mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
-{
-	struct priv *priv = dev->data->dev_private;
-
-	/* Validate hw support */
-	if (!priv->config.hw_vlan_strip) {
-		ERROR("VLAN stripping is not supported");
-		return;
-	}
-
-	/* Validate queue number */
-	if (queue >= priv->rxqs_n) {
-		ERROR("VLAN stripping, invalid queue number %d", queue);
-		return;
-	}
-
-	priv_lock(priv);
-	priv_vlan_strip_queue_set(priv, queue, on);
-	priv_unlock(priv);
-}
-
 /**
  * Callback to set/reset VLAN offloads for a port.
  *
@@ -171,6 +153,9 @@ mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
  *   Pointer to Ethernet device structure.
  * @param mask
  *   VLAN offload bit mask.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 int
 mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask)
@@ -183,16 +168,13 @@ mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 			  DEV_RX_OFFLOAD_VLAN_STRIP);
 
 	if (!priv->config.hw_vlan_strip) {
-		ERROR("VLAN stripping is not supported");
+		DRV_LOG(ERR, "port %u VLAN stripping is not supported",
+			dev->data->port_id);
 		return 0;
 	}
-
-	/* Run on every RX queue and set/reset VLAN stripping.
*/ - priv_lock(priv); for (i = 0; (i != priv->rxqs_n); i++) - priv_vlan_strip_queue_set(priv, i, hw_vlan_strip); - priv_unlock(priv); + mlx5_vlan_strip_queue_set(dev, i, hw_vlan_strip); } - return 0; } diff --git a/drivers/net/mrvl/Makefile b/drivers/net/mrvl/Makefile deleted file mode 100644 index f75e53c6..00000000 --- a/drivers/net/mrvl/Makefile +++ /dev/null @@ -1,68 +0,0 @@ -# BSD LICENSE -# -# Copyright(c) 2017 Marvell International Ltd. -# Copyright(c) 2017 Semihalf. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in -# the documentation and/or other materials provided with the -# distribution. -# * Neither the name of the copyright holder nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -include $(RTE_SDK)/mk/rte.vars.mk - -ifneq ($(MAKECMDGOALS),clean) -ifneq ($(MAKECMDGOALS),config) -ifeq ($(LIBMUSDK_PATH),) -$(error "Please define LIBMUSDK_PATH environment variable") -endif -endif -endif - -# library name -LIB = librte_pmd_mrvl.a - -# library version -LIBABIVER := 1 - -# versioning export map -EXPORT_MAP := rte_pmd_mrvl_version.map - -# external library dependencies -CFLAGS += -I$(LIBMUSDK_PATH)/include -CFLAGS += -DMVCONF_TYPES_PUBLIC -CFLAGS += -DMVCONF_DMA_PHYS_ADDR_T_PUBLIC -CFLAGS += $(WERROR_FLAGS) -CFLAGS += -O3 -LDLIBS += -L$(LIBMUSDK_PATH)/lib -LDLIBS += -lmusdk -LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring -LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_cfgfile -LDLIBS += -lrte_bus_vdev - -# library source files -SRCS-$(CONFIG_RTE_LIBRTE_MRVL_PMD) += mrvl_ethdev.c -SRCS-$(CONFIG_RTE_LIBRTE_MRVL_PMD) += mrvl_qos.c - -include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/net/mrvl/mrvl_ethdev.c b/drivers/net/mrvl/mrvl_ethdev.c deleted file mode 100644 index 705c4bd8..00000000 --- a/drivers/net/mrvl/mrvl_ethdev.c +++ /dev/null @@ -1,2511 +0,0 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2017 Marvell International Ltd. - * Copyright(c) 2017 Semihalf. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Semihalf nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include -#include -#include -#include -#include - -/* Unluckily, container_of is defined by both DPDK and MUSDK, - * we'll declare only one version. - * - * Note that it is not used in this PMD anyway. - */ -#ifdef container_of -#undef container_of -#endif - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "mrvl_ethdev.h" -#include "mrvl_qos.h" - -/* bitmask with reserved hifs */ -#define MRVL_MUSDK_HIFS_RESERVED 0x0F -/* bitmask with reserved bpools */ -#define MRVL_MUSDK_BPOOLS_RESERVED 0x07 -/* bitmask with reserved kernel RSS tables */ -#define MRVL_MUSDK_RSS_RESERVED 0x01 -/* maximum number of available hifs */ -#define MRVL_MUSDK_HIFS_MAX 9 - -/* prefetch shift */ -#define MRVL_MUSDK_PREFETCH_SHIFT 2 - -/* TCAM has 25 entries reserved for uc/mc filter entries */ -#define MRVL_MAC_ADDRS_MAX 25 -#define MRVL_MATCH_LEN 16 -#define MRVL_PKT_EFFEC_OFFS (MRVL_PKT_OFFS + MV_MH_SIZE) -/* Maximum allowable packet size */ -#define MRVL_PKT_SIZE_MAX (10240 - MV_MH_SIZE) - -#define MRVL_IFACE_NAME_ARG "iface" -#define MRVL_CFG_ARG "cfg" - -#define MRVL_BURST_SIZE 64 - -#define MRVL_ARP_LENGTH 28 - -#define MRVL_COOKIE_ADDR_INVALID ~0ULL - -#define MRVL_COOKIE_HIGH_ADDR_SHIFT (sizeof(pp2_cookie_t) * 8) -#define MRVL_COOKIE_HIGH_ADDR_MASK (~0ULL << MRVL_COOKIE_HIGH_ADDR_SHIFT) - -/* Memory size (in bytes) for MUSDK dma buffers */ -#define MRVL_MUSDK_DMA_MEMSIZE 41943040 - -/** Port Rx offload capabilities */ -#define MRVL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_FILTER | \ - DEV_RX_OFFLOAD_JUMBO_FRAME | \ - DEV_RX_OFFLOAD_CRC_STRIP | \ - DEV_RX_OFFLOAD_CHECKSUM) - -/** Port Tx offloads capabilities */ -#define MRVL_TX_OFFLOADS (DEV_TX_OFFLOAD_IPV4_CKSUM | \ - DEV_TX_OFFLOAD_UDP_CKSUM | \ - DEV_TX_OFFLOAD_TCP_CKSUM) - -static const char * const valid_args[] = { - MRVL_IFACE_NAME_ARG, - MRVL_CFG_ARG, - NULL -}; - -static int used_hifs = MRVL_MUSDK_HIFS_RESERVED; -static struct pp2_hif *hifs[RTE_MAX_LCORE]; -static int used_bpools[PP2_NUM_PKT_PROC] = { - MRVL_MUSDK_BPOOLS_RESERVED, - MRVL_MUSDK_BPOOLS_RESERVED -}; - -struct pp2_bpool *mrvl_port_to_bpool_lookup[RTE_MAX_ETHPORTS]; -int mrvl_port_bpool_size[PP2_NUM_PKT_PROC][PP2_BPOOL_NUM_POOLS][RTE_MAX_LCORE]; -uint64_t cookie_addr_high = MRVL_COOKIE_ADDR_INVALID; - -struct mrvl_ifnames { - const char 
*names[PP2_NUM_ETH_PPIO * PP2_NUM_PKT_PROC]; - int idx; -}; - -/* - * To use buffer harvesting based on loopback port shadow queue structure - * was introduced for buffers information bookkeeping. - * - * Before sending the packet, related buffer information (pp2_buff_inf) is - * stored in shadow queue. After packet is transmitted no longer used - * packet buffer is released back to it's original hardware pool, - * on condition it originated from interface. - * In case it was generated by application itself i.e: mbuf->port field is - * 0xff then its released to software mempool. - */ -struct mrvl_shadow_txq { - int head; /* write index - used when sending buffers */ - int tail; /* read index - used when releasing buffers */ - u16 size; /* queue occupied size */ - u16 num_to_release; /* number of buffers sent, that can be released */ - struct buff_release_entry ent[MRVL_PP2_TX_SHADOWQ_SIZE]; /* q entries */ -}; - -struct mrvl_rxq { - struct mrvl_priv *priv; - struct rte_mempool *mp; - int queue_id; - int port_id; - int cksum_enabled; - uint64_t bytes_recv; - uint64_t drop_mac; -}; - -struct mrvl_txq { - struct mrvl_priv *priv; - int queue_id; - int port_id; - uint64_t bytes_sent; - struct mrvl_shadow_txq shadow_txqs[RTE_MAX_LCORE]; -}; - -static int mrvl_lcore_first; -static int mrvl_lcore_last; -static int mrvl_dev_num; - -static int mrvl_fill_bpool(struct mrvl_rxq *rxq, int num); -static inline void mrvl_free_sent_buffers(struct pp2_ppio *ppio, - struct pp2_hif *hif, unsigned int core_id, - struct mrvl_shadow_txq *sq, int qid, int force); - -static inline int -mrvl_get_bpool_size(int pp2_id, int pool_id) -{ - int i; - int size = 0; - - for (i = mrvl_lcore_first; i <= mrvl_lcore_last; i++) - size += mrvl_port_bpool_size[pp2_id][pool_id][i]; - - return size; -} - -static inline int -mrvl_reserve_bit(int *bitmap, int max) -{ - int n = sizeof(*bitmap) * 8 - __builtin_clz(*bitmap); - - if (n >= max) - return -1; - - *bitmap |= 1 << n; - - return n; -} - -static int -mrvl_init_hif(int core_id) -{ - struct pp2_hif_params params; - char match[MRVL_MATCH_LEN]; - int ret; - - ret = mrvl_reserve_bit(&used_hifs, MRVL_MUSDK_HIFS_MAX); - if (ret < 0) { - RTE_LOG(ERR, PMD, "Failed to allocate hif %d\n", core_id); - return ret; - } - - snprintf(match, sizeof(match), "hif-%d", ret); - memset(¶ms, 0, sizeof(params)); - params.match = match; - params.out_size = MRVL_PP2_AGGR_TXQD_MAX; - ret = pp2_hif_init(¶ms, &hifs[core_id]); - if (ret) { - RTE_LOG(ERR, PMD, "Failed to initialize hif %d\n", core_id); - return ret; - } - - return 0; -} - -static inline struct pp2_hif* -mrvl_get_hif(struct mrvl_priv *priv, int core_id) -{ - int ret; - - if (likely(hifs[core_id] != NULL)) - return hifs[core_id]; - - rte_spinlock_lock(&priv->lock); - - ret = mrvl_init_hif(core_id); - if (ret < 0) { - RTE_LOG(ERR, PMD, "Failed to allocate hif %d\n", core_id); - goto out; - } - - if (core_id < mrvl_lcore_first) - mrvl_lcore_first = core_id; - - if (core_id > mrvl_lcore_last) - mrvl_lcore_last = core_id; -out: - rte_spinlock_unlock(&priv->lock); - - return hifs[core_id]; -} - -/** - * Configure rss based on dpdk rss configuration. - * - * @param priv - * Pointer to private structure. - * @param rss_conf - * Pointer to RSS configuration. - * - * @return - * 0 on success, negative error value otherwise. 
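/*
 * Note: mrvl_reserve_bit() above doubles as a tiny index allocator:
 * __builtin_clz() locates the highest set bit, so "bits in use" is also the
 * next free index. Worked example with the reserved-hif mask: used_hifs
 * starts at MRVL_MUSDK_HIFS_RESERVED = 0x0F, so n = 32 - clz(0x0F) =
 * 32 - 28 = 4, hif 4 is handed out and the bitmap becomes 0x1F. A
 * standalone illustration:
 */
#include <stdio.h>

int
main(void)
{
	int bitmap = 0x0F;	/* bits 0-3 pre-reserved, as for the kernel hifs */
	int n = sizeof(bitmap) * 8 - __builtin_clz(bitmap);

	bitmap |= 1 << n;
	printf("allocated index %d, bitmap now 0x%x\n", n, bitmap);
	return 0;	/* prints: allocated index 4, bitmap now 0x1f */
}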
- */ -static int -mrvl_configure_rss(struct mrvl_priv *priv, struct rte_eth_rss_conf *rss_conf) -{ - if (rss_conf->rss_key) - RTE_LOG(WARNING, PMD, "Changing hash key is not supported\n"); - - if (rss_conf->rss_hf == 0) { - priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE; - } else if (rss_conf->rss_hf & ETH_RSS_IPV4) { - priv->ppio_params.inqs_params.hash_type = - PP2_PPIO_HASH_T_2_TUPLE; - } else if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) { - priv->ppio_params.inqs_params.hash_type = - PP2_PPIO_HASH_T_5_TUPLE; - priv->rss_hf_tcp = 1; - } else if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) { - priv->ppio_params.inqs_params.hash_type = - PP2_PPIO_HASH_T_5_TUPLE; - priv->rss_hf_tcp = 0; - } else { - return -EINVAL; - } - - return 0; -} - -/** - * Ethernet device configuration. - * - * Prepare the driver for a given number of TX and RX queues and - * configure RSS. - * - * @param dev - * Pointer to Ethernet device structure. - * - * @return - * 0 on success, negative error value otherwise. - */ -static int -mrvl_dev_configure(struct rte_eth_dev *dev) -{ - struct mrvl_priv *priv = dev->data->dev_private; - int ret; - - if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_NONE && - dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) { - RTE_LOG(INFO, PMD, "Unsupported rx multi queue mode %d\n", - dev->data->dev_conf.rxmode.mq_mode); - return -EINVAL; - } - - if (!(dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP)) { - RTE_LOG(INFO, PMD, - "L2 CRC stripping is always enabled in hw\n"); - dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_CRC_STRIP; - } - - if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP) { - RTE_LOG(INFO, PMD, "VLAN stripping not supported\n"); - return -EINVAL; - } - - if (dev->data->dev_conf.rxmode.split_hdr_size) { - RTE_LOG(INFO, PMD, "Split headers not supported\n"); - return -EINVAL; - } - - if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) { - RTE_LOG(INFO, PMD, "RX Scatter/Gather not supported\n"); - return -EINVAL; - } - - if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO) { - RTE_LOG(INFO, PMD, "LRO not supported\n"); - return -EINVAL; - } - - if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) - dev->data->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len - - ETHER_HDR_LEN - ETHER_CRC_LEN; - - ret = mrvl_configure_rxqs(priv, dev->data->port_id, - dev->data->nb_rx_queues); - if (ret < 0) - return ret; - - priv->ppio_params.outqs_params.num_outqs = dev->data->nb_tx_queues; - priv->ppio_params.maintain_stats = 1; - priv->nb_rx_queues = dev->data->nb_rx_queues; - - if (dev->data->nb_rx_queues == 1 && - dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) { - RTE_LOG(WARNING, PMD, "Disabling hash for 1 rx queue\n"); - priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE; - - return 0; - } - - return mrvl_configure_rss(priv, - &dev->data->dev_conf.rx_adv_conf.rss_conf); -} - -/** - * DPDK callback to change the MTU. - * - * Setting the MTU affects hardware MRU (packets larger than the MRU - * will be dropped). - * - * @param dev - * Pointer to Ethernet device structure. - * @param mtu - * New MTU. - * - * @return - * 0 on success, negative error value otherwise. 
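/*
 * Note: the rss_hf mapping in mrvl_configure_rss() above is exclusive
 * rather than additive: one PP2 hash profile is programmed at a time and
 * the first matching bit wins (2-tuple for plain IPv4, 5-tuple for TCP/UDP
 * over IPv4). A compact restatement of the same decision table, assuming
 * the MUSDK pp2 definitions and rte_ethdev.h ETH_RSS_* flags are in scope:
 */
static enum pp2_ppio_hash_type
example_rss_to_pp2_hash(uint64_t rss_hf)
{
	if (rss_hf == 0)
		return PP2_PPIO_HASH_T_NONE;
	if (rss_hf & ETH_RSS_IPV4)
		return PP2_PPIO_HASH_T_2_TUPLE;
	if (rss_hf & (ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_UDP))
		return PP2_PPIO_HASH_T_5_TUPLE;
	return PP2_PPIO_HASH_T_NONE;	/* anything else is rejected (-EINVAL) above */
}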
- */ -static int -mrvl_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) -{ - struct mrvl_priv *priv = dev->data->dev_private; - /* extra MV_MH_SIZE bytes are required for Marvell tag */ - uint16_t mru = mtu + MV_MH_SIZE + ETHER_HDR_LEN + ETHER_CRC_LEN; - int ret; - - if (mtu < ETHER_MIN_MTU || mru > MRVL_PKT_SIZE_MAX) - return -EINVAL; - - if (!priv->ppio) - return 0; - - ret = pp2_ppio_set_mru(priv->ppio, mru); - if (ret) - return ret; - - return pp2_ppio_set_mtu(priv->ppio, mtu); -} - -/** - * DPDK callback to bring the link up. - * - * @param dev - * Pointer to Ethernet device structure. - * - * @return - * 0 on success, negative error value otherwise. - */ -static int -mrvl_dev_set_link_up(struct rte_eth_dev *dev) -{ - struct mrvl_priv *priv = dev->data->dev_private; - int ret; - - if (!priv->ppio) - return -EPERM; - - ret = pp2_ppio_enable(priv->ppio); - if (ret) - return ret; - - /* - * mtu/mru can be updated if pp2_ppio_enable() was called at least once - * as pp2_ppio_enable() changes port->t_mode from default 0 to - * PP2_TRAFFIC_INGRESS_EGRESS. - * - * Set mtu to default DPDK value here. - */ - ret = mrvl_mtu_set(dev, dev->data->mtu); - if (ret) - pp2_ppio_disable(priv->ppio); - - return ret; -} - -/** - * DPDK callback to bring the link down. - * - * @param dev - * Pointer to Ethernet device structure. - * - * @return - * 0 on success, negative error value otherwise. - */ -static int -mrvl_dev_set_link_down(struct rte_eth_dev *dev) -{ - struct mrvl_priv *priv = dev->data->dev_private; - - if (!priv->ppio) - return -EPERM; - - return pp2_ppio_disable(priv->ppio); -} - -/** - * DPDK callback to start the device. - * - * @param dev - * Pointer to Ethernet device structure. - * - * @return - * 0 on success, negative errno value on failure. - */ -static int -mrvl_dev_start(struct rte_eth_dev *dev) -{ - struct mrvl_priv *priv = dev->data->dev_private; - char match[MRVL_MATCH_LEN]; - int ret = 0, def_init_size; - - snprintf(match, sizeof(match), "ppio-%d:%d", - priv->pp_id, priv->ppio_id); - priv->ppio_params.match = match; - - /* - * Calculate the minimum bpool size for refill feature as follows: - * 2 default burst sizes multiply by number of rx queues. - * If the bpool size will be below this value, new buffers will - * be added to the pool. - */ - priv->bpool_min_size = priv->nb_rx_queues * MRVL_BURST_SIZE * 2; - - /* In case initial bpool size configured in queues setup is - * smaller than minimum size add more buffers - */ - def_init_size = priv->bpool_min_size + MRVL_BURST_SIZE * 2; - if (priv->bpool_init_size < def_init_size) { - int buffs_to_add = def_init_size - priv->bpool_init_size; - - priv->bpool_init_size += buffs_to_add; - ret = mrvl_fill_bpool(dev->data->rx_queues[0], buffs_to_add); - if (ret) - RTE_LOG(ERR, PMD, "Failed to add buffers to bpool\n"); - } - - /* - * Calculate the maximum bpool size for refill feature as follows: - * maximum number of descriptors in rx queue multiply by number - * of rx queues plus minimum bpool size. - * In case the bpool size will exceed this value, superfluous buffers - * will be removed - */ - priv->bpool_max_size = (priv->nb_rx_queues * MRVL_PP2_RXD_MAX) + - priv->bpool_min_size; - - ret = pp2_ppio_init(&priv->ppio_params, &priv->ppio); - if (ret) { - RTE_LOG(ERR, PMD, "Failed to init ppio\n"); - return ret; - } - - /* - * In case there are some some stale uc/mc mac addresses flush them - * here. 
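/*
 * Note: the MRU computed by mrvl_mtu_set() above is the MTU plus everything
 * the hardware counts around the L3 payload: the 2-byte Marvell header
 * (MV_MH_SIZE), the 14-byte Ethernet header and the 4-byte CRC. Worked
 * example for the default MTU:
 *
 *	mru = 1500 + 2 + 14 + 4 = 1520 bytes
 *
 * and the guard rejects any MRU above MRVL_PKT_SIZE_MAX, itself defined as
 * 10240 - MV_MH_SIZE earlier in this file.
 */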
It cannot be done during mrvl_dev_close() as port information - * is already gone at that point (due to pp2_ppio_deinit() in - * mrvl_dev_stop()). - */ - if (!priv->uc_mc_flushed) { - ret = pp2_ppio_flush_mac_addrs(priv->ppio, 1, 1); - if (ret) { - RTE_LOG(ERR, PMD, - "Failed to flush uc/mc filter list\n"); - goto out; - } - priv->uc_mc_flushed = 1; - } - - if (!priv->vlan_flushed) { - ret = pp2_ppio_flush_vlan(priv->ppio); - if (ret) { - RTE_LOG(ERR, PMD, "Failed to flush vlan list\n"); - /* - * TODO - * once pp2_ppio_flush_vlan() is supported jump to out - * goto out; - */ - } - priv->vlan_flushed = 1; - } - - /* For default QoS config, don't start classifier. */ - if (mrvl_qos_cfg) { - ret = mrvl_start_qos_mapping(priv); - if (ret) { - RTE_LOG(ERR, PMD, "Failed to setup QoS mapping\n"); - goto out; - } - } - - ret = mrvl_dev_set_link_up(dev); - if (ret) { - RTE_LOG(ERR, PMD, "Failed to set link up\n"); - goto out; - } - - return 0; -out: - RTE_LOG(ERR, PMD, "Failed to start device\n"); - pp2_ppio_deinit(priv->ppio); - return ret; -} - -/** - * Flush receive queues. - * - * @param dev - * Pointer to Ethernet device structure. - */ -static void -mrvl_flush_rx_queues(struct rte_eth_dev *dev) -{ - int i; - - RTE_LOG(INFO, PMD, "Flushing rx queues\n"); - for (i = 0; i < dev->data->nb_rx_queues; i++) { - int ret, num; - - do { - struct mrvl_rxq *q = dev->data->rx_queues[i]; - struct pp2_ppio_desc descs[MRVL_PP2_RXD_MAX]; - - num = MRVL_PP2_RXD_MAX; - ret = pp2_ppio_recv(q->priv->ppio, - q->priv->rxq_map[q->queue_id].tc, - q->priv->rxq_map[q->queue_id].inq, - descs, (uint16_t *)&num); - } while (ret == 0 && num); - } -} - -/** - * Flush transmit shadow queues. - * - * @param dev - * Pointer to Ethernet device structure. - */ -static void -mrvl_flush_tx_shadow_queues(struct rte_eth_dev *dev) -{ - int i, j; - struct mrvl_txq *txq; - - RTE_LOG(INFO, PMD, "Flushing tx shadow queues\n"); - for (i = 0; i < dev->data->nb_tx_queues; i++) { - txq = (struct mrvl_txq *)dev->data->tx_queues[i]; - - for (j = 0; j < RTE_MAX_LCORE; j++) { - struct mrvl_shadow_txq *sq; - - if (!hifs[j]) - continue; - - sq = &txq->shadow_txqs[j]; - mrvl_free_sent_buffers(txq->priv->ppio, - hifs[j], j, sq, txq->queue_id, 1); - while (sq->tail != sq->head) { - uint64_t addr = cookie_addr_high | - sq->ent[sq->tail].buff.cookie; - rte_pktmbuf_free( - (struct rte_mbuf *)addr); - sq->tail = (sq->tail + 1) & - MRVL_PP2_TX_SHADOWQ_MASK; - } - memset(sq, 0, sizeof(*sq)); - } - } -} - -/** - * Flush hardware bpool (buffer-pool). - * - * @param dev - * Pointer to Ethernet device structure. - */ -static void -mrvl_flush_bpool(struct rte_eth_dev *dev) -{ - struct mrvl_priv *priv = dev->data->dev_private; - struct pp2_hif *hif; - uint32_t num; - int ret; - unsigned int core_id = rte_lcore_id(); - - if (core_id == LCORE_ID_ANY) - core_id = 0; - - hif = mrvl_get_hif(priv, core_id); - - ret = pp2_bpool_get_num_buffs(priv->bpool, &num); - if (ret) { - RTE_LOG(ERR, PMD, "Failed to get bpool buffers number\n"); - return; - } - - while (num--) { - struct pp2_buff_inf inf; - uint64_t addr; - - ret = pp2_bpool_get_buff(hif, priv->bpool, &inf); - if (ret) - break; - - addr = cookie_addr_high | inf.cookie; - rte_pktmbuf_free((struct rte_mbuf *)addr); - } -} - -/** - * DPDK callback to stop the device. - * - * @param dev - * Pointer to Ethernet device structure. 
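/*
 * Note: the shadow queues flushed above are power-of-two rings, so indices
 * advance with "& MRVL_PP2_TX_SHADOWQ_MASK" instead of a modulo; this only
 * works because the mask equals MRVL_PP2_TX_SHADOWQ_SIZE - 1. A minimal
 * sketch of the drain idiom, reusing the driver's own types:
 */
static void
example_drain_shadowq(struct mrvl_shadow_txq *sq)
{
	while (sq->tail != sq->head) {
		/* the flush code frees sq->ent[sq->tail].buff here */
		sq->tail = (sq->tail + 1) & MRVL_PP2_TX_SHADOWQ_MASK;
	}
	sq->size = 0;	/* ring is empty once tail catches up with head */
}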
- */ -static void -mrvl_dev_stop(struct rte_eth_dev *dev) -{ - struct mrvl_priv *priv = dev->data->dev_private; - - mrvl_dev_set_link_down(dev); - mrvl_flush_rx_queues(dev); - mrvl_flush_tx_shadow_queues(dev); - if (priv->qos_tbl) { - pp2_cls_qos_tbl_deinit(priv->qos_tbl); - priv->qos_tbl = NULL; - } - pp2_ppio_deinit(priv->ppio); - priv->ppio = NULL; -} - -/** - * DPDK callback to close the device. - * - * @param dev - * Pointer to Ethernet device structure. - */ -static void -mrvl_dev_close(struct rte_eth_dev *dev) -{ - struct mrvl_priv *priv = dev->data->dev_private; - size_t i; - - for (i = 0; i < priv->ppio_params.inqs_params.num_tcs; ++i) { - struct pp2_ppio_tc_params *tc_params = - &priv->ppio_params.inqs_params.tcs_params[i]; - - if (tc_params->inqs_params) { - rte_free(tc_params->inqs_params); - tc_params->inqs_params = NULL; - } - } - - mrvl_flush_bpool(dev); -} - -/** - * DPDK callback to retrieve physical link information. - * - * @param dev - * Pointer to Ethernet device structure. - * @param wait_to_complete - * Wait for request completion (ignored). - * - * @return - * 0 on success, negative error value otherwise. - */ -static int -mrvl_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused) -{ - /* - * TODO - * once MUSDK provides necessary API use it here - */ - struct mrvl_priv *priv = dev->data->dev_private; - struct ethtool_cmd edata; - struct ifreq req; - int ret, fd, link_up; - - if (!priv->ppio) - return -EPERM; - - edata.cmd = ETHTOOL_GSET; - - strcpy(req.ifr_name, dev->data->name); - req.ifr_data = (void *)&edata; - - fd = socket(AF_INET, SOCK_DGRAM, 0); - if (fd == -1) - return -EFAULT; - - ret = ioctl(fd, SIOCETHTOOL, &req); - if (ret == -1) { - close(fd); - return -EFAULT; - } - - close(fd); - - switch (ethtool_cmd_speed(&edata)) { - case SPEED_10: - dev->data->dev_link.link_speed = ETH_SPEED_NUM_10M; - break; - case SPEED_100: - dev->data->dev_link.link_speed = ETH_SPEED_NUM_100M; - break; - case SPEED_1000: - dev->data->dev_link.link_speed = ETH_SPEED_NUM_1G; - break; - case SPEED_10000: - dev->data->dev_link.link_speed = ETH_SPEED_NUM_10G; - break; - default: - dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE; - } - - dev->data->dev_link.link_duplex = edata.duplex ? ETH_LINK_FULL_DUPLEX : - ETH_LINK_HALF_DUPLEX; - dev->data->dev_link.link_autoneg = edata.autoneg ? ETH_LINK_AUTONEG : - ETH_LINK_FIXED; - pp2_ppio_get_link_state(priv->ppio, &link_up); - dev->data->dev_link.link_status = link_up ? ETH_LINK_UP : ETH_LINK_DOWN; - - return 0; -} - -/** - * DPDK callback to enable promiscuous mode. - * - * @param dev - * Pointer to Ethernet device structure. - */ -static void -mrvl_promiscuous_enable(struct rte_eth_dev *dev) -{ - struct mrvl_priv *priv = dev->data->dev_private; - int ret; - - if (!priv->ppio) - return; - - ret = pp2_ppio_set_promisc(priv->ppio, 1); - if (ret) - RTE_LOG(ERR, PMD, "Failed to enable promiscuous mode\n"); -} - -/** - * DPDK callback to enable allmulti mode. - * - * @param dev - * Pointer to Ethernet device structure. - */ -static void -mrvl_allmulticast_enable(struct rte_eth_dev *dev) -{ - struct mrvl_priv *priv = dev->data->dev_private; - int ret; - - if (!priv->ppio) - return; - - ret = pp2_ppio_set_mc_promisc(priv->ppio, 1); - if (ret) - RTE_LOG(ERR, PMD, "Failed enable all-multicast mode\n"); -} - -/** - * DPDK callback to disable promiscuous mode. - * - * @param dev - * Pointer to Ethernet device structure. 
- */ -static void -mrvl_promiscuous_disable(struct rte_eth_dev *dev) -{ - struct mrvl_priv *priv = dev->data->dev_private; - int ret; - - if (!priv->ppio) - return; - - ret = pp2_ppio_set_promisc(priv->ppio, 0); - if (ret) - RTE_LOG(ERR, PMD, "Failed to disable promiscuous mode\n"); -} - -/** - * DPDK callback to disable allmulticast mode. - * - * @param dev - * Pointer to Ethernet device structure. - */ -static void -mrvl_allmulticast_disable(struct rte_eth_dev *dev) -{ - struct mrvl_priv *priv = dev->data->dev_private; - int ret; - - if (!priv->ppio) - return; - - ret = pp2_ppio_set_mc_promisc(priv->ppio, 0); - if (ret) - RTE_LOG(ERR, PMD, "Failed to disable all-multicast mode\n"); -} - -/** - * DPDK callback to remove a MAC address. - * - * @param dev - * Pointer to Ethernet device structure. - * @param index - * MAC address index. - */ -static void -mrvl_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index) -{ - struct mrvl_priv *priv = dev->data->dev_private; - char buf[ETHER_ADDR_FMT_SIZE]; - int ret; - - if (!priv->ppio) - return; - - ret = pp2_ppio_remove_mac_addr(priv->ppio, - dev->data->mac_addrs[index].addr_bytes); - if (ret) { - ether_format_addr(buf, sizeof(buf), - &dev->data->mac_addrs[index]); - RTE_LOG(ERR, PMD, "Failed to remove mac %s\n", buf); - } -} - -/** - * DPDK callback to add a MAC address. - * - * @param dev - * Pointer to Ethernet device structure. - * @param mac_addr - * MAC address to register. - * @param index - * MAC address index. - * @param vmdq - * VMDq pool index to associate address with (unused). - * - * @return - * 0 on success, negative error value otherwise. - */ -static int -mrvl_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr, - uint32_t index, uint32_t vmdq __rte_unused) -{ - struct mrvl_priv *priv = dev->data->dev_private; - char buf[ETHER_ADDR_FMT_SIZE]; - int ret; - - if (index == 0) - /* For setting index 0, mrvl_mac_addr_set() should be used.*/ - return -1; - - if (!priv->ppio) - return 0; - - /* - * Maximum number of uc addresses can be tuned via kernel module mvpp2x - * parameter uc_filter_max. Maximum number of mc addresses is then - * MRVL_MAC_ADDRS_MAX - uc_filter_max. Currently it defaults to 4 and - * 21 respectively. - * - * If more than uc_filter_max uc addresses were added to filter list - * then NIC will switch to promiscuous mode automatically. - * - * If more than MRVL_MAC_ADDRS_MAX - uc_filter_max number mc addresses - * were added to filter list then NIC will switch to all-multicast mode - * automatically. - */ - ret = pp2_ppio_add_mac_addr(priv->ppio, mac_addr->addr_bytes); - if (ret) { - ether_format_addr(buf, sizeof(buf), mac_addr); - RTE_LOG(ERR, PMD, "Failed to add mac %s\n", buf); - return -1; - } - - return 0; -} - -/** - * DPDK callback to set the primary MAC address. - * - * @param dev - * Pointer to Ethernet device structure. - * @param mac_addr - * MAC address to register. - */ -static void -mrvl_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr) -{ - struct mrvl_priv *priv = dev->data->dev_private; - int ret; - - if (!priv->ppio) - return; - - ret = pp2_ppio_set_mac_addr(priv->ppio, mac_addr->addr_bytes); - if (ret) { - char buf[ETHER_ADDR_FMT_SIZE]; - ether_format_addr(buf, sizeof(buf), mac_addr); - RTE_LOG(ERR, PMD, "Failed to set mac to %s\n", buf); - } -} - -/** - * DPDK callback to get device statistics. - * - * @param dev - * Pointer to Ethernet device structure. - * @param stats - * Stats structure output buffer. 
- * - * @return - * 0 on success, negative error value otherwise. - */ -static int -mrvl_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) -{ - struct mrvl_priv *priv = dev->data->dev_private; - struct pp2_ppio_statistics ppio_stats; - uint64_t drop_mac = 0; - unsigned int i, idx, ret; - - if (!priv->ppio) - return -EPERM; - - for (i = 0; i < dev->data->nb_rx_queues; i++) { - struct mrvl_rxq *rxq = dev->data->rx_queues[i]; - struct pp2_ppio_inq_statistics rx_stats; - - if (!rxq) - continue; - - idx = rxq->queue_id; - if (unlikely(idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)) { - RTE_LOG(ERR, PMD, - "rx queue %d stats out of range (0 - %d)\n", - idx, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); - continue; - } - - ret = pp2_ppio_inq_get_statistics(priv->ppio, - priv->rxq_map[idx].tc, - priv->rxq_map[idx].inq, - &rx_stats, 0); - if (unlikely(ret)) { - RTE_LOG(ERR, PMD, - "Failed to update rx queue %d stats\n", idx); - break; - } - - stats->q_ibytes[idx] = rxq->bytes_recv; - stats->q_ipackets[idx] = rx_stats.enq_desc - rxq->drop_mac; - stats->q_errors[idx] = rx_stats.drop_early + - rx_stats.drop_fullq + - rx_stats.drop_bm + - rxq->drop_mac; - stats->ibytes += rxq->bytes_recv; - drop_mac += rxq->drop_mac; - } - - for (i = 0; i < dev->data->nb_tx_queues; i++) { - struct mrvl_txq *txq = dev->data->tx_queues[i]; - struct pp2_ppio_outq_statistics tx_stats; - - if (!txq) - continue; - - idx = txq->queue_id; - if (unlikely(idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)) { - RTE_LOG(ERR, PMD, - "tx queue %d stats out of range (0 - %d)\n", - idx, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); - } - - ret = pp2_ppio_outq_get_statistics(priv->ppio, idx, - &tx_stats, 0); - if (unlikely(ret)) { - RTE_LOG(ERR, PMD, - "Failed to update tx queue %d stats\n", idx); - break; - } - - stats->q_opackets[idx] = tx_stats.deq_desc; - stats->q_obytes[idx] = txq->bytes_sent; - stats->obytes += txq->bytes_sent; - } - - ret = pp2_ppio_get_statistics(priv->ppio, &ppio_stats, 0); - if (unlikely(ret)) { - RTE_LOG(ERR, PMD, "Failed to update port statistics\n"); - return ret; - } - - stats->ipackets += ppio_stats.rx_packets - drop_mac; - stats->opackets += ppio_stats.tx_packets; - stats->imissed += ppio_stats.rx_fullq_dropped + - ppio_stats.rx_bm_dropped + - ppio_stats.rx_early_dropped + - ppio_stats.rx_fifo_dropped + - ppio_stats.rx_cls_dropped; - stats->ierrors = drop_mac; - - return 0; -} - -/** - * DPDK callback to clear device statistics. - * - * @param dev - * Pointer to Ethernet device structure. - */ -static void -mrvl_stats_reset(struct rte_eth_dev *dev) -{ - struct mrvl_priv *priv = dev->data->dev_private; - int i; - - if (!priv->ppio) - return; - - for (i = 0; i < dev->data->nb_rx_queues; i++) { - struct mrvl_rxq *rxq = dev->data->rx_queues[i]; - - pp2_ppio_inq_get_statistics(priv->ppio, priv->rxq_map[i].tc, - priv->rxq_map[i].inq, NULL, 1); - rxq->bytes_recv = 0; - rxq->drop_mac = 0; - } - - for (i = 0; i < dev->data->nb_tx_queues; i++) { - struct mrvl_txq *txq = dev->data->tx_queues[i]; - - pp2_ppio_outq_get_statistics(priv->ppio, i, NULL, 1); - txq->bytes_sent = 0; - } - - pp2_ppio_get_statistics(priv->ppio, NULL, 1); -} - -/** - * DPDK callback to get information about the device. - * - * @param dev - * Pointer to Ethernet device structure (unused). - * @param info - * Info structure output buffer. 
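/*
 * Note: mrvl_stats_get() above merges two sources: byte counters kept by
 * the PMD itself (rxq->bytes_recv, txq->bytes_sent) and packet/drop
 * counters read from hardware. The composition, restated:
 *
 *	ipackets = hw rx_packets - drop_mac
 *	ierrors  = drop_mac
 *	imissed  = rx_fullq + rx_bm + rx_early + rx_fifo + rx_cls drops
 *
 * drop_mac is subtracted from ipackets and reported as ierrors so that
 * MAC-dropped frames are not counted twice.
 */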
- */ -static void -mrvl_dev_infos_get(struct rte_eth_dev *dev __rte_unused, - struct rte_eth_dev_info *info) -{ - info->speed_capa = ETH_LINK_SPEED_10M | - ETH_LINK_SPEED_100M | - ETH_LINK_SPEED_1G | - ETH_LINK_SPEED_10G; - - info->max_rx_queues = MRVL_PP2_RXQ_MAX; - info->max_tx_queues = MRVL_PP2_TXQ_MAX; - info->max_mac_addrs = MRVL_MAC_ADDRS_MAX; - - info->rx_desc_lim.nb_max = MRVL_PP2_RXD_MAX; - info->rx_desc_lim.nb_min = MRVL_PP2_RXD_MIN; - info->rx_desc_lim.nb_align = MRVL_PP2_RXD_ALIGN; - - info->tx_desc_lim.nb_max = MRVL_PP2_TXD_MAX; - info->tx_desc_lim.nb_min = MRVL_PP2_TXD_MIN; - info->tx_desc_lim.nb_align = MRVL_PP2_TXD_ALIGN; - - info->rx_offload_capa = MRVL_RX_OFFLOADS; - info->rx_queue_offload_capa = MRVL_RX_OFFLOADS; - - info->tx_offload_capa = MRVL_TX_OFFLOADS; - info->tx_queue_offload_capa = MRVL_TX_OFFLOADS; - - info->flow_type_rss_offloads = ETH_RSS_IPV4 | - ETH_RSS_NONFRAG_IPV4_TCP | - ETH_RSS_NONFRAG_IPV4_UDP; - - /* By default packets are dropped if no descriptors are available */ - info->default_rxconf.rx_drop_en = 1; - info->default_rxconf.offloads = DEV_RX_OFFLOAD_CRC_STRIP; - - info->max_rx_pktlen = MRVL_PKT_SIZE_MAX; -} - -/** - * Return supported packet types. - * - * @param dev - * Pointer to Ethernet device structure (unused). - * - * @return - * Const pointer to the table with supported packet types. - */ -static const uint32_t * -mrvl_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused) -{ - static const uint32_t ptypes[] = { - RTE_PTYPE_L2_ETHER, - RTE_PTYPE_L3_IPV4, - RTE_PTYPE_L3_IPV4_EXT, - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN, - RTE_PTYPE_L3_IPV6, - RTE_PTYPE_L3_IPV6_EXT, - RTE_PTYPE_L2_ETHER_ARP, - RTE_PTYPE_L4_TCP, - RTE_PTYPE_L4_UDP - }; - - return ptypes; -} - -/** - * DPDK callback to get information about specific receive queue. - * - * @param dev - * Pointer to Ethernet device structure. - * @param rx_queue_id - * Receive queue index. - * @param qinfo - * Receive queue information structure. - */ -static void mrvl_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id, - struct rte_eth_rxq_info *qinfo) -{ - struct mrvl_rxq *q = dev->data->rx_queues[rx_queue_id]; - struct mrvl_priv *priv = dev->data->dev_private; - int inq = priv->rxq_map[rx_queue_id].inq; - int tc = priv->rxq_map[rx_queue_id].tc; - struct pp2_ppio_tc_params *tc_params = - &priv->ppio_params.inqs_params.tcs_params[tc]; - - qinfo->mp = q->mp; - qinfo->nb_desc = tc_params->inqs_params[inq].size; -} - -/** - * DPDK callback to get information about specific transmit queue. - * - * @param dev - * Pointer to Ethernet device structure. - * @param tx_queue_id - * Transmit queue index. - * @param qinfo - * Transmit queue information structure. - */ -static void mrvl_txq_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id, - struct rte_eth_txq_info *qinfo) -{ - struct mrvl_priv *priv = dev->data->dev_private; - - qinfo->nb_desc = - priv->ppio_params.outqs_params.outqs_params[tx_queue_id].size; -} - -/** - * DPDK callback to Configure a VLAN filter. - * - * @param dev - * Pointer to Ethernet device structure. - * @param vlan_id - * VLAN ID to filter. - * @param on - * Toggle filter. - * - * @return - * 0 on success, negative error value otherwise. - */ -static int -mrvl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) -{ - struct mrvl_priv *priv = dev->data->dev_private; - - if (!priv->ppio) - return -EPERM; - - return on ? 
pp2_ppio_add_vlan(priv->ppio, vlan_id) : - pp2_ppio_remove_vlan(priv->ppio, vlan_id); -} - -/** - * Release buffers to hardware bpool (buffer-pool) - * - * @param rxq - * Receive queue pointer. - * @param num - * Number of buffers to release to bpool. - * - * @return - * 0 on success, negative error value otherwise. - */ -static int -mrvl_fill_bpool(struct mrvl_rxq *rxq, int num) -{ - struct buff_release_entry entries[MRVL_PP2_TXD_MAX]; - struct rte_mbuf *mbufs[MRVL_PP2_TXD_MAX]; - int i, ret; - unsigned int core_id; - struct pp2_hif *hif; - struct pp2_bpool *bpool; - - core_id = rte_lcore_id(); - if (core_id == LCORE_ID_ANY) - core_id = 0; - - hif = mrvl_get_hif(rxq->priv, core_id); - if (!hif) - return -1; - - bpool = rxq->priv->bpool; - - ret = rte_pktmbuf_alloc_bulk(rxq->mp, mbufs, num); - if (ret) - return ret; - - if (cookie_addr_high == MRVL_COOKIE_ADDR_INVALID) - cookie_addr_high = - (uint64_t)mbufs[0] & MRVL_COOKIE_HIGH_ADDR_MASK; - - for (i = 0; i < num; i++) { - if (((uint64_t)mbufs[i] & MRVL_COOKIE_HIGH_ADDR_MASK) - != cookie_addr_high) { - RTE_LOG(ERR, PMD, - "mbuf virtual addr high 0x%lx out of range\n", - (uint64_t)mbufs[i] >> 32); - goto out; - } - - entries[i].buff.addr = - rte_mbuf_data_iova_default(mbufs[i]); - entries[i].buff.cookie = (pp2_cookie_t)(uint64_t)mbufs[i]; - entries[i].bpool = bpool; - } - - pp2_bpool_put_buffs(hif, entries, (uint16_t *)&i); - mrvl_port_bpool_size[bpool->pp2_id][bpool->id][core_id] += i; - - if (i != num) - goto out; - - return 0; -out: - for (; i < num; i++) - rte_pktmbuf_free(mbufs[i]); - - return -1; -} - -/** - * Check whether requested rx queue offloads match port offloads. - * - * @param - * dev Pointer to the device. - * @param - * requested Bitmap of the requested offloads. - * - * @return - * 1 if requested offloads are okay, 0 otherwise. - */ -static int -mrvl_rx_queue_offloads_okay(struct rte_eth_dev *dev, uint64_t requested) -{ - uint64_t mandatory = dev->data->dev_conf.rxmode.offloads; - uint64_t supported = MRVL_RX_OFFLOADS; - uint64_t unsupported = requested & ~supported; - uint64_t missing = mandatory & ~requested; - - if (unsupported) { - RTE_LOG(ERR, PMD, "Some Rx offloads are not supported. " - "Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n", - requested, supported); - return 0; - } - - if (missing) { - RTE_LOG(ERR, PMD, "Some Rx offloads are missing. " - "Requested 0x%" PRIx64 " missing 0x%" PRIx64 ".\n", - requested, missing); - return 0; - } - - return 1; -} - -/** - * DPDK callback to configure the receive queue. - * - * @param dev - * Pointer to Ethernet device structure. - * @param idx - * RX queue index. - * @param desc - * Number of descriptors to configure in queue. - * @param socket - * NUMA socket on which memory must be allocated. - * @param conf - * Thresholds parameters. - * @param mp - * Memory pool for buffer allocations. - * - * @return - * 0 on success, negative error value otherwise. - */ -static int -mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, - unsigned int socket, - const struct rte_eth_rxconf *conf, - struct rte_mempool *mp) -{ - struct mrvl_priv *priv = dev->data->dev_private; - struct mrvl_rxq *rxq; - uint32_t min_size, - max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len; - int ret, tc, inq; - - if (!mrvl_rx_queue_offloads_okay(dev, conf->offloads)) - return -ENOTSUP; - - if (priv->rxq_map[idx].tc == MRVL_UNKNOWN_TC) { - /* - * Unknown TC mapping, mapping will not have a correct queue. 
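/*
 * Note: mrvl_fill_bpool() above depends on the cookie scheme set up at the
 * top of this file: pp2_cookie_t holds only the low bits of an mbuf's
 * virtual address, so every buffer must share the same high bits, cached
 * once in cookie_addr_high. Worked example assuming a 32-bit pp2_cookie_t:
 *
 *	mbuf   = 0x00007f2a12345680
 *	high   = mbuf & (~0ULL << 32) = 0x00007f2a00000000
 *	cookie = low 32 bits          =         0x12345680
 *	high | cookie == mbuf         (recovered on RX and bpool drain)
 *
 * An mbuf whose high bits differ from cookie_addr_high is rejected and
 * freed, which is the "out of range" error path above.
 */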
- */ - RTE_LOG(ERR, PMD, "Unknown TC mapping for queue %hu eth%hhu\n", - idx, priv->ppio_id); - return -EFAULT; - } - - min_size = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM - - MRVL_PKT_EFFEC_OFFS; - if (min_size < max_rx_pkt_len) { - RTE_LOG(ERR, PMD, - "Mbuf size must be increased to %u bytes to hold up to %u bytes of data.\n", - max_rx_pkt_len + RTE_PKTMBUF_HEADROOM + - MRVL_PKT_EFFEC_OFFS, - max_rx_pkt_len); - return -EINVAL; - } - - if (dev->data->rx_queues[idx]) { - rte_free(dev->data->rx_queues[idx]); - dev->data->rx_queues[idx] = NULL; - } - - rxq = rte_zmalloc_socket("rxq", sizeof(*rxq), 0, socket); - if (!rxq) - return -ENOMEM; - - rxq->priv = priv; - rxq->mp = mp; - rxq->cksum_enabled = - dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_IPV4_CKSUM; - rxq->queue_id = idx; - rxq->port_id = dev->data->port_id; - mrvl_port_to_bpool_lookup[rxq->port_id] = priv->bpool; - - tc = priv->rxq_map[rxq->queue_id].tc, - inq = priv->rxq_map[rxq->queue_id].inq; - priv->ppio_params.inqs_params.tcs_params[tc].inqs_params[inq].size = - desc; - - ret = mrvl_fill_bpool(rxq, desc); - if (ret) { - rte_free(rxq); - return ret; - } - - priv->bpool_init_size += desc; - - dev->data->rx_queues[idx] = rxq; - - return 0; -} - -/** - * DPDK callback to release the receive queue. - * - * @param rxq - * Generic receive queue pointer. - */ -static void -mrvl_rx_queue_release(void *rxq) -{ - struct mrvl_rxq *q = rxq; - struct pp2_ppio_tc_params *tc_params; - int i, num, tc, inq; - struct pp2_hif *hif; - unsigned int core_id = rte_lcore_id(); - - if (core_id == LCORE_ID_ANY) - core_id = 0; - - hif = mrvl_get_hif(q->priv, core_id); - - if (!q || !hif) - return; - - tc = q->priv->rxq_map[q->queue_id].tc; - inq = q->priv->rxq_map[q->queue_id].inq; - tc_params = &q->priv->ppio_params.inqs_params.tcs_params[tc]; - num = tc_params->inqs_params[inq].size; - for (i = 0; i < num; i++) { - struct pp2_buff_inf inf; - uint64_t addr; - - pp2_bpool_get_buff(hif, q->priv->bpool, &inf); - addr = cookie_addr_high | inf.cookie; - rte_pktmbuf_free((struct rte_mbuf *)addr); - } - - rte_free(q); -} - -/** - * Check whether requested tx queue offloads match port offloads. - * - * @param - * dev Pointer to the device. - * @param - * requested Bitmap of the requested offloads. - * - * @return - * 1 if requested offloads are okay, 0 otherwise. - */ -static int -mrvl_tx_queue_offloads_okay(struct rte_eth_dev *dev, uint64_t requested) -{ - uint64_t mandatory = dev->data->dev_conf.txmode.offloads; - uint64_t supported = MRVL_TX_OFFLOADS; - uint64_t unsupported = requested & ~supported; - uint64_t missing = mandatory & ~requested; - - if (unsupported) { - RTE_LOG(ERR, PMD, "Some Rx offloads are not supported. " - "Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n", - requested, supported); - return 0; - } - - if (missing) { - RTE_LOG(ERR, PMD, "Some Rx offloads are missing. " - "Requested 0x%" PRIx64 " missing 0x%" PRIx64 ".\n", - requested, missing); - return 0; - } - - return 1; -} - -/** - * DPDK callback to configure the transmit queue. - * - * @param dev - * Pointer to Ethernet device structure. - * @param idx - * Transmit queue index. - * @param desc - * Number of descriptors to configure in the queue. - * @param socket - * NUMA socket on which memory must be allocated. - * @param conf - * Thresholds parameters. - * - * @return - * 0 on success, negative error value otherwise. 
- */ -static int -mrvl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, - unsigned int socket, - const struct rte_eth_txconf *conf) -{ - struct mrvl_priv *priv = dev->data->dev_private; - struct mrvl_txq *txq; - - if (!mrvl_tx_queue_offloads_okay(dev, conf->offloads)) - return -ENOTSUP; - - if (dev->data->tx_queues[idx]) { - rte_free(dev->data->tx_queues[idx]); - dev->data->tx_queues[idx] = NULL; - } - - txq = rte_zmalloc_socket("txq", sizeof(*txq), 0, socket); - if (!txq) - return -ENOMEM; - - txq->priv = priv; - txq->queue_id = idx; - txq->port_id = dev->data->port_id; - dev->data->tx_queues[idx] = txq; - - priv->ppio_params.outqs_params.outqs_params[idx].size = desc; - priv->ppio_params.outqs_params.outqs_params[idx].weight = 1; - - return 0; -} - -/** - * DPDK callback to release the transmit queue. - * - * @param txq - * Generic transmit queue pointer. - */ -static void -mrvl_tx_queue_release(void *txq) -{ - struct mrvl_txq *q = txq; - - if (!q) - return; - - rte_free(q); -} - -/** - * Update RSS hash configuration - * - * @param dev - * Pointer to Ethernet device structure. - * @param rss_conf - * Pointer to RSS configuration. - * - * @return - * 0 on success, negative error value otherwise. - */ -static int -mrvl_rss_hash_update(struct rte_eth_dev *dev, - struct rte_eth_rss_conf *rss_conf) -{ - struct mrvl_priv *priv = dev->data->dev_private; - - return mrvl_configure_rss(priv, rss_conf); -} - -/** - * DPDK callback to get RSS hash configuration. - * - * @param dev - * Pointer to Ethernet device structure. - * @rss_conf - * Pointer to RSS configuration. - * - * @return - * Always 0. - */ -static int -mrvl_rss_hash_conf_get(struct rte_eth_dev *dev, - struct rte_eth_rss_conf *rss_conf) -{ - struct mrvl_priv *priv = dev->data->dev_private; - enum pp2_ppio_hash_type hash_type = - priv->ppio_params.inqs_params.hash_type; - - rss_conf->rss_key = NULL; - - if (hash_type == PP2_PPIO_HASH_T_NONE) - rss_conf->rss_hf = 0; - else if (hash_type == PP2_PPIO_HASH_T_2_TUPLE) - rss_conf->rss_hf = ETH_RSS_IPV4; - else if (hash_type == PP2_PPIO_HASH_T_5_TUPLE && priv->rss_hf_tcp) - rss_conf->rss_hf = ETH_RSS_NONFRAG_IPV4_TCP; - else if (hash_type == PP2_PPIO_HASH_T_5_TUPLE && !priv->rss_hf_tcp) - rss_conf->rss_hf = ETH_RSS_NONFRAG_IPV4_UDP; - - return 0; -} - -static const struct eth_dev_ops mrvl_ops = { - .dev_configure = mrvl_dev_configure, - .dev_start = mrvl_dev_start, - .dev_stop = mrvl_dev_stop, - .dev_set_link_up = mrvl_dev_set_link_up, - .dev_set_link_down = mrvl_dev_set_link_down, - .dev_close = mrvl_dev_close, - .link_update = mrvl_link_update, - .promiscuous_enable = mrvl_promiscuous_enable, - .allmulticast_enable = mrvl_allmulticast_enable, - .promiscuous_disable = mrvl_promiscuous_disable, - .allmulticast_disable = mrvl_allmulticast_disable, - .mac_addr_remove = mrvl_mac_addr_remove, - .mac_addr_add = mrvl_mac_addr_add, - .mac_addr_set = mrvl_mac_addr_set, - .mtu_set = mrvl_mtu_set, - .stats_get = mrvl_stats_get, - .stats_reset = mrvl_stats_reset, - .dev_infos_get = mrvl_dev_infos_get, - .dev_supported_ptypes_get = mrvl_dev_supported_ptypes_get, - .rxq_info_get = mrvl_rxq_info_get, - .txq_info_get = mrvl_txq_info_get, - .vlan_filter_set = mrvl_vlan_filter_set, - .rx_queue_setup = mrvl_rx_queue_setup, - .rx_queue_release = mrvl_rx_queue_release, - .tx_queue_setup = mrvl_tx_queue_setup, - .tx_queue_release = mrvl_tx_queue_release, - .rss_hash_update = mrvl_rss_hash_update, - .rss_hash_conf_get = mrvl_rss_hash_conf_get, -}; - -/** - * Return packet type information and 
l3/l4 offsets. - * - * @param desc - * Pointer to the received packet descriptor. - * @param l3_offset - * l3 packet offset. - * @param l4_offset - * l4 packet offset. - * - * @return - * Packet type information. - */ -static inline uint64_t -mrvl_desc_to_packet_type_and_offset(struct pp2_ppio_desc *desc, - uint8_t *l3_offset, uint8_t *l4_offset) -{ - enum pp2_inq_l3_type l3_type; - enum pp2_inq_l4_type l4_type; - uint64_t packet_type; - - pp2_ppio_inq_desc_get_l3_info(desc, &l3_type, l3_offset); - pp2_ppio_inq_desc_get_l4_info(desc, &l4_type, l4_offset); - - packet_type = RTE_PTYPE_L2_ETHER; - - switch (l3_type) { - case PP2_INQ_L3_TYPE_IPV4_NO_OPTS: - packet_type |= RTE_PTYPE_L3_IPV4; - break; - case PP2_INQ_L3_TYPE_IPV4_OK: - packet_type |= RTE_PTYPE_L3_IPV4_EXT; - break; - case PP2_INQ_L3_TYPE_IPV4_TTL_ZERO: - packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN; - break; - case PP2_INQ_L3_TYPE_IPV6_NO_EXT: - packet_type |= RTE_PTYPE_L3_IPV6; - break; - case PP2_INQ_L3_TYPE_IPV6_EXT: - packet_type |= RTE_PTYPE_L3_IPV6_EXT; - break; - case PP2_INQ_L3_TYPE_ARP: - packet_type |= RTE_PTYPE_L2_ETHER_ARP; - /* - * In case of ARP l4_offset is set to wrong value. - * Set it to proper one so that later on mbuf->l3_len can be - * calculated subtracting l4_offset and l3_offset. - */ - *l4_offset = *l3_offset + MRVL_ARP_LENGTH; - break; - default: - RTE_LOG(DEBUG, PMD, "Failed to recognise l3 packet type\n"); - break; - } - - switch (l4_type) { - case PP2_INQ_L4_TYPE_TCP: - packet_type |= RTE_PTYPE_L4_TCP; - break; - case PP2_INQ_L4_TYPE_UDP: - packet_type |= RTE_PTYPE_L4_UDP; - break; - default: - RTE_LOG(DEBUG, PMD, "Failed to recognise l4 packet type\n"); - break; - } - - return packet_type; -} - -/** - * Get offload information from the received packet descriptor. - * - * @param desc - * Pointer to the received packet descriptor. - * - * @return - * Mbuf offload flags. - */ -static inline uint64_t -mrvl_desc_to_ol_flags(struct pp2_ppio_desc *desc) -{ - uint64_t flags; - enum pp2_inq_desc_status status; - - status = pp2_ppio_inq_desc_get_l3_pkt_error(desc); - if (unlikely(status != PP2_DESC_ERR_OK)) - flags = PKT_RX_IP_CKSUM_BAD; - else - flags = PKT_RX_IP_CKSUM_GOOD; - - status = pp2_ppio_inq_desc_get_l4_pkt_error(desc); - if (unlikely(status != PP2_DESC_ERR_OK)) - flags |= PKT_RX_L4_CKSUM_BAD; - else - flags |= PKT_RX_L4_CKSUM_GOOD; - - return flags; -} - -/** - * DPDK callback for receive. - * - * @param rxq - * Generic pointer to the receive queue. - * @param rx_pkts - * Array to store received packets. - * @param nb_pkts - * Maximum number of packets in array. - * - * @return - * Number of packets successfully received. 
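/*
 * Note: the l3/l4 offsets produced above turn directly into mbuf header
 * lengths in the RX burst below:
 *
 *	mbuf->l2_len = l3_offset;
 *	mbuf->l3_len = l4_offset - l3_offset;
 *
 * Worked example for TCP over plain IPv4: l3_offset = 14 (Ethernet header)
 * and l4_offset = 34, giving l2_len = 14 and l3_len = 20. The ARP special
 * case forces l4_offset = l3_offset + MRVL_ARP_LENGTH (28) so the same
 * subtraction still yields a sensible l3_len.
 */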
- */ -static uint16_t -mrvl_rx_pkt_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) -{ - struct mrvl_rxq *q = rxq; - struct pp2_ppio_desc descs[nb_pkts]; - struct pp2_bpool *bpool; - int i, ret, rx_done = 0; - int num; - struct pp2_hif *hif; - unsigned int core_id = rte_lcore_id(); - - hif = mrvl_get_hif(q->priv, core_id); - - if (unlikely(!q->priv->ppio || !hif)) - return 0; - - bpool = q->priv->bpool; - - ret = pp2_ppio_recv(q->priv->ppio, q->priv->rxq_map[q->queue_id].tc, - q->priv->rxq_map[q->queue_id].inq, descs, &nb_pkts); - if (unlikely(ret < 0)) { - RTE_LOG(ERR, PMD, "Failed to receive packets\n"); - return 0; - } - mrvl_port_bpool_size[bpool->pp2_id][bpool->id][core_id] -= nb_pkts; - - for (i = 0; i < nb_pkts; i++) { - struct rte_mbuf *mbuf; - uint8_t l3_offset, l4_offset; - enum pp2_inq_desc_status status; - uint64_t addr; - - if (likely(nb_pkts - i > MRVL_MUSDK_PREFETCH_SHIFT)) { - struct pp2_ppio_desc *pref_desc; - u64 pref_addr; - - pref_desc = &descs[i + MRVL_MUSDK_PREFETCH_SHIFT]; - pref_addr = cookie_addr_high | - pp2_ppio_inq_desc_get_cookie(pref_desc); - rte_mbuf_prefetch_part1((struct rte_mbuf *)(pref_addr)); - rte_mbuf_prefetch_part2((struct rte_mbuf *)(pref_addr)); - } - - addr = cookie_addr_high | - pp2_ppio_inq_desc_get_cookie(&descs[i]); - mbuf = (struct rte_mbuf *)addr; - rte_pktmbuf_reset(mbuf); - - /* drop packet in case of mac, overrun or resource error */ - status = pp2_ppio_inq_desc_get_l2_pkt_error(&descs[i]); - if (unlikely(status != PP2_DESC_ERR_OK)) { - struct pp2_buff_inf binf = { - .addr = rte_mbuf_data_iova_default(mbuf), - .cookie = (pp2_cookie_t)(uint64_t)mbuf, - }; - - pp2_bpool_put_buff(hif, bpool, &binf); - mrvl_port_bpool_size - [bpool->pp2_id][bpool->id][core_id]++; - q->drop_mac++; - continue; - } - - mbuf->data_off += MRVL_PKT_EFFEC_OFFS; - mbuf->pkt_len = pp2_ppio_inq_desc_get_pkt_len(&descs[i]); - mbuf->data_len = mbuf->pkt_len; - mbuf->port = q->port_id; - mbuf->packet_type = - mrvl_desc_to_packet_type_and_offset(&descs[i], - &l3_offset, - &l4_offset); - mbuf->l2_len = l3_offset; - mbuf->l3_len = l4_offset - l3_offset; - - if (likely(q->cksum_enabled)) - mbuf->ol_flags = mrvl_desc_to_ol_flags(&descs[i]); - - rx_pkts[rx_done++] = mbuf; - q->bytes_recv += mbuf->pkt_len; - } - - if (rte_spinlock_trylock(&q->priv->lock) == 1) { - num = mrvl_get_bpool_size(bpool->pp2_id, bpool->id); - - if (unlikely(num <= q->priv->bpool_min_size || - (!rx_done && num < q->priv->bpool_init_size))) { - ret = mrvl_fill_bpool(q, MRVL_BURST_SIZE); - if (ret) - RTE_LOG(ERR, PMD, "Failed to fill bpool\n"); - } else if (unlikely(num > q->priv->bpool_max_size)) { - int i; - int pkt_to_remove = num - q->priv->bpool_init_size; - struct rte_mbuf *mbuf; - struct pp2_buff_inf buff; - - RTE_LOG(DEBUG, PMD, - "\nport-%d:%d: bpool %d oversize - remove %d buffers (pool size: %d -> %d)\n", - bpool->pp2_id, q->priv->ppio->port_id, - bpool->id, pkt_to_remove, num, - q->priv->bpool_init_size); - - for (i = 0; i < pkt_to_remove; i++) { - ret = pp2_bpool_get_buff(hif, bpool, &buff); - if (ret) - break; - mbuf = (struct rte_mbuf *) - (cookie_addr_high | buff.cookie); - rte_pktmbuf_free(mbuf); - } - mrvl_port_bpool_size - [bpool->pp2_id][bpool->id][core_id] -= i; - } - rte_spinlock_unlock(&q->priv->lock); - } - - return rx_done; -} - -/** - * Prepare offload information. - * - * @param ol_flags - * Offload flags. - * @param packet_type - * Packet type bitfield. - * @param l3_type - * Pointer to the pp2_ouq_l3_type structure. 
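/*
 * Note: the rte_spinlock_trylock() block at the end of the RX burst above
 * is an opportunistic buffer-pool regulator built on the thresholds from
 * mrvl_dev_start():
 *
 *	bpool_min_size = nb_rx_queues * 2 * MRVL_BURST_SIZE
 *	bpool_max_size = nb_rx_queues * MRVL_PP2_RXD_MAX + bpool_min_size
 *
 * Below the minimum the pool is topped up by one burst; above the maximum
 * it is trimmed back toward bpool_init_size. Using trylock keeps the fast
 * path non-blocking: if another lcore is already regulating, this poll
 * simply skips the adjustment.
 */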
- * @param l4_type - * Pointer to the pp2_outq_l4_type structure. - * @param gen_l3_cksum - * Will be set to 1 in case l3 checksum is computed. - * @param l4_cksum - * Will be set to 1 in case l4 checksum is computed. - * - * @return - * 0 on success, negative error value otherwise. - */ -static inline int -mrvl_prepare_proto_info(uint64_t ol_flags, uint32_t packet_type, - enum pp2_outq_l3_type *l3_type, - enum pp2_outq_l4_type *l4_type, - int *gen_l3_cksum, - int *gen_l4_cksum) -{ - /* - * Based on ol_flags prepare information - * for pp2_ppio_outq_desc_set_proto_info() which setups descriptor - * for offloading. - */ - if (ol_flags & PKT_TX_IPV4) { - *l3_type = PP2_OUTQ_L3_TYPE_IPV4; - *gen_l3_cksum = ol_flags & PKT_TX_IP_CKSUM ? 1 : 0; - } else if (ol_flags & PKT_TX_IPV6) { - *l3_type = PP2_OUTQ_L3_TYPE_IPV6; - /* no checksum for ipv6 header */ - *gen_l3_cksum = 0; - } else { - /* if something different then stop processing */ - return -1; - } - - ol_flags &= PKT_TX_L4_MASK; - if ((packet_type & RTE_PTYPE_L4_TCP) && - ol_flags == PKT_TX_TCP_CKSUM) { - *l4_type = PP2_OUTQ_L4_TYPE_TCP; - *gen_l4_cksum = 1; - } else if ((packet_type & RTE_PTYPE_L4_UDP) && - ol_flags == PKT_TX_UDP_CKSUM) { - *l4_type = PP2_OUTQ_L4_TYPE_UDP; - *gen_l4_cksum = 1; - } else { - *l4_type = PP2_OUTQ_L4_TYPE_OTHER; - /* no checksum for other type */ - *gen_l4_cksum = 0; - } - - return 0; -} - -/** - * Release already sent buffers to bpool (buffer-pool). - * - * @param ppio - * Pointer to the port structure. - * @param hif - * Pointer to the MUSDK hardware interface. - * @param sq - * Pointer to the shadow queue. - * @param qid - * Queue id number. - * @param force - * Force releasing packets. - */ -static inline void -mrvl_free_sent_buffers(struct pp2_ppio *ppio, struct pp2_hif *hif, - unsigned int core_id, struct mrvl_shadow_txq *sq, - int qid, int force) -{ - struct buff_release_entry *entry; - uint16_t nb_done = 0, num = 0, skip_bufs = 0; - int i; - - pp2_ppio_get_num_outq_done(ppio, hif, qid, &nb_done); - - sq->num_to_release += nb_done; - - if (likely(!force && - sq->num_to_release < MRVL_PP2_BUF_RELEASE_BURST_SIZE)) - return; - - nb_done = sq->num_to_release; - sq->num_to_release = 0; - - for (i = 0; i < nb_done; i++) { - entry = &sq->ent[sq->tail + num]; - if (unlikely(!entry->buff.addr)) { - RTE_LOG(ERR, PMD, - "Shadow memory @%d: cookie(%lx), pa(%lx)!\n", - sq->tail, (u64)entry->buff.cookie, - (u64)entry->buff.addr); - skip_bufs = 1; - goto skip; - } - - if (unlikely(!entry->bpool)) { - struct rte_mbuf *mbuf; - - mbuf = (struct rte_mbuf *) - (cookie_addr_high | entry->buff.cookie); - rte_pktmbuf_free(mbuf); - skip_bufs = 1; - goto skip; - } - - mrvl_port_bpool_size - [entry->bpool->pp2_id][entry->bpool->id][core_id]++; - num++; - if (unlikely(sq->tail + num == MRVL_PP2_TX_SHADOWQ_SIZE)) - goto skip; - continue; -skip: - if (likely(num)) - pp2_bpool_put_buffs(hif, &sq->ent[sq->tail], &num); - num += skip_bufs; - sq->tail = (sq->tail + num) & MRVL_PP2_TX_SHADOWQ_MASK; - sq->size -= num; - num = 0; - skip_bufs = 0; - } - - if (likely(num)) { - pp2_bpool_put_buffs(hif, &sq->ent[sq->tail], &num); - sq->tail = (sq->tail + num) & MRVL_PP2_TX_SHADOWQ_MASK; - sq->size -= num; - } -} - -/** - * DPDK callback for transmit. - * - * @param txq - * Generic pointer transmit queue. - * @param tx_pkts - * Packets to transmit. - * @param nb_pkts - * Number of packets in array. - * - * @return - * Number of packets successfully transmitted. 
- */ -static uint16_t -mrvl_tx_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) -{ - struct mrvl_txq *q = txq; - struct mrvl_shadow_txq *sq; - struct pp2_hif *hif; - struct pp2_ppio_desc descs[nb_pkts]; - unsigned int core_id = rte_lcore_id(); - int i, ret, bytes_sent = 0; - uint16_t num, sq_free_size; - uint64_t addr; - - hif = mrvl_get_hif(q->priv, core_id); - sq = &q->shadow_txqs[core_id]; - - if (unlikely(!q->priv->ppio || !hif)) - return 0; - - if (sq->size) - mrvl_free_sent_buffers(q->priv->ppio, hif, core_id, - sq, q->queue_id, 0); - - sq_free_size = MRVL_PP2_TX_SHADOWQ_SIZE - sq->size - 1; - if (unlikely(nb_pkts > sq_free_size)) { - RTE_LOG(DEBUG, PMD, - "No room in shadow queue for %d packets! %d packets will be sent.\n", - nb_pkts, sq_free_size); - nb_pkts = sq_free_size; - } - - for (i = 0; i < nb_pkts; i++) { - struct rte_mbuf *mbuf = tx_pkts[i]; - int gen_l3_cksum, gen_l4_cksum; - enum pp2_outq_l3_type l3_type; - enum pp2_outq_l4_type l4_type; - - if (likely(nb_pkts - i > MRVL_MUSDK_PREFETCH_SHIFT)) { - struct rte_mbuf *pref_pkt_hdr; - - pref_pkt_hdr = tx_pkts[i + MRVL_MUSDK_PREFETCH_SHIFT]; - rte_mbuf_prefetch_part1(pref_pkt_hdr); - rte_mbuf_prefetch_part2(pref_pkt_hdr); - } - - sq->ent[sq->head].buff.cookie = (pp2_cookie_t)(uint64_t)mbuf; - sq->ent[sq->head].buff.addr = - rte_mbuf_data_iova_default(mbuf); - sq->ent[sq->head].bpool = - (unlikely(mbuf->port >= RTE_MAX_ETHPORTS || - mbuf->refcnt > 1)) ? NULL : - mrvl_port_to_bpool_lookup[mbuf->port]; - sq->head = (sq->head + 1) & MRVL_PP2_TX_SHADOWQ_MASK; - sq->size++; - - pp2_ppio_outq_desc_reset(&descs[i]); - pp2_ppio_outq_desc_set_phys_addr(&descs[i], - rte_pktmbuf_iova(mbuf)); - pp2_ppio_outq_desc_set_pkt_offset(&descs[i], 0); - pp2_ppio_outq_desc_set_pkt_len(&descs[i], - rte_pktmbuf_pkt_len(mbuf)); - - bytes_sent += rte_pktmbuf_pkt_len(mbuf); - /* - * in case unsupported ol_flags were passed - * do not update descriptor offload information - */ - ret = mrvl_prepare_proto_info(mbuf->ol_flags, mbuf->packet_type, - &l3_type, &l4_type, &gen_l3_cksum, - &gen_l4_cksum); - if (unlikely(ret)) - continue; - - pp2_ppio_outq_desc_set_proto_info(&descs[i], l3_type, l4_type, - mbuf->l2_len, - mbuf->l2_len + mbuf->l3_len, - gen_l3_cksum, gen_l4_cksum); - } - - num = nb_pkts; - pp2_ppio_send(q->priv->ppio, hif, q->queue_id, descs, &nb_pkts); - /* number of packets that were not sent */ - if (unlikely(num > nb_pkts)) { - for (i = nb_pkts; i < num; i++) { - sq->head = (MRVL_PP2_TX_SHADOWQ_SIZE + sq->head - 1) & - MRVL_PP2_TX_SHADOWQ_MASK; - addr = cookie_addr_high | sq->ent[sq->head].buff.cookie; - bytes_sent -= - rte_pktmbuf_pkt_len((struct rte_mbuf *)addr); - } - sq->size -= num - nb_pkts; - } - - q->bytes_sent += bytes_sent; - - return nb_pkts; -} - -/** - * Initialize packet processor. - * - * @return - * 0 on success, negative error value otherwise. - */ -static int -mrvl_init_pp2(void) -{ - struct pp2_init_params init_params; - - memset(&init_params, 0, sizeof(init_params)); - init_params.hif_reserved_map = MRVL_MUSDK_HIFS_RESERVED; - init_params.bm_pool_reserved_map = MRVL_MUSDK_BPOOLS_RESERVED; - init_params.rss_tbl_reserved_map = MRVL_MUSDK_RSS_RESERVED; - - return pp2_init(&init_params); -} - -/** - * Deinitialize packet processor. - * - * @return - * 0 on success, negative error value otherwise. - */ -static void -mrvl_deinit_pp2(void) -{ - pp2_deinit(); -} - -/** - * Create private device structure. - * - * @param dev_name - * Pointer to the port name passed in the initialization parameters. 
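The shadow queue used by mrvl_tx_pkt_burst() above is a power-of-two ring: indices advance with a bit mask instead of a modulo, and the head is walked backwards to unwind entries for descriptors the hardware did not accept. The index discipline in isolation; the ring size is illustrative:

#include <stdio.h>

#define RING_SIZE 2048                  /* must be a power of two */
#define RING_MASK (RING_SIZE - 1)

int main(void)
{
	unsigned int head = 0, tail = 0, size = 0;
	int i;

	/* enqueue 5 entries */
	for (i = 0; i < 5; i++) {
		head = (head + 1) & RING_MASK;
		size++;
	}

	/* hardware accepted only 3: unwind the last 2, mirroring the
	 * "num > nb_pkts" path in mrvl_tx_pkt_burst()
	 */
	for (i = 0; i < 2; i++)
		head = (RING_SIZE + head - 1) & RING_MASK;
	size -= 2;

	/* consume what was really sent */
	tail = (tail + 3) & RING_MASK;
	size -= 3;

	printf("head=%u tail=%u size=%u\n", head, tail, size);
	return 0;
}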
- * - * @return - * Pointer to the newly allocated private device structure. - */ -static struct mrvl_priv * -mrvl_priv_create(const char *dev_name) -{ - struct pp2_bpool_params bpool_params; - char match[MRVL_MATCH_LEN]; - struct mrvl_priv *priv; - int ret, bpool_bit; - - priv = rte_zmalloc_socket(dev_name, sizeof(*priv), 0, rte_socket_id()); - if (!priv) - return NULL; - - ret = pp2_netdev_get_ppio_info((char *)(uintptr_t)dev_name, - &priv->pp_id, &priv->ppio_id); - if (ret) - goto out_free_priv; - - bpool_bit = mrvl_reserve_bit(&used_bpools[priv->pp_id], - PP2_BPOOL_NUM_POOLS); - if (bpool_bit < 0) - goto out_free_priv; - priv->bpool_bit = bpool_bit; - - snprintf(match, sizeof(match), "pool-%d:%d", priv->pp_id, - priv->bpool_bit); - memset(&bpool_params, 0, sizeof(bpool_params)); - bpool_params.match = match; - bpool_params.buff_len = MRVL_PKT_SIZE_MAX + MRVL_PKT_EFFEC_OFFS; - ret = pp2_bpool_init(&bpool_params, &priv->bpool); - if (ret) - goto out_clear_bpool_bit; - - priv->ppio_params.type = PP2_PPIO_T_NIC; - rte_spinlock_init(&priv->lock); - - return priv; -out_clear_bpool_bit: - used_bpools[priv->pp_id] &= ~(1 << priv->bpool_bit); -out_free_priv: - rte_free(priv); - return NULL; -} - -/** - * Create device representing Ethernet port. - * - * @param name - * Pointer to the port's name. - * - * @return - * 0 on success, negative error value otherwise. - */ -static int -mrvl_eth_dev_create(struct rte_vdev_device *vdev, const char *name) -{ - int ret, fd = socket(AF_INET, SOCK_DGRAM, 0); - struct rte_eth_dev *eth_dev; - struct mrvl_priv *priv; - struct ifreq req; - - eth_dev = rte_eth_dev_allocate(name); - if (!eth_dev) - return -ENOMEM; - - priv = mrvl_priv_create(name); - if (!priv) { - ret = -ENOMEM; - goto out_free_dev; - } - - eth_dev->data->mac_addrs = - rte_zmalloc("mac_addrs", - ETHER_ADDR_LEN * MRVL_MAC_ADDRS_MAX, 0); - if (!eth_dev->data->mac_addrs) { - RTE_LOG(ERR, PMD, "Failed to allocate space for eth addrs\n"); - ret = -ENOMEM; - goto out_free_priv; - } - - memset(&req, 0, sizeof(req)); - strcpy(req.ifr_name, name); - ret = ioctl(fd, SIOCGIFHWADDR, &req); - if (ret) - goto out_free_mac; - - memcpy(eth_dev->data->mac_addrs[0].addr_bytes, - req.ifr_addr.sa_data, ETHER_ADDR_LEN); - - eth_dev->rx_pkt_burst = mrvl_rx_pkt_burst; - eth_dev->tx_pkt_burst = mrvl_tx_pkt_burst; - eth_dev->data->kdrv = RTE_KDRV_NONE; - eth_dev->data->dev_private = priv; - eth_dev->device = &vdev->device; - eth_dev->dev_ops = &mrvl_ops; - - return 0; -out_free_mac: - rte_free(eth_dev->data->mac_addrs); -out_free_dev: - rte_eth_dev_release_port(eth_dev); -out_free_priv: - rte_free(priv); - - return ret; -} - -/** - * Cleanup previously created device representing Ethernet port. - * - * @param name - * Pointer to the port name. - */ -static void -mrvl_eth_dev_destroy(const char *name) -{ - struct rte_eth_dev *eth_dev; - struct mrvl_priv *priv; - - eth_dev = rte_eth_dev_allocated(name); - if (!eth_dev) - return; - - priv = eth_dev->data->dev_private; - pp2_bpool_deinit(priv->bpool); - used_bpools[priv->pp_id] &= ~(1 << priv->bpool_bit); - rte_free(priv); - rte_free(eth_dev->data->mac_addrs); - rte_eth_dev_release_port(eth_dev); -} - -/** - * Callback used by rte_kvargs_process() during argument parsing. - * - * @param key - * Pointer to the parsed key (unused). - * @param value - * Pointer to the parsed value. - * @param extra_args - * Pointer to the extra arguments which contains address of the - * table of pointers to parsed interface names. - * - * @return - * Always 0. 
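mrvl_eth_dev_create() above reads the port MAC address from the kernel via a SIOCGIFHWADDR ioctl rather than through MUSDK. The same query as a self-contained Linux program; the interface name is an example:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>

int main(void)
{
	struct ifreq req;
	unsigned char *mac;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&req, 0, sizeof(req));
	strncpy(req.ifr_name, "eth0", sizeof(req.ifr_name) - 1); /* example */

	if (ioctl(fd, SIOCGIFHWADDR, &req) < 0) {
		close(fd);
		return 1;
	}

	mac = (unsigned char *)req.ifr_hwaddr.sa_data;
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	close(fd);
	return 0;
}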
- */ -static int -mrvl_get_ifnames(const char *key __rte_unused, const char *value, - void *extra_args) -{ - struct mrvl_ifnames *ifnames = extra_args; - - ifnames->names[ifnames->idx++] = value; - - return 0; -} - -/** - * Deinitialize per-lcore MUSDK hardware interfaces (hifs). - */ -static void -mrvl_deinit_hifs(void) -{ - int i; - - for (i = mrvl_lcore_first; i <= mrvl_lcore_last; i++) { - if (hifs[i]) - pp2_hif_deinit(hifs[i]); - } - used_hifs = MRVL_MUSDK_HIFS_RESERVED; - memset(hifs, 0, sizeof(hifs)); -} - -/** - * DPDK callback to register the virtual device. - * - * @param vdev - * Pointer to the virtual device. - * - * @return - * 0 on success, negative error value otherwise. - */ -static int -rte_pmd_mrvl_probe(struct rte_vdev_device *vdev) -{ - struct rte_kvargs *kvlist; - struct mrvl_ifnames ifnames; - int ret = -EINVAL; - uint32_t i, ifnum, cfgnum; - const char *params; - - params = rte_vdev_device_args(vdev); - if (!params) - return -EINVAL; - - kvlist = rte_kvargs_parse(params, valid_args); - if (!kvlist) - return -EINVAL; - - ifnum = rte_kvargs_count(kvlist, MRVL_IFACE_NAME_ARG); - if (ifnum > RTE_DIM(ifnames.names)) - goto out_free_kvlist; - - ifnames.idx = 0; - rte_kvargs_process(kvlist, MRVL_IFACE_NAME_ARG, - mrvl_get_ifnames, &ifnames); - - - /* - * The below system initialization should be done only once, - * on the first provided configuration file - */ - if (!mrvl_qos_cfg) { - cfgnum = rte_kvargs_count(kvlist, MRVL_CFG_ARG); - RTE_LOG(INFO, PMD, "Parsing config file!\n"); - if (cfgnum > 1) { - RTE_LOG(ERR, PMD, "Cannot handle more than one config file!\n"); - goto out_free_kvlist; - } else if (cfgnum == 1) { - rte_kvargs_process(kvlist, MRVL_CFG_ARG, - mrvl_get_qoscfg, &mrvl_qos_cfg); - } - } - - if (mrvl_dev_num) - goto init_devices; - - RTE_LOG(INFO, PMD, "Perform MUSDK initializations\n"); - /* - * ret == -EEXIST is correct, it means DMA - * has been already initialized (by another PMD). - */ - ret = mv_sys_dma_mem_init(MRVL_MUSDK_DMA_MEMSIZE); - if (ret < 0) { - if (ret != -EEXIST) - goto out_free_kvlist; - else - RTE_LOG(INFO, PMD, - "DMA memory has been already initialized by a different driver.\n"); - } - - ret = mrvl_init_pp2(); - if (ret) { - RTE_LOG(ERR, PMD, "Failed to init PP!\n"); - goto out_deinit_dma; - } - - memset(mrvl_port_bpool_size, 0, sizeof(mrvl_port_bpool_size)); - memset(mrvl_port_to_bpool_lookup, 0, sizeof(mrvl_port_to_bpool_lookup)); - - mrvl_lcore_first = RTE_MAX_LCORE; - mrvl_lcore_last = 0; - -init_devices: - for (i = 0; i < ifnum; i++) { - RTE_LOG(INFO, PMD, "Creating %s\n", ifnames.names[i]); - ret = mrvl_eth_dev_create(vdev, ifnames.names[i]); - if (ret) - goto out_cleanup; - } - mrvl_dev_num += ifnum; - - rte_kvargs_free(kvlist); - - return 0; -out_cleanup: - for (; i > 0; i--) - mrvl_eth_dev_destroy(ifnames.names[i]); - - if (mrvl_dev_num == 0) - mrvl_deinit_pp2(); -out_deinit_dma: - if (mrvl_dev_num == 0) - mv_sys_dma_mem_destroy(); -out_free_kvlist: - rte_kvargs_free(kvlist); - - return ret; -} - -/** - * DPDK callback to remove virtual device. - * - * @param vdev - * Pointer to the removed virtual device. - * - * @return - * 0 on success, negative error value otherwise. 
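The probe path above leans on librte_kvargs: it counts occurrences of the "iface" key and lets mrvl_get_ifnames() collect one value per occurrence. A reduced sketch of the same pattern outside the PMD; the argument string is made up:

#include <stdio.h>
#include <rte_kvargs.h>

static int
print_value(const char *key, const char *value, void *opaque)
{
	(void)opaque;
	printf("%s = %s\n", key, value);
	return 0;
}

int main(void)
{
	static const char * const keys[] = { "iface", "cfg", NULL };
	struct rte_kvargs *kvlist;

	/* same syntax as the vdev args, e.g. --vdev=net_mrvl,iface=eth0 */
	kvlist = rte_kvargs_parse("iface=eth0,iface=eth1,cfg=/tmp/qos.cfg",
				  keys);
	if (!kvlist)
		return 1;

	printf("%u iface key(s)\n", rte_kvargs_count(kvlist, "iface"));
	rte_kvargs_process(kvlist, "iface", print_value, NULL);

	rte_kvargs_free(kvlist);
	return 0;
}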
- */ -static int -rte_pmd_mrvl_remove(struct rte_vdev_device *vdev) -{ - int i; - const char *name; - - name = rte_vdev_device_name(vdev); - if (!name) - return -EINVAL; - - RTE_LOG(INFO, PMD, "Removing %s\n", name); - - for (i = 0; i < rte_eth_dev_count(); i++) { - char ifname[RTE_ETH_NAME_MAX_LEN]; - - rte_eth_dev_get_name_by_port(i, ifname); - mrvl_eth_dev_destroy(ifname); - mrvl_dev_num--; - } - - if (mrvl_dev_num == 0) { - RTE_LOG(INFO, PMD, "Perform MUSDK deinit\n"); - mrvl_deinit_hifs(); - mrvl_deinit_pp2(); - mv_sys_dma_mem_destroy(); - } - - return 0; -} - -static struct rte_vdev_driver pmd_mrvl_drv = { - .probe = rte_pmd_mrvl_probe, - .remove = rte_pmd_mrvl_remove, -}; - -RTE_PMD_REGISTER_VDEV(net_mrvl, pmd_mrvl_drv); -RTE_PMD_REGISTER_ALIAS(net_mrvl, eth_mrvl); diff --git a/drivers/net/mrvl/mrvl_ethdev.h b/drivers/net/mrvl/mrvl_ethdev.h deleted file mode 100644 index f7afae5c..00000000 --- a/drivers/net/mrvl/mrvl_ethdev.h +++ /dev/null @@ -1,118 +0,0 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2017 Marvell International Ltd. - * Copyright(c) 2017 Semihalf. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef _MRVL_ETHDEV_H_ -#define _MRVL_ETHDEV_H_ - -#include - -#include -#include -#include -#include -#include -#include - -/** Maximum number of rx queues per port */ -#define MRVL_PP2_RXQ_MAX 32 - -/** Maximum number of tx queues per port */ -#define MRVL_PP2_TXQ_MAX 8 - -/** Minimum number of descriptors in tx queue */ -#define MRVL_PP2_TXD_MIN 16 - -/** Maximum number of descriptors in tx queue */ -#define MRVL_PP2_TXD_MAX 2048 - -/** Tx queue descriptors alignment */ -#define MRVL_PP2_TXD_ALIGN 16 - -/** Minimum number of descriptors in rx queue */ -#define MRVL_PP2_RXD_MIN 16 - -/** Maximum number of descriptors in rx queue */ -#define MRVL_PP2_RXD_MAX 2048 - -/** Rx queue descriptors alignment */ -#define MRVL_PP2_RXD_ALIGN 16 - -/** Maximum number of descriptors in tx aggregated queue */ -#define MRVL_PP2_AGGR_TXQD_MAX 2048 - -/** Maximum number of Traffic Classes. 
*/ -#define MRVL_PP2_TC_MAX 8 - -/** Packet offset inside RX buffer. */ -#define MRVL_PKT_OFFS 64 - -/** Maximum number of descriptors in shadow queue. Must be power of 2 */ -#define MRVL_PP2_TX_SHADOWQ_SIZE MRVL_PP2_TXD_MAX - -/** Shadow queue size mask (since shadow queue size is power of 2) */ -#define MRVL_PP2_TX_SHADOWQ_MASK (MRVL_PP2_TX_SHADOWQ_SIZE - 1) - -/** Minimum number of sent buffers to release from shadow queue to BM */ -#define MRVL_PP2_BUF_RELEASE_BURST_SIZE 64 - -struct mrvl_priv { - /* Hot fields, used in fast path. */ - struct pp2_bpool *bpool; /**< BPool pointer */ - struct pp2_ppio *ppio; /**< Port handler pointer */ - rte_spinlock_t lock; /**< Spinlock for checking bpool status */ - uint16_t bpool_max_size; /**< BPool maximum size */ - uint16_t bpool_min_size; /**< BPool minimum size */ - uint16_t bpool_init_size; /**< Configured BPool size */ - - /** Mapping for DPDK rx queue->(TC, MRVL relative inq) */ - struct { - uint8_t tc; /**< Traffic Class */ - uint8_t inq; /**< Relative in-queue number */ - } rxq_map[MRVL_PP2_RXQ_MAX] __rte_cache_aligned; - - /* Configuration data, used sporadically. */ - uint8_t pp_id; - uint8_t ppio_id; - uint8_t bpool_bit; - uint8_t rss_hf_tcp; - uint8_t uc_mc_flushed; - uint8_t vlan_flushed; - - struct pp2_ppio_params ppio_params; - struct pp2_cls_qos_tbl_params qos_tbl_params; - struct pp2_cls_tbl *qos_tbl; - uint16_t nb_rx_queues; -}; - -#endif /* _MRVL_ETHDEV_H_ */ diff --git a/drivers/net/mrvl/mrvl_qos.c b/drivers/net/mrvl/mrvl_qos.c deleted file mode 100644 index fbb36813..00000000 --- a/drivers/net/mrvl/mrvl_qos.c +++ /dev/null @@ -1,636 +0,0 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2017 Marvell International Ltd. - * Copyright(c) 2017 Semihalf. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -/* Unluckily, container_of is defined by both DPDK and MUSDK, - * we'll declare only one version. - * - * Note that it is not used in this PMD anyway. 
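The shadow queue mask above is only correct when the queue size is a power of two, as the comment demands. A compile-time guard one could add for that invariant (C11; the macro values merely mirror the header):

#include <assert.h>

#define SHADOWQ_SIZE 2048          /* mirrors MRVL_PP2_TX_SHADOWQ_SIZE */
#define SHADOWQ_MASK (SHADOWQ_SIZE - 1)

/* a value is a power of two iff exactly one bit is set */
static_assert((SHADOWQ_SIZE & (SHADOWQ_SIZE - 1)) == 0,
	      "shadow queue size must be a power of two");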
- */ -#ifdef container_of -#undef container_of -#endif - -#include "mrvl_qos.h" - -/* Parsing tokens. Defined conveniently, so that any correction is easy. */ -#define MRVL_TOK_DEFAULT "default" -#define MRVL_TOK_DEFAULT_TC "default_tc" -#define MRVL_TOK_DSCP "dscp" -#define MRVL_TOK_MAPPING_PRIORITY "mapping_priority" -#define MRVL_TOK_IP "ip" -#define MRVL_TOK_IP_VLAN "ip/vlan" -#define MRVL_TOK_PCP "pcp" -#define MRVL_TOK_PORT "port" -#define MRVL_TOK_RXQ "rxq" -#define MRVL_TOK_SP "SP" -#define MRVL_TOK_TC "tc" -#define MRVL_TOK_TXQ "txq" -#define MRVL_TOK_VLAN "vlan" -#define MRVL_TOK_VLAN_IP "vlan/ip" -#define MRVL_TOK_WEIGHT "weight" - -/** Number of tokens in range a-b = 2. */ -#define MAX_RNG_TOKENS 2 - -/** Maximum possible value of PCP. */ -#define MAX_PCP 7 - -/** Maximum possible value of DSCP. */ -#define MAX_DSCP 63 - -/** Global QoS configuration. */ -struct mrvl_qos_cfg *mrvl_qos_cfg; - -/** - * Convert string to uint32_t with extra checks for result correctness. - * - * @param string String to convert. - * @param val Conversion result. - * @returns 0 in case of success, negative value otherwise. - */ -static int -get_val_securely(const char *string, uint32_t *val) -{ - char *endptr; - size_t len = strlen(string); - - if (len == 0) - return -1; - - errno = 0; - *val = strtoul(string, &endptr, 0); - if (errno != 0 || RTE_PTR_DIFF(endptr, string) != len) - return -2; - - return 0; -} - -/** - * Read out-queue configuration from file. - * - * @param file Path to the configuration file. - * @param port Port number. - * @param outq Out queue number. - * @param cfg Pointer to the Marvell QoS configuration structure. - * @returns 0 in case of success, negative value otherwise. - */ -static int -get_outq_cfg(struct rte_cfgfile *file, int port, int outq, - struct mrvl_qos_cfg *cfg) -{ - char sec_name[32]; - const char *entry; - uint32_t val; - - snprintf(sec_name, sizeof(sec_name), "%s %d %s %d", - MRVL_TOK_PORT, port, MRVL_TOK_TXQ, outq); - - /* Skip non-existing */ - if (rte_cfgfile_num_sections(file, sec_name, strlen(sec_name)) <= 0) - return 0; - - entry = rte_cfgfile_get_entry(file, sec_name, - MRVL_TOK_WEIGHT); - if (entry) { - if (get_val_securely(entry, &val) < 0) - return -1; - cfg->port[port].outq[outq].weight = (uint8_t)val; - } - - return 0; -} - -/** - * Gets multiple-entry values and places them in table. - * - * Entry can be anything, e.g. "1 2-3 5 6 7-9". This needs to be converted to - * table entries, respectively: {1, 2, 3, 5, 6, 7, 8, 9}. - * As all result table's elements are always 1-byte long, we - * won't overcomplicate the function, but we'll keep API generic, - * check if someone hasn't changed element size and make it simple - * to extend to other sizes. - * - * This function is purely utilitary, it does not print any error, only returns - * different error numbers. - * - * @param entry[in] Values string to parse. - * @param tab[out] Results table. - * @param elem_sz[in] Element size (in bytes). - * @param max_elems[in] Number of results table elements available. - * @param max val[in] Maximum value allowed. - * @returns Number of correctly parsed elements in case of success. - * @retval -1 Wrong element size. - * @retval -2 More tokens than result table allows. - * @retval -3 Wrong range syntax. - * @retval -4 Wrong range values. - * @retval -5 Maximum value exceeded. - */ -static int -get_entry_values(const char *entry, uint8_t *tab, - size_t elem_sz, uint8_t max_elems, uint8_t max_val) -{ - /* There should not be more tokens than max elements. 
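get_outq_cfg() above locates a section such as "port 0 txq 1" by name prefix and pulls a single entry out of it. The underlying rte_cfgfile calls in isolation; the file path and section name are examples:

#include <stdio.h>
#include <string.h>
#include <rte_cfgfile.h>

int main(void)
{
	struct rte_cfgfile *file = rte_cfgfile_load("/tmp/qos.cfg", 0);
	const char *sec = "port 0 txq 1";
	const char *entry;

	if (!file)
		return 1;

	/* matches sections by name prefix, like the driver does */
	if (rte_cfgfile_num_sections(file, sec, strlen(sec)) > 0) {
		entry = rte_cfgfile_get_entry(file, sec, "weight");
		if (entry)
			printf("weight = %s\n", entry);
	}

	rte_cfgfile_close(file);
	return 0;
}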
- * Add 1 for error trap. - */ - char *tokens[max_elems + 1]; - - /* Begin, End + error trap = 3. */ - char *rng_tokens[MAX_RNG_TOKENS + 1]; - long beg, end; - uint32_t token_val; - int nb_tokens, nb_rng_tokens; - int i; - int values = 0; - char val; - char entry_cpy[CFG_VALUE_LEN]; - - if (elem_sz != 1) - return -1; - - /* Copy the entry to safely use rte_strsplit(). */ - snprintf(entry_cpy, RTE_DIM(entry_cpy), "%s", entry); - - /* - * If there are more tokens than array size, rte_strsplit will - * not return error, just array size. - */ - nb_tokens = rte_strsplit(entry_cpy, strlen(entry_cpy), - tokens, max_elems + 1, ' '); - - /* Quick check, will be refined later. */ - if (nb_tokens > max_elems) - return -2; - - for (i = 0; i < nb_tokens; ++i) { - if (strchr(tokens[i], '-') != NULL) { - /* - * Split to begin and end tokens. - * We want to catch error cases too, thus we leave - * option for number of tokens to be more than 2. - */ - nb_rng_tokens = rte_strsplit(tokens[i], - strlen(tokens[i]), rng_tokens, - RTE_DIM(rng_tokens), '-'); - if (nb_rng_tokens != 2) - return -3; - - /* Range and sanity checks. */ - if (get_val_securely(rng_tokens[0], &token_val) < 0) - return -4; - beg = (char)token_val; - if (get_val_securely(rng_tokens[1], &token_val) < 0) - return -4; - end = (char)token_val; - if (beg < 0 || beg > UCHAR_MAX || - end < 0 || end > UCHAR_MAX || end < beg) - return -4; - - for (val = beg; val <= end; ++val) { - if (val > max_val) - return -5; - - *tab = val; - tab = RTE_PTR_ADD(tab, elem_sz); - ++values; - if (values >= max_elems) - return -2; - } - } else { - /* Single values. */ - if (get_val_securely(tokens[i], &token_val) < 0) - return -5; - val = (char)token_val; - if (val > max_val) - return -5; - - *tab = val; - tab = RTE_PTR_ADD(tab, elem_sz); - ++values; - if (values >= max_elems) - return -2; - } - } - - return values; -} - -/** - * Parse Traffic Class'es mapping configuration. - * - * @param file Config file handle. - * @param port Which port to look for. - * @param tc Which Traffic Class to look for. - * @param cfg[out] Parsing results. - * @returns 0 in case of success, negative value otherwise. 
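get_entry_values() above expands a string such as "1 2-3 5 6 7-9" into the byte table {1, 2, 3, 5, 6, 7, 8, 9}. A compact standalone parser for the same grammar, with error handling reduced to the bare minimum:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Parse "1 2-3 5" style lists into tab[]; returns count or -1. */
static int
parse_ranges(const char *entry, unsigned char *tab, int max_elems)
{
	char buf[128], *tok, *dash, *save;
	long beg, end, v;
	int n = 0;

	snprintf(buf, sizeof(buf), "%s", entry);
	for (tok = strtok_r(buf, " ", &save); tok;
	     tok = strtok_r(NULL, " ", &save)) {
		dash = strchr(tok, '-');
		if (dash) {
			*dash = '\0';
			beg = strtol(tok, NULL, 0);
			end = strtol(dash + 1, NULL, 0);
		} else {
			beg = end = strtol(tok, NULL, 0);
		}
		if (beg < 0 || end > 255 || end < beg)
			return -1;
		for (v = beg; v <= end; v++) {
			if (n >= max_elems)
				return -1;
			tab[n++] = (unsigned char)v;
		}
	}
	return n;
}

int main(void)
{
	unsigned char tab[16];
	int i, n = parse_ranges("1 2-3 5 6 7-9", tab, 16);

	for (i = 0; i < n; i++)
		printf("%d ", tab[i]);
	printf("\n");
	return 0;
}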
- */ -static int -parse_tc_cfg(struct rte_cfgfile *file, int port, int tc, - struct mrvl_qos_cfg *cfg) -{ - char sec_name[32]; - const char *entry; - int n; - - snprintf(sec_name, sizeof(sec_name), "%s %d %s %d", - MRVL_TOK_PORT, port, MRVL_TOK_TC, tc); - - /* Skip non-existing */ - if (rte_cfgfile_num_sections(file, sec_name, strlen(sec_name)) <= 0) - return 0; - - entry = rte_cfgfile_get_entry(file, sec_name, MRVL_TOK_RXQ); - if (entry) { - n = get_entry_values(entry, - cfg->port[port].tc[tc].inq, - sizeof(cfg->port[port].tc[tc].inq[0]), - RTE_DIM(cfg->port[port].tc[tc].inq), - MRVL_PP2_RXQ_MAX); - if (n < 0) { - RTE_LOG(ERR, PMD, "Error %d while parsing: %s\n", - n, entry); - return n; - } - cfg->port[port].tc[tc].inqs = n; - } - - entry = rte_cfgfile_get_entry(file, sec_name, MRVL_TOK_PCP); - if (entry) { - n = get_entry_values(entry, - cfg->port[port].tc[tc].pcp, - sizeof(cfg->port[port].tc[tc].pcp[0]), - RTE_DIM(cfg->port[port].tc[tc].pcp), - MAX_PCP); - if (n < 0) { - RTE_LOG(ERR, PMD, "Error %d while parsing: %s\n", - n, entry); - return n; - } - cfg->port[port].tc[tc].pcps = n; - } - - entry = rte_cfgfile_get_entry(file, sec_name, MRVL_TOK_DSCP); - if (entry) { - n = get_entry_values(entry, - cfg->port[port].tc[tc].dscp, - sizeof(cfg->port[port].tc[tc].dscp[0]), - RTE_DIM(cfg->port[port].tc[tc].dscp), - MAX_DSCP); - if (n < 0) { - RTE_LOG(ERR, PMD, "Error %d while parsing: %s\n", - n, entry); - return n; - } - cfg->port[port].tc[tc].dscps = n; - } - return 0; -} - -/** - * Parse QoS configuration - rte_kvargs_process handler. - * - * Opens configuration file and parses its content. - * - * @param key Unused. - * @param path Path to config file. - * @param extra_args Pointer to configuration structure. - * @returns 0 in case of success, exits otherwise. - */ -int -mrvl_get_qoscfg(const char *key __rte_unused, const char *path, - void *extra_args) -{ - struct mrvl_qos_cfg **cfg = extra_args; - struct rte_cfgfile *file = rte_cfgfile_load(path, 0); - uint32_t val; - int n, i, ret; - const char *entry; - char sec_name[32]; - - if (file == NULL) - rte_exit(EXIT_FAILURE, "Cannot load configuration %s\n", path); - - /* Create configuration. This is never accessed on the fast path, - * so we can ignore socket. - */ - *cfg = rte_zmalloc("mrvl_qos_cfg", sizeof(struct mrvl_qos_cfg), 0); - if (*cfg == NULL) - rte_exit(EXIT_FAILURE, "Cannot allocate configuration %s\n", - path); - - n = rte_cfgfile_num_sections(file, MRVL_TOK_PORT, - sizeof(MRVL_TOK_PORT) - 1); - - if (n == 0) { - /* This is weird, but not bad. */ - RTE_LOG(WARNING, PMD, "Empty configuration file?\n"); - return 0; - } - - /* Use the number of ports given as vdev parameters. */ - for (n = 0; n < (PP2_NUM_ETH_PPIO * PP2_NUM_PKT_PROC); ++n) { - snprintf(sec_name, sizeof(sec_name), "%s %d %s", - MRVL_TOK_PORT, n, MRVL_TOK_DEFAULT); - - /* Skip ports non-existing in configuration. 
*/ - if (rte_cfgfile_num_sections(file, sec_name, - strlen(sec_name)) <= 0) { - (*cfg)->port[n].use_global_defaults = 1; - (*cfg)->port[n].mapping_priority = - PP2_CLS_QOS_TBL_VLAN_IP_PRI; - continue; - } - - entry = rte_cfgfile_get_entry(file, sec_name, - MRVL_TOK_DEFAULT_TC); - if (entry) { - if (get_val_securely(entry, &val) < 0 || - val > USHRT_MAX) - return -1; - (*cfg)->port[n].default_tc = (uint8_t)val; - } else { - RTE_LOG(ERR, PMD, - "Default Traffic Class required in custom configuration!\n"); - return -1; - } - - entry = rte_cfgfile_get_entry(file, sec_name, - MRVL_TOK_MAPPING_PRIORITY); - if (entry) { - if (!strncmp(entry, MRVL_TOK_VLAN_IP, - sizeof(MRVL_TOK_VLAN_IP))) - (*cfg)->port[n].mapping_priority = - PP2_CLS_QOS_TBL_VLAN_IP_PRI; - else if (!strncmp(entry, MRVL_TOK_IP_VLAN, - sizeof(MRVL_TOK_IP_VLAN))) - (*cfg)->port[n].mapping_priority = - PP2_CLS_QOS_TBL_IP_VLAN_PRI; - else if (!strncmp(entry, MRVL_TOK_IP, - sizeof(MRVL_TOK_IP))) - (*cfg)->port[n].mapping_priority = - PP2_CLS_QOS_TBL_IP_PRI; - else if (!strncmp(entry, MRVL_TOK_VLAN, - sizeof(MRVL_TOK_VLAN))) - (*cfg)->port[n].mapping_priority = - PP2_CLS_QOS_TBL_VLAN_PRI; - else - rte_exit(EXIT_FAILURE, - "Error in parsing %s value (%s)!\n", - MRVL_TOK_MAPPING_PRIORITY, entry); - } else { - (*cfg)->port[n].mapping_priority = - PP2_CLS_QOS_TBL_VLAN_IP_PRI; - } - - for (i = 0; i < MRVL_PP2_RXQ_MAX; ++i) { - ret = get_outq_cfg(file, n, i, *cfg); - if (ret < 0) - rte_exit(EXIT_FAILURE, - "Error %d parsing port %d outq %d!\n", - ret, n, i); - } - - for (i = 0; i < MRVL_PP2_TC_MAX; ++i) { - ret = parse_tc_cfg(file, n, i, *cfg); - if (ret < 0) - rte_exit(EXIT_FAILURE, - "Error %d parsing port %d tc %d!\n", - ret, n, i); - } - } - - return 0; -} - -/** - * Setup Traffic Class. - * - * Fill in TC parameters in single MUSDK TC config entry. - * @param param TC parameters entry. - * @param inqs Number of MUSDK in-queues in this TC. - * @param bpool Bpool for this TC. - * @returns 0 in case of success, exits otherwise. - */ -static int -setup_tc(struct pp2_ppio_tc_params *param, uint8_t inqs, - struct pp2_bpool *bpool) -{ - struct pp2_ppio_inq_params *inq_params; - - param->pkt_offset = MRVL_PKT_OFFS; - param->pools[0] = bpool; - - inq_params = rte_zmalloc_socket("inq_params", - inqs * sizeof(*inq_params), - 0, rte_socket_id()); - if (!inq_params) - return -ENOMEM; - - param->num_in_qs = inqs; - - /* Release old config if necessary. */ - if (param->inqs_params) - rte_free(param->inqs_params); - - param->inqs_params = inq_params; - - return 0; -} - -/** - * Configure RX Queues in a given port. - * - * Sets up RX queues, their Traffic Classes and DPDK rxq->(TC,inq) mapping. - * - * @param priv Port's private data - * @param portid DPDK port ID - * @param max_queues Maximum number of queues to configure. - * @returns 0 in case of success, negative value otherwise. - */ -int -mrvl_configure_rxqs(struct mrvl_priv *priv, uint16_t portid, - uint16_t max_queues) -{ - size_t i, tc; - - if (mrvl_qos_cfg == NULL || - mrvl_qos_cfg->port[portid].use_global_defaults) { - /* No port configuration, use default: 1 TC, no QoS. */ - priv->ppio_params.inqs_params.num_tcs = 1; - setup_tc(&priv->ppio_params.inqs_params.tcs_params[0], - max_queues, priv->bpool); - - /* Direct mapping of queues i.e. 0->0, 1->1 etc. */ - for (i = 0; i < max_queues; ++i) { - priv->rxq_map[i].tc = 0; - priv->rxq_map[i].inq = i; - } - return 0; - } - - /* We need only a subset of configuration. 
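Taken together, the sections and tokens parsed above describe an ini-style file loadable by rte_cfgfile. A hypothetical configuration exercising each entry type might look like this:

[port 0 default]
default_tc = 0
mapping_priority = vlan/ip

[port 0 tc 0]
rxq = 0
pcp = 0 1 2
dscp = 0-15

[port 0 tc 1]
rxq = 1-2
pcp = 3-7
dscp = 16-63

[port 0 txq 0]
weight = 100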
*/ - struct port_cfg *port_cfg = &mrvl_qos_cfg->port[portid]; - - priv->qos_tbl_params.type = port_cfg->mapping_priority; - - /* - * We need to reverse mapping, from tc->pcp (better from usability - * point of view) to pcp->tc (configurable in MUSDK). - * First, set all map elements to "default". - */ - for (i = 0; i < RTE_DIM(priv->qos_tbl_params.pcp_cos_map); ++i) - priv->qos_tbl_params.pcp_cos_map[i].tc = port_cfg->default_tc; - - /* Then, fill in all known values. */ - for (tc = 0; tc < RTE_DIM(port_cfg->tc); ++tc) { - if (port_cfg->tc[tc].pcps > RTE_DIM(port_cfg->tc[0].pcp)) { - /* Better safe than sorry. */ - RTE_LOG(ERR, PMD, - "Too many PCPs configured in TC %zu!\n", tc); - return -1; - } - for (i = 0; i < port_cfg->tc[tc].pcps; ++i) { - priv->qos_tbl_params.pcp_cos_map[ - port_cfg->tc[tc].pcp[i]].tc = tc; - } - } - - /* - * The same logic goes with DSCP. - * First, set all map elements to "default". - */ - for (i = 0; i < RTE_DIM(priv->qos_tbl_params.dscp_cos_map); ++i) - priv->qos_tbl_params.dscp_cos_map[i].tc = - port_cfg->default_tc; - - /* Fill in all known values. */ - for (tc = 0; tc < RTE_DIM(port_cfg->tc); ++tc) { - if (port_cfg->tc[tc].dscps > RTE_DIM(port_cfg->tc[0].dscp)) { - /* Better safe than sorry. */ - RTE_LOG(ERR, PMD, - "Too many DSCPs configured in TC %zu!\n", tc); - return -1; - } - for (i = 0; i < port_cfg->tc[tc].dscps; ++i) { - priv->qos_tbl_params.dscp_cos_map[ - port_cfg->tc[tc].dscp[i]].tc = tc; - } - } - - /* - * Surprisingly, similar logic goes with queue mapping. - * We need only to store qid->tc mapping, - * to know TC when queue is read. - */ - for (i = 0; i < RTE_DIM(priv->rxq_map); ++i) - priv->rxq_map[i].tc = MRVL_UNKNOWN_TC; - - /* Set up DPDKq->(TC,inq) mapping. */ - for (tc = 0; tc < RTE_DIM(port_cfg->tc); ++tc) { - if (port_cfg->tc[tc].inqs > RTE_DIM(port_cfg->tc[0].inq)) { - /* Overflow. */ - RTE_LOG(ERR, PMD, - "Too many RX queues configured per TC %zu!\n", - tc); - return -1; - } - for (i = 0; i < port_cfg->tc[tc].inqs; ++i) { - uint8_t idx = port_cfg->tc[tc].inq[i]; - - if (idx > RTE_DIM(priv->rxq_map)) { - RTE_LOG(ERR, PMD, "Bad queue index %d!\n", idx); - return -1; - } - - priv->rxq_map[idx].tc = tc; - priv->rxq_map[idx].inq = i; - } - } - - /* - * Set up TC configuration. TCs need to be sequenced: 0, 1, 2 - * with no gaps. Empty TC means end of processing. - */ - for (i = 0; i < MRVL_PP2_TC_MAX; ++i) { - if (port_cfg->tc[i].inqs == 0) - break; - setup_tc(&priv->ppio_params.inqs_params.tcs_params[i], - port_cfg->tc[i].inqs, - priv->bpool); - } - - priv->ppio_params.inqs_params.num_tcs = i; - - return 0; -} - -/** - * Start QoS mapping. - * - * Finalize QoS table configuration and initialize it in SDK. It can be done - * only after port is started, so we have a valid ppio reference. - * - * @param priv Port's private (configuration) data. - * @returns 0 in case of success, exits otherwise. - */ -int -mrvl_start_qos_mapping(struct mrvl_priv *priv) -{ - size_t i; - - if (priv->ppio == NULL) { - RTE_LOG(ERR, PMD, "ppio must not be NULL here!\n"); - return -1; - } - - for (i = 0; i < RTE_DIM(priv->qos_tbl_params.pcp_cos_map); ++i) - priv->qos_tbl_params.pcp_cos_map[i].ppio = priv->ppio; - - for (i = 0; i < RTE_DIM(priv->qos_tbl_params.dscp_cos_map); ++i) - priv->qos_tbl_params.dscp_cos_map[i].ppio = priv->ppio; - - /* Initialize Classifier QoS table. 
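The inversion above (per-TC code-point lists turned into a flat pcp->tc table, with unlisted code points falling back to the default TC) is the heart of the QoS table setup. The same transformation in miniature, with a made-up two-TC configuration:

#include <stdio.h>

#define NUM_PCP 8

int main(void)
{
	unsigned char pcp_to_tc[NUM_PCP];
	/* hypothetical config: TC0 owns PCP {0,1}, TC1 owns PCP {5,6,7} */
	unsigned char tc0_pcp[] = { 0, 1 };
	unsigned char tc1_pcp[] = { 5, 6, 7 };
	unsigned int i, default_tc = 0;

	/* first map every code point to the default TC... */
	for (i = 0; i < NUM_PCP; i++)
		pcp_to_tc[i] = default_tc;

	/* ...then overwrite the explicitly configured ones */
	for (i = 0; i < sizeof(tc0_pcp); i++)
		pcp_to_tc[tc0_pcp[i]] = 0;
	for (i = 0; i < sizeof(tc1_pcp); i++)
		pcp_to_tc[tc1_pcp[i]] = 1;

	for (i = 0; i < NUM_PCP; i++)
		printf("pcp %u -> tc %u\n", i, pcp_to_tc[i]);
	return 0;
}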
*/ - - return pp2_cls_qos_tbl_init(&priv->qos_tbl_params, &priv->qos_tbl); -} diff --git a/drivers/net/mrvl/mrvl_qos.h b/drivers/net/mrvl/mrvl_qos.h deleted file mode 100644 index ae7508c9..00000000 --- a/drivers/net/mrvl/mrvl_qos.h +++ /dev/null @@ -1,113 +0,0 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2017 Marvell International Ltd. - * Copyright(c) 2017 Semihalf. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef _MRVL_QOS_H_ -#define _MRVL_QOS_H_ - -#include - -#include "mrvl_ethdev.h" - -/** Code Points per Traffic Class. Equals max(DSCP, PCP). */ -#define MRVL_CP_PER_TC (64) - -/** Value used as "unknown". */ -#define MRVL_UNKNOWN_TC (0xFF) - -/* QoS config. */ -struct mrvl_qos_cfg { - struct port_cfg { - struct { - uint8_t inq[MRVL_PP2_RXQ_MAX]; - uint8_t dscp[MRVL_CP_PER_TC]; - uint8_t pcp[MRVL_CP_PER_TC]; - uint8_t inqs; - uint8_t dscps; - uint8_t pcps; - } tc[MRVL_PP2_TC_MAX]; - struct { - uint8_t weight; - } outq[MRVL_PP2_RXQ_MAX]; - enum pp2_cls_qos_tbl_type mapping_priority; - uint16_t inqs; - uint16_t outqs; - uint8_t default_tc; - uint8_t use_global_defaults; - } port[RTE_MAX_ETHPORTS]; -}; - -/** Global QoS configuration. */ -extern struct mrvl_qos_cfg *mrvl_qos_cfg; - -/** - * Parse QoS configuration - rte_kvargs_process handler. - * - * Opens configuration file and parses its content. - * - * @param key Unused. - * @param path Path to config file. - * @param extra_args Pointer to configuration structure. - * @returns 0 in case of success, exits otherwise. - */ -int -mrvl_get_qoscfg(const char *key __rte_unused, const char *path, - void *extra_args); - -/** - * Configure RX Queues in a given port. - * - * Sets up RX queues, their Traffic Classes and DPDK rxq->(TC,inq) mapping. - * - * @param priv Port's private data - * @param portid DPDK port ID - * @param max_queues Maximum number of queues to configure. - * @returns 0 in case of success, negative value otherwise. 
- */ -int -mrvl_configure_rxqs(struct mrvl_priv *priv, uint16_t portid, - uint16_t max_queues); - -/** - * Start QoS mapping. - * - * Finalize QoS table configuration and initialize it in SDK. It can be done - * only after port is started, so we have a valid ppio reference. - * - * @param priv Port's private (configuration) data. - * @returns 0 in case of success, exits otherwise. - */ -int -mrvl_start_qos_mapping(struct mrvl_priv *priv); - -#endif /* _MRVL_QOS_H_ */ diff --git a/drivers/net/mrvl/rte_pmd_mrvl_version.map b/drivers/net/mrvl/rte_pmd_mrvl_version.map deleted file mode 100644 index a7530317..00000000 --- a/drivers/net/mrvl/rte_pmd_mrvl_version.map +++ /dev/null @@ -1,3 +0,0 @@ -DPDK_17.11 { - local: *; -}; diff --git a/drivers/net/mvpp2/Makefile b/drivers/net/mvpp2/Makefile new file mode 100644 index 00000000..492aef97 --- /dev/null +++ b/drivers/net/mvpp2/Makefile @@ -0,0 +1,42 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017 Marvell International Ltd. +# Copyright(c) 2017 Semihalf. +# All rights reserved. + +include $(RTE_SDK)/mk/rte.vars.mk + +ifneq ($(MAKECMDGOALS),clean) +ifneq ($(MAKECMDGOALS),config) +ifeq ($(LIBMUSDK_PATH),) +$(error "Please define LIBMUSDK_PATH environment variable") +endif +endif +endif + +# library name +LIB = librte_pmd_mvpp2.a + +# library version +LIBABIVER := 1 + +# versioning export map +EXPORT_MAP := rte_pmd_mvpp2_version.map + +# external library dependencies +CFLAGS += -I$(LIBMUSDK_PATH)/include +CFLAGS += -DMVCONF_TYPES_PUBLIC +CFLAGS += -DMVCONF_DMA_PHYS_ADDR_T_PUBLIC +CFLAGS += $(WERROR_FLAGS) +CFLAGS += -O3 +LDLIBS += -L$(LIBMUSDK_PATH)/lib +LDLIBS += -lmusdk +LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring +LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_cfgfile +LDLIBS += -lrte_bus_vdev + +# library source files +SRCS-$(CONFIG_RTE_LIBRTE_MVPP2_PMD) += mrvl_ethdev.c +SRCS-$(CONFIG_RTE_LIBRTE_MVPP2_PMD) += mrvl_qos.c +SRCS-$(CONFIG_RTE_LIBRTE_MVPP2_PMD) += mrvl_flow.c + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/net/mvpp2/meson.build b/drivers/net/mvpp2/meson.build new file mode 100644 index 00000000..e1398895 --- /dev/null +++ b/drivers/net/mvpp2/meson.build @@ -0,0 +1,25 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Marvell International Ltd. +# Copyright(c) 2018 Semihalf. +# All rights reserved. + +path = get_option('lib_musdk_dir') +lib_dir = path + '/lib' +inc_dir = path + '/include' + +lib = cc.find_library('libmusdk', dirs : [lib_dir], required: false) +if not lib.found() + build = false +else + ext_deps += lib + includes += include_directories(inc_dir) + cflags += ['-DMVCONF_TYPES_PUBLIC', '-DMVCONF_DMA_PHYS_ADDR_T_PUBLIC'] +endif + +sources = files( + 'mrvl_ethdev.c', + 'mrvl_flow.c', + 'mrvl_qos.c' +) + +deps += ['cfgfile'] diff --git a/drivers/net/mvpp2/mrvl_ethdev.c b/drivers/net/mvpp2/mrvl_ethdev.c new file mode 100644 index 00000000..ea6a7864 --- /dev/null +++ b/drivers/net/mvpp2/mrvl_ethdev.c @@ -0,0 +1,2747 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Marvell International Ltd. + * Copyright(c) 2017 Semihalf. + * All rights reserved. + */ + +#include +#include +#include +#include +#include + +/* Unluckily, container_of is defined by both DPDK and MUSDK, + * we'll declare only one version. + * + * Note that it is not used in this PMD anyway. 
+ */
+#ifdef container_of
+#undef container_of
+#endif
+
+#include <fcntl.h>
+#include <linux/ethtool.h>
+#include <linux/sockios.h>
+#include <net/if.h>
+#include <net/if_arp.h>
+#include <sys/ioctl.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#include "mrvl_ethdev.h"
+#include "mrvl_qos.h"
+
+/* bitmask with reserved hifs */
+#define MRVL_MUSDK_HIFS_RESERVED 0x0F
+/* bitmask with reserved bpools */
+#define MRVL_MUSDK_BPOOLS_RESERVED 0x07
+/* bitmask with reserved kernel RSS tables */
+#define MRVL_MUSDK_RSS_RESERVED 0x01
+/* maximum number of available hifs */
+#define MRVL_MUSDK_HIFS_MAX 9
+
+/* prefetch shift */
+#define MRVL_MUSDK_PREFETCH_SHIFT 2
+
+/* TCAM has 25 entries reserved for uc/mc filter entries */
+#define MRVL_MAC_ADDRS_MAX 25
+#define MRVL_MATCH_LEN 16
+#define MRVL_PKT_EFFEC_OFFS (MRVL_PKT_OFFS + MV_MH_SIZE)
+/* Maximum allowable packet size */
+#define MRVL_PKT_SIZE_MAX (10240 - MV_MH_SIZE)
+
+#define MRVL_IFACE_NAME_ARG "iface"
+#define MRVL_CFG_ARG "cfg"
+
+#define MRVL_BURST_SIZE 64
+
+#define MRVL_ARP_LENGTH 28
+
+#define MRVL_COOKIE_ADDR_INVALID ~0ULL
+
+#define MRVL_COOKIE_HIGH_ADDR_SHIFT	(sizeof(pp2_cookie_t) * 8)
+#define MRVL_COOKIE_HIGH_ADDR_MASK	(~0ULL << MRVL_COOKIE_HIGH_ADDR_SHIFT)
+
+/* Memory size (in bytes) for MUSDK dma buffers */
+#define MRVL_MUSDK_DMA_MEMSIZE 41943040
+
+/** Port Rx offload capabilities */
+#define MRVL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_FILTER | \
+			  DEV_RX_OFFLOAD_JUMBO_FRAME | \
+			  DEV_RX_OFFLOAD_CRC_STRIP | \
+			  DEV_RX_OFFLOAD_CHECKSUM)
+
+/** Port Tx offloads capabilities */
+#define MRVL_TX_OFFLOADS (DEV_TX_OFFLOAD_IPV4_CKSUM | \
+			  DEV_TX_OFFLOAD_UDP_CKSUM | \
+			  DEV_TX_OFFLOAD_TCP_CKSUM)
+
+static const char * const valid_args[] = {
+	MRVL_IFACE_NAME_ARG,
+	MRVL_CFG_ARG,
+	NULL
+};
+
+static int used_hifs = MRVL_MUSDK_HIFS_RESERVED;
+static struct pp2_hif *hifs[RTE_MAX_LCORE];
+static int used_bpools[PP2_NUM_PKT_PROC] = {
+	MRVL_MUSDK_BPOOLS_RESERVED,
+	MRVL_MUSDK_BPOOLS_RESERVED
+};
+
+struct pp2_bpool *mrvl_port_to_bpool_lookup[RTE_MAX_ETHPORTS];
+int mrvl_port_bpool_size[PP2_NUM_PKT_PROC][PP2_BPOOL_NUM_POOLS][RTE_MAX_LCORE];
+uint64_t cookie_addr_high = MRVL_COOKIE_ADDR_INVALID;
+
+struct mrvl_ifnames {
+	const char *names[PP2_NUM_ETH_PPIO * PP2_NUM_PKT_PROC];
+	int idx;
+};
+
+/*
+ * To support buffer harvesting based on the loopback port, a shadow queue
+ * structure was introduced for bookkeeping of buffer information.
+ *
+ * Before a packet is sent, the related buffer information (pp2_buff_inf) is
+ * stored in the shadow queue. After the packet is transmitted, the no longer
+ * used packet buffer is released back to its original hardware pool,
+ * provided it originated from an interface.
+ * In case it was generated by the application itself, i.e. the mbuf->port
+ * field is 0xff, it is released to the software mempool.
+ */ +struct mrvl_shadow_txq { + int head; /* write index - used when sending buffers */ + int tail; /* read index - used when releasing buffers */ + u16 size; /* queue occupied size */ + u16 num_to_release; /* number of buffers sent, that can be released */ + struct buff_release_entry ent[MRVL_PP2_TX_SHADOWQ_SIZE]; /* q entries */ +}; + +struct mrvl_rxq { + struct mrvl_priv *priv; + struct rte_mempool *mp; + int queue_id; + int port_id; + int cksum_enabled; + uint64_t bytes_recv; + uint64_t drop_mac; +}; + +struct mrvl_txq { + struct mrvl_priv *priv; + int queue_id; + int port_id; + uint64_t bytes_sent; + struct mrvl_shadow_txq shadow_txqs[RTE_MAX_LCORE]; + int tx_deferred_start; +}; + +static int mrvl_lcore_first; +static int mrvl_lcore_last; +static int mrvl_dev_num; + +static int mrvl_fill_bpool(struct mrvl_rxq *rxq, int num); +static inline void mrvl_free_sent_buffers(struct pp2_ppio *ppio, + struct pp2_hif *hif, unsigned int core_id, + struct mrvl_shadow_txq *sq, int qid, int force); + +#define MRVL_XSTATS_TBL_ENTRY(name) { \ + #name, offsetof(struct pp2_ppio_statistics, name), \ + sizeof(((struct pp2_ppio_statistics *)0)->name) \ +} + +/* Table with xstats data */ +static struct { + const char *name; + unsigned int offset; + unsigned int size; +} mrvl_xstats_tbl[] = { + MRVL_XSTATS_TBL_ENTRY(rx_bytes), + MRVL_XSTATS_TBL_ENTRY(rx_packets), + MRVL_XSTATS_TBL_ENTRY(rx_unicast_packets), + MRVL_XSTATS_TBL_ENTRY(rx_errors), + MRVL_XSTATS_TBL_ENTRY(rx_fullq_dropped), + MRVL_XSTATS_TBL_ENTRY(rx_bm_dropped), + MRVL_XSTATS_TBL_ENTRY(rx_early_dropped), + MRVL_XSTATS_TBL_ENTRY(rx_fifo_dropped), + MRVL_XSTATS_TBL_ENTRY(rx_cls_dropped), + MRVL_XSTATS_TBL_ENTRY(tx_bytes), + MRVL_XSTATS_TBL_ENTRY(tx_packets), + MRVL_XSTATS_TBL_ENTRY(tx_unicast_packets), + MRVL_XSTATS_TBL_ENTRY(tx_errors) +}; + +static inline int +mrvl_get_bpool_size(int pp2_id, int pool_id) +{ + int i; + int size = 0; + + for (i = mrvl_lcore_first; i <= mrvl_lcore_last; i++) + size += mrvl_port_bpool_size[pp2_id][pool_id][i]; + + return size; +} + +static inline int +mrvl_reserve_bit(int *bitmap, int max) +{ + int n = sizeof(*bitmap) * 8 - __builtin_clz(*bitmap); + + if (n >= max) + return -1; + + *bitmap |= 1 << n; + + return n; +} + +static int +mrvl_init_hif(int core_id) +{ + struct pp2_hif_params params; + char match[MRVL_MATCH_LEN]; + int ret; + + ret = mrvl_reserve_bit(&used_hifs, MRVL_MUSDK_HIFS_MAX); + if (ret < 0) { + RTE_LOG(ERR, PMD, "Failed to allocate hif %d\n", core_id); + return ret; + } + + snprintf(match, sizeof(match), "hif-%d", ret); + memset(¶ms, 0, sizeof(params)); + params.match = match; + params.out_size = MRVL_PP2_AGGR_TXQD_MAX; + ret = pp2_hif_init(¶ms, &hifs[core_id]); + if (ret) { + RTE_LOG(ERR, PMD, "Failed to initialize hif %d\n", core_id); + return ret; + } + + return 0; +} + +static inline struct pp2_hif* +mrvl_get_hif(struct mrvl_priv *priv, int core_id) +{ + int ret; + + if (likely(hifs[core_id] != NULL)) + return hifs[core_id]; + + rte_spinlock_lock(&priv->lock); + + ret = mrvl_init_hif(core_id); + if (ret < 0) { + RTE_LOG(ERR, PMD, "Failed to allocate hif %d\n", core_id); + goto out; + } + + if (core_id < mrvl_lcore_first) + mrvl_lcore_first = core_id; + + if (core_id > mrvl_lcore_last) + mrvl_lcore_last = core_id; +out: + rte_spinlock_unlock(&priv->lock); + + return hifs[core_id]; +} + +/** + * Configure rss based on dpdk rss configuration. + * + * @param priv + * Pointer to private structure. + * @param rss_conf + * Pointer to RSS configuration. 
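MRVL_XSTATS_TBL_ENTRY above stores a name, an offset and a width per counter so xstats can later be read generically out of struct pp2_ppio_statistics. The access pattern demonstrated on a stand-in structure (the memcpy into a wider integer assumes a little-endian host, as the driver effectively does):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct stats {                  /* stand-in for pp2_ppio_statistics */
	uint64_t rx_bytes;
	uint32_t rx_errors;
};

#define TBL_ENTRY(name) { #name, offsetof(struct stats, name), \
			  sizeof(((struct stats *)0)->name) }

static const struct {
	const char *name;
	size_t offset;
	size_t size;
} tbl[] = { TBL_ENTRY(rx_bytes), TBL_ENTRY(rx_errors) };

int main(void)
{
	struct stats s = { .rx_bytes = 1234, .rx_errors = 5 };
	size_t i;

	for (i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++) {
		uint64_t val = 0;

		/* copy only 'size' bytes: the fields differ in width */
		memcpy(&val, (uint8_t *)&s + tbl[i].offset, tbl[i].size);
		printf("%s = %llu\n", tbl[i].name, (unsigned long long)val);
	}
	return 0;
}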
+ * + * @return + * 0 on success, negative error value otherwise. + */ +static int +mrvl_configure_rss(struct mrvl_priv *priv, struct rte_eth_rss_conf *rss_conf) +{ + if (rss_conf->rss_key) + RTE_LOG(WARNING, PMD, "Changing hash key is not supported\n"); + + if (rss_conf->rss_hf == 0) { + priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE; + } else if (rss_conf->rss_hf & ETH_RSS_IPV4) { + priv->ppio_params.inqs_params.hash_type = + PP2_PPIO_HASH_T_2_TUPLE; + } else if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) { + priv->ppio_params.inqs_params.hash_type = + PP2_PPIO_HASH_T_5_TUPLE; + priv->rss_hf_tcp = 1; + } else if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) { + priv->ppio_params.inqs_params.hash_type = + PP2_PPIO_HASH_T_5_TUPLE; + priv->rss_hf_tcp = 0; + } else { + return -EINVAL; + } + + return 0; +} + +/** + * Ethernet device configuration. + * + * Prepare the driver for a given number of TX and RX queues and + * configure RSS. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, negative error value otherwise. + */ +static int +mrvl_dev_configure(struct rte_eth_dev *dev) +{ + struct mrvl_priv *priv = dev->data->dev_private; + int ret; + + if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_NONE && + dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) { + RTE_LOG(INFO, PMD, "Unsupported rx multi queue mode %d\n", + dev->data->dev_conf.rxmode.mq_mode); + return -EINVAL; + } + + if (!(dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP)) { + RTE_LOG(INFO, PMD, + "L2 CRC stripping is always enabled in hw\n"); + dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_CRC_STRIP; + } + + if (dev->data->dev_conf.rxmode.split_hdr_size) { + RTE_LOG(INFO, PMD, "Split headers not supported\n"); + return -EINVAL; + } + + if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) + dev->data->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len - + ETHER_HDR_LEN - ETHER_CRC_LEN; + + ret = mrvl_configure_rxqs(priv, dev->data->port_id, + dev->data->nb_rx_queues); + if (ret < 0) + return ret; + + ret = mrvl_configure_txqs(priv, dev->data->port_id, + dev->data->nb_tx_queues); + if (ret < 0) + return ret; + + priv->ppio_params.outqs_params.num_outqs = dev->data->nb_tx_queues; + priv->ppio_params.maintain_stats = 1; + priv->nb_rx_queues = dev->data->nb_rx_queues; + + if (dev->data->nb_rx_queues == 1 && + dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) { + RTE_LOG(WARNING, PMD, "Disabling hash for 1 rx queue\n"); + priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE; + + return 0; + } + + return mrvl_configure_rss(priv, + &dev->data->dev_conf.rx_adv_conf.rss_conf); +} + +/** + * DPDK callback to change the MTU. + * + * Setting the MTU affects hardware MRU (packets larger than the MRU + * will be dropped). + * + * @param dev + * Pointer to Ethernet device structure. + * @param mtu + * New MTU. + * + * @return + * 0 on success, negative error value otherwise. + */ +static int +mrvl_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) +{ + struct mrvl_priv *priv = dev->data->dev_private; + /* extra MV_MH_SIZE bytes are required for Marvell tag */ + uint16_t mru = mtu + MV_MH_SIZE + ETHER_HDR_LEN + ETHER_CRC_LEN; + int ret; + + if (mtu < ETHER_MIN_MTU || mru > MRVL_PKT_SIZE_MAX) + return -EINVAL; + + if (!priv->ppio) + return 0; + + ret = pp2_ppio_set_mru(priv->ppio, mru); + if (ret) + return ret; + + return pp2_ppio_set_mtu(priv->ppio, mtu); +} + +/** + * DPDK callback to bring the link up. 
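mrvl_mtu_set() above translates DPDK's MTU into the hardware MRU by adding the Marvell header and the L2 framing overhead. The arithmetic with concrete numbers; MV_MH_SIZE being 2 bytes is an assumption taken from MUSDK:

#include <stdio.h>

#define MV_MH_SIZE 2       /* Marvell header, per MUSDK (assumed) */
#define ETHER_HDR_LEN 14
#define ETHER_CRC_LEN 4

int main(void)
{
	unsigned int mtu = 1500;
	unsigned int mru = mtu + MV_MH_SIZE + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/* 1500 + 2 + 14 + 4 = 1520 bytes on the wire side */
	printf("mtu %u -> mru %u\n", mtu, mru);
	return 0;
}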
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ *
+ * @return
+ *   0 on success, negative error value otherwise.
+ */
+static int
+mrvl_dev_set_link_up(struct rte_eth_dev *dev)
+{
+	struct mrvl_priv *priv = dev->data->dev_private;
+	int ret;
+
+	if (!priv->ppio)
+		return -EPERM;
+
+	ret = pp2_ppio_enable(priv->ppio);
+	if (ret)
+		return ret;
+
+	/*
+	 * mtu/mru can be updated if pp2_ppio_enable() was called at least once
+	 * as pp2_ppio_enable() changes port->t_mode from default 0 to
+	 * PP2_TRAFFIC_INGRESS_EGRESS.
+	 *
+	 * Set mtu to default DPDK value here.
+	 */
+	ret = mrvl_mtu_set(dev, dev->data->mtu);
+	if (ret)
+		pp2_ppio_disable(priv->ppio);
+
+	return ret;
+}
+
+/**
+ * DPDK callback to bring the link down.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ *
+ * @return
+ *   0 on success, negative error value otherwise.
+ */
+static int
+mrvl_dev_set_link_down(struct rte_eth_dev *dev)
+{
+	struct mrvl_priv *priv = dev->data->dev_private;
+
+	if (!priv->ppio)
+		return -EPERM;
+
+	return pp2_ppio_disable(priv->ppio);
+}
+
+/**
+ * DPDK callback to start tx queue.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param queue_id
+ *   Transmit queue index.
+ *
+ * @return
+ *   0 on success, negative error value otherwise.
+ */
+static int
+mrvl_tx_queue_start(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+	struct mrvl_priv *priv = dev->data->dev_private;
+	int ret;
+
+	if (!priv)
+		return -EPERM;
+
+	/* passing 1 enables given tx queue */
+	ret = pp2_ppio_set_outq_state(priv->ppio, queue_id, 1);
+	if (ret) {
+		RTE_LOG(ERR, PMD, "Failed to start txq %d\n", queue_id);
+		return ret;
+	}
+
+	dev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+	return 0;
+}
+
+/**
+ * DPDK callback to stop tx queue.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param queue_id
+ *   Transmit queue index.
+ *
+ * @return
+ *   0 on success, negative error value otherwise.
+ */
+static int
+mrvl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+	struct mrvl_priv *priv = dev->data->dev_private;
+	int ret;
+
+	if (!priv->ppio)
+		return -EPERM;
+
+	/* passing 0 disables given tx queue */
+	ret = pp2_ppio_set_outq_state(priv->ppio, queue_id, 0);
+	if (ret) {
+		RTE_LOG(ERR, PMD, "Failed to stop txq %d\n", queue_id);
+		return ret;
+	}
+
+	dev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+	return 0;
+}
+
+/**
+ * DPDK callback to start the device.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ *
+ * @return
+ *   0 on success, negative errno value on failure.
+ */
+static int
+mrvl_dev_start(struct rte_eth_dev *dev)
+{
+	struct mrvl_priv *priv = dev->data->dev_private;
+	char match[MRVL_MATCH_LEN];
+	int ret = 0, i, def_init_size;
+
+	snprintf(match, sizeof(match), "ppio-%d:%d",
+		 priv->pp_id, priv->ppio_id);
+	priv->ppio_params.match = match;
+
+	/*
+	 * Calculate the minimum bpool size for refill feature as follows:
+	 * 2 default burst sizes multiplied by the number of rx queues.
+	 * If the bpool size drops below this value, new buffers will
+	 * be added to the pool.
+	 */
+	priv->bpool_min_size = priv->nb_rx_queues * MRVL_BURST_SIZE * 2;
+
+	/* In case the initial bpool size configured during queue setup is
+	 * smaller than the minimum size, add more buffers.
+	 */
+	def_init_size = priv->bpool_min_size + MRVL_BURST_SIZE * 2;
+	if (priv->bpool_init_size < def_init_size) {
+		int buffs_to_add = def_init_size - priv->bpool_init_size;
+
+		priv->bpool_init_size += buffs_to_add;
+		ret = mrvl_fill_bpool(dev->data->rx_queues[0], buffs_to_add);
+		if (ret)
+			RTE_LOG(ERR, PMD, "Failed to add buffers to bpool\n");
+	}
+
+	/*
+	 * Calculate the maximum bpool size for refill feature as follows:
+	 * the maximum number of descriptors in an rx queue multiplied by the
+	 * number of rx queues, plus the minimum bpool size.
+	 * In case the bpool size exceeds this value, superfluous buffers
+	 * will be removed.
+	 */
+	priv->bpool_max_size = (priv->nb_rx_queues * MRVL_PP2_RXD_MAX) +
+				priv->bpool_min_size;
+
+	ret = pp2_ppio_init(&priv->ppio_params, &priv->ppio);
+	if (ret) {
+		RTE_LOG(ERR, PMD, "Failed to init ppio\n");
+		return ret;
+	}
+
+	/*
+	 * In case there are some stale uc/mc mac addresses flush them
+	 * here. It cannot be done during mrvl_dev_close() as port information
+	 * is already gone at that point (due to pp2_ppio_deinit() in
+	 * mrvl_dev_stop()).
+	 */
+	if (!priv->uc_mc_flushed) {
+		ret = pp2_ppio_flush_mac_addrs(priv->ppio, 1, 1);
+		if (ret) {
+			RTE_LOG(ERR, PMD,
+				"Failed to flush uc/mc filter list\n");
+			goto out;
+		}
+		priv->uc_mc_flushed = 1;
+	}
+
+	if (!priv->vlan_flushed) {
+		ret = pp2_ppio_flush_vlan(priv->ppio);
+		if (ret) {
+			RTE_LOG(ERR, PMD, "Failed to flush vlan list\n");
+			/*
+			 * TODO
+			 * once pp2_ppio_flush_vlan() is supported jump to out
+			 * goto out;
+			 */
+		}
+		priv->vlan_flushed = 1;
+	}
+
+	/* For default QoS config, don't start classifier. */
+	if (mrvl_qos_cfg) {
+		ret = mrvl_start_qos_mapping(priv);
+		if (ret) {
+			RTE_LOG(ERR, PMD, "Failed to setup QoS mapping\n");
+			goto out;
+		}
+	}
+
+	ret = mrvl_dev_set_link_up(dev);
+	if (ret) {
+		RTE_LOG(ERR, PMD, "Failed to set link up\n");
+		goto out;
+	}
+
+	/* start tx queues */
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		struct mrvl_txq *txq = dev->data->tx_queues[i];
+
+		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
+
+		if (!txq->tx_deferred_start)
+			continue;
+
+		/*
+		 * All txqs are started by default. Stop them
+		 * so that tx_deferred_start works as expected.
+		 */
+		ret = mrvl_tx_queue_stop(dev, i);
+		if (ret)
+			goto out;
+	}
+
+	return 0;
+out:
+	RTE_LOG(ERR, PMD, "Failed to start device\n");
+	pp2_ppio_deinit(priv->ppio);
+	return ret;
+}
+
+/**
+ * Flush receive queues.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ */
+static void
+mrvl_flush_rx_queues(struct rte_eth_dev *dev)
+{
+	int i;
+
+	RTE_LOG(INFO, PMD, "Flushing rx queues\n");
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		int ret, num;
+
+		do {
+			struct mrvl_rxq *q = dev->data->rx_queues[i];
+			struct pp2_ppio_desc descs[MRVL_PP2_RXD_MAX];
+
+			num = MRVL_PP2_RXD_MAX;
+			ret = pp2_ppio_recv(q->priv->ppio,
+					    q->priv->rxq_map[q->queue_id].tc,
+					    q->priv->rxq_map[q->queue_id].inq,
+					    descs, (uint16_t *)&num);
+		} while (ret == 0 && num);
+	}
+}
+
+/**
+ * Flush transmit shadow queues.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
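With the formulas above, the bpool watermarks for a hypothetical port with 4 RX queues come out as follows (MRVL_BURST_SIZE is 64 and MRVL_PP2_RXD_MAX is 2048 in this version):

#include <stdio.h>

int main(void)
{
	unsigned int nb_rx_queues = 4;          /* example port */
	unsigned int burst = 64;                /* MRVL_BURST_SIZE */
	unsigned int rxd_max = 2048;            /* MRVL_PP2_RXD_MAX */

	unsigned int min_size = nb_rx_queues * burst * 2;          /* 512 */
	unsigned int def_init_size = min_size + burst * 2;         /* 640 */
	unsigned int max_size = nb_rx_queues * rxd_max + min_size; /* 8704 */

	printf("min %u, init >= %u, max %u\n",
	       min_size, def_init_size, max_size);
	return 0;
}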
+ */ +static void +mrvl_flush_tx_shadow_queues(struct rte_eth_dev *dev) +{ + int i, j; + struct mrvl_txq *txq; + + RTE_LOG(INFO, PMD, "Flushing tx shadow queues\n"); + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = (struct mrvl_txq *)dev->data->tx_queues[i]; + + for (j = 0; j < RTE_MAX_LCORE; j++) { + struct mrvl_shadow_txq *sq; + + if (!hifs[j]) + continue; + + sq = &txq->shadow_txqs[j]; + mrvl_free_sent_buffers(txq->priv->ppio, + hifs[j], j, sq, txq->queue_id, 1); + while (sq->tail != sq->head) { + uint64_t addr = cookie_addr_high | + sq->ent[sq->tail].buff.cookie; + rte_pktmbuf_free( + (struct rte_mbuf *)addr); + sq->tail = (sq->tail + 1) & + MRVL_PP2_TX_SHADOWQ_MASK; + } + memset(sq, 0, sizeof(*sq)); + } + } +} + +/** + * Flush hardware bpool (buffer-pool). + * + * @param dev + * Pointer to Ethernet device structure. + */ +static void +mrvl_flush_bpool(struct rte_eth_dev *dev) +{ + struct mrvl_priv *priv = dev->data->dev_private; + struct pp2_hif *hif; + uint32_t num; + int ret; + unsigned int core_id = rte_lcore_id(); + + if (core_id == LCORE_ID_ANY) + core_id = 0; + + hif = mrvl_get_hif(priv, core_id); + + ret = pp2_bpool_get_num_buffs(priv->bpool, &num); + if (ret) { + RTE_LOG(ERR, PMD, "Failed to get bpool buffers number\n"); + return; + } + + while (num--) { + struct pp2_buff_inf inf; + uint64_t addr; + + ret = pp2_bpool_get_buff(hif, priv->bpool, &inf); + if (ret) + break; + + addr = cookie_addr_high | inf.cookie; + rte_pktmbuf_free((struct rte_mbuf *)addr); + } +} + +/** + * DPDK callback to stop the device. + * + * @param dev + * Pointer to Ethernet device structure. + */ +static void +mrvl_dev_stop(struct rte_eth_dev *dev) +{ + struct mrvl_priv *priv = dev->data->dev_private; + + mrvl_dev_set_link_down(dev); + mrvl_flush_rx_queues(dev); + mrvl_flush_tx_shadow_queues(dev); + if (priv->cls_tbl) { + pp2_cls_tbl_deinit(priv->cls_tbl); + priv->cls_tbl = NULL; + } + if (priv->qos_tbl) { + pp2_cls_qos_tbl_deinit(priv->qos_tbl); + priv->qos_tbl = NULL; + } + if (priv->ppio) + pp2_ppio_deinit(priv->ppio); + priv->ppio = NULL; + + /* policer must be released after ppio deinitialization */ + if (priv->policer) { + pp2_cls_plcr_deinit(priv->policer); + priv->policer = NULL; + } +} + +/** + * DPDK callback to close the device. + * + * @param dev + * Pointer to Ethernet device structure. + */ +static void +mrvl_dev_close(struct rte_eth_dev *dev) +{ + struct mrvl_priv *priv = dev->data->dev_private; + size_t i; + + for (i = 0; i < priv->ppio_params.inqs_params.num_tcs; ++i) { + struct pp2_ppio_tc_params *tc_params = + &priv->ppio_params.inqs_params.tcs_params[i]; + + if (tc_params->inqs_params) { + rte_free(tc_params->inqs_params); + tc_params->inqs_params = NULL; + } + } + + mrvl_flush_bpool(dev); +} + +/** + * DPDK callback to retrieve physical link information. + * + * @param dev + * Pointer to Ethernet device structure. + * @param wait_to_complete + * Wait for request completion (ignored). + * + * @return + * 0 on success, negative error value otherwise. 
+ */
+static int
+mrvl_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
+{
+	/*
+	 * TODO
+	 * once MUSDK provides necessary API use it here
+	 */
+	struct mrvl_priv *priv = dev->data->dev_private;
+	struct ethtool_cmd edata;
+	struct ifreq req;
+	int ret, fd, link_up;
+
+	if (!priv->ppio)
+		return -EPERM;
+
+	edata.cmd = ETHTOOL_GSET;
+
+	strcpy(req.ifr_name, dev->data->name);
+	req.ifr_data = (void *)&edata;
+
+	fd = socket(AF_INET, SOCK_DGRAM, 0);
+	if (fd == -1)
+		return -EFAULT;
+
+	ret = ioctl(fd, SIOCETHTOOL, &req);
+	if (ret == -1) {
+		close(fd);
+		return -EFAULT;
+	}
+
+	close(fd);
+
+	switch (ethtool_cmd_speed(&edata)) {
+	case SPEED_10:
+		dev->data->dev_link.link_speed = ETH_SPEED_NUM_10M;
+		break;
+	case SPEED_100:
+		dev->data->dev_link.link_speed = ETH_SPEED_NUM_100M;
+		break;
+	case SPEED_1000:
+		dev->data->dev_link.link_speed = ETH_SPEED_NUM_1G;
+		break;
+	case SPEED_10000:
+		dev->data->dev_link.link_speed = ETH_SPEED_NUM_10G;
+		break;
+	default:
+		dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+	}
+
+	dev->data->dev_link.link_duplex = edata.duplex ? ETH_LINK_FULL_DUPLEX :
+							 ETH_LINK_HALF_DUPLEX;
+	dev->data->dev_link.link_autoneg = edata.autoneg ? ETH_LINK_AUTONEG :
+							   ETH_LINK_FIXED;
+	pp2_ppio_get_link_state(priv->ppio, &link_up);
+	dev->data->dev_link.link_status = link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
+
+	return 0;
+}
+
+/**
+ * DPDK callback to enable promiscuous mode.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ */
+static void
+mrvl_promiscuous_enable(struct rte_eth_dev *dev)
+{
+	struct mrvl_priv *priv = dev->data->dev_private;
+	int ret;
+
+	if (!priv->ppio)
+		return;
+
+	if (priv->isolated)
+		return;
+
+	ret = pp2_ppio_set_promisc(priv->ppio, 1);
+	if (ret)
+		RTE_LOG(ERR, PMD, "Failed to enable promiscuous mode\n");
+}
+
+/**
+ * DPDK callback to enable allmulticast mode.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ */
+static void
+mrvl_allmulticast_enable(struct rte_eth_dev *dev)
+{
+	struct mrvl_priv *priv = dev->data->dev_private;
+	int ret;
+
+	if (!priv->ppio)
+		return;
+
+	if (priv->isolated)
+		return;
+
+	ret = pp2_ppio_set_mc_promisc(priv->ppio, 1);
+	if (ret)
+		RTE_LOG(ERR, PMD, "Failed to enable all-multicast mode\n");
+}
+
+/**
+ * DPDK callback to disable promiscuous mode.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ */
+static void
+mrvl_promiscuous_disable(struct rte_eth_dev *dev)
+{
+	struct mrvl_priv *priv = dev->data->dev_private;
+	int ret;
+
+	if (!priv->ppio)
+		return;
+
+	ret = pp2_ppio_set_promisc(priv->ppio, 0);
+	if (ret)
+		RTE_LOG(ERR, PMD, "Failed to disable promiscuous mode\n");
+}
+
+/**
+ * DPDK callback to disable allmulticast mode.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ */
+static void
+mrvl_allmulticast_disable(struct rte_eth_dev *dev)
+{
+	struct mrvl_priv *priv = dev->data->dev_private;
+	int ret;
+
+	if (!priv->ppio)
+		return;
+
+	ret = pp2_ppio_set_mc_promisc(priv->ppio, 0);
+	if (ret)
+		RTE_LOG(ERR, PMD, "Failed to disable all-multicast mode\n");
+}
+
+/**
+ * DPDK callback to remove a MAC address.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param index
+ *   MAC address index.
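+ *
+ * Reached through the generic API; a minimal sketch, assuming addr was
+ * registered earlier with rte_eth_dev_mac_addr_add():
+ *
+ *   struct ether_addr addr = { .addr_bytes = { 0x02, 0, 0, 0, 0, 1 } };
+ *
+ *   rte_eth_dev_mac_addr_remove(port_id, &addr);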
+ */ +static void +mrvl_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index) +{ + struct mrvl_priv *priv = dev->data->dev_private; + char buf[ETHER_ADDR_FMT_SIZE]; + int ret; + + if (!priv->ppio) + return; + + if (priv->isolated) + return; + + ret = pp2_ppio_remove_mac_addr(priv->ppio, + dev->data->mac_addrs[index].addr_bytes); + if (ret) { + ether_format_addr(buf, sizeof(buf), + &dev->data->mac_addrs[index]); + RTE_LOG(ERR, PMD, "Failed to remove mac %s\n", buf); + } +} + +/** + * DPDK callback to add a MAC address. + * + * @param dev + * Pointer to Ethernet device structure. + * @param mac_addr + * MAC address to register. + * @param index + * MAC address index. + * @param vmdq + * VMDq pool index to associate address with (unused). + * + * @return + * 0 on success, negative error value otherwise. + */ +static int +mrvl_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr, + uint32_t index, uint32_t vmdq __rte_unused) +{ + struct mrvl_priv *priv = dev->data->dev_private; + char buf[ETHER_ADDR_FMT_SIZE]; + int ret; + + if (priv->isolated) + return -ENOTSUP; + + if (index == 0) + /* For setting index 0, mrvl_mac_addr_set() should be used.*/ + return -1; + + if (!priv->ppio) + return 0; + + /* + * Maximum number of uc addresses can be tuned via kernel module mvpp2x + * parameter uc_filter_max. Maximum number of mc addresses is then + * MRVL_MAC_ADDRS_MAX - uc_filter_max. Currently it defaults to 4 and + * 21 respectively. + * + * If more than uc_filter_max uc addresses were added to filter list + * then NIC will switch to promiscuous mode automatically. + * + * If more than MRVL_MAC_ADDRS_MAX - uc_filter_max number mc addresses + * were added to filter list then NIC will switch to all-multicast mode + * automatically. + */ + ret = pp2_ppio_add_mac_addr(priv->ppio, mac_addr->addr_bytes); + if (ret) { + ether_format_addr(buf, sizeof(buf), mac_addr); + RTE_LOG(ERR, PMD, "Failed to add mac %s\n", buf); + return -1; + } + + return 0; +} + +/** + * DPDK callback to set the primary MAC address. + * + * @param dev + * Pointer to Ethernet device structure. + * @param mac_addr + * MAC address to register. + * + * @return + * 0 on success, negative error value otherwise. + */ +static int +mrvl_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr) +{ + struct mrvl_priv *priv = dev->data->dev_private; + int ret; + + if (!priv->ppio) + return 0; + + if (priv->isolated) + return -ENOTSUP; + + ret = pp2_ppio_set_mac_addr(priv->ppio, mac_addr->addr_bytes); + if (ret) { + char buf[ETHER_ADDR_FMT_SIZE]; + ether_format_addr(buf, sizeof(buf), mac_addr); + RTE_LOG(ERR, PMD, "Failed to set mac to %s\n", buf); + } + + return ret; +} + +/** + * DPDK callback to get device statistics. + * + * @param dev + * Pointer to Ethernet device structure. + * @param stats + * Stats structure output buffer. + * + * @return + * 0 on success, negative error value otherwise. 
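+ *
+ * Exposed to applications through rte_eth_stats_get(); a minimal
+ * sketch, assuming a valid port_id:
+ *
+ *   struct rte_eth_stats st;
+ *
+ *   if (rte_eth_stats_get(port_id, &st) == 0)
+ *       printf("rx %" PRIu64 " tx %" PRIu64 "\n",
+ *              st.ipackets, st.opackets);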
+ */
+static int
+mrvl_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+	struct mrvl_priv *priv = dev->data->dev_private;
+	struct pp2_ppio_statistics ppio_stats;
+	uint64_t drop_mac = 0;
+	unsigned int i, idx;
+	int ret;
+
+	if (!priv->ppio)
+		return -EPERM;
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		struct mrvl_rxq *rxq = dev->data->rx_queues[i];
+		struct pp2_ppio_inq_statistics rx_stats;
+
+		if (!rxq)
+			continue;
+
+		idx = rxq->queue_id;
+		if (unlikely(idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)) {
+			RTE_LOG(ERR, PMD,
+				"rx queue %d stats out of range (0 - %d)\n",
+				idx, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
+			continue;
+		}
+
+		ret = pp2_ppio_inq_get_statistics(priv->ppio,
+						  priv->rxq_map[idx].tc,
+						  priv->rxq_map[idx].inq,
+						  &rx_stats, 0);
+		if (unlikely(ret)) {
+			RTE_LOG(ERR, PMD,
+				"Failed to update rx queue %d stats\n", idx);
+			break;
+		}
+
+		stats->q_ibytes[idx] = rxq->bytes_recv;
+		stats->q_ipackets[idx] = rx_stats.enq_desc - rxq->drop_mac;
+		stats->q_errors[idx] = rx_stats.drop_early +
+				       rx_stats.drop_fullq +
+				       rx_stats.drop_bm +
+				       rxq->drop_mac;
+		stats->ibytes += rxq->bytes_recv;
+		drop_mac += rxq->drop_mac;
+	}
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		struct mrvl_txq *txq = dev->data->tx_queues[i];
+		struct pp2_ppio_outq_statistics tx_stats;
+
+		if (!txq)
+			continue;
+
+		idx = txq->queue_id;
+		if (unlikely(idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)) {
+			RTE_LOG(ERR, PMD,
+				"tx queue %d stats out of range (0 - %d)\n",
+				idx, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
+			continue;
+		}
+
+		ret = pp2_ppio_outq_get_statistics(priv->ppio, idx,
+						   &tx_stats, 0);
+		if (unlikely(ret)) {
+			RTE_LOG(ERR, PMD,
+				"Failed to update tx queue %d stats\n", idx);
+			break;
+		}
+
+		stats->q_opackets[idx] = tx_stats.deq_desc;
+		stats->q_obytes[idx] = txq->bytes_sent;
+		stats->obytes += txq->bytes_sent;
+	}
+
+	ret = pp2_ppio_get_statistics(priv->ppio, &ppio_stats, 0);
+	if (unlikely(ret)) {
+		RTE_LOG(ERR, PMD, "Failed to update port statistics\n");
+		return ret;
+	}
+
+	stats->ipackets += ppio_stats.rx_packets - drop_mac;
+	stats->opackets += ppio_stats.tx_packets;
+	stats->imissed += ppio_stats.rx_fullq_dropped +
+			  ppio_stats.rx_bm_dropped +
+			  ppio_stats.rx_early_dropped +
+			  ppio_stats.rx_fifo_dropped +
+			  ppio_stats.rx_cls_dropped;
+	stats->ierrors = drop_mac;
+
+	return 0;
+}
+
+/**
+ * DPDK callback to clear device statistics.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ */
+static void
+mrvl_stats_reset(struct rte_eth_dev *dev)
+{
+	struct mrvl_priv *priv = dev->data->dev_private;
+	int i;
+
+	if (!priv->ppio)
+		return;
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		struct mrvl_rxq *rxq = dev->data->rx_queues[i];
+
+		if (!rxq)
+			continue;
+
+		pp2_ppio_inq_get_statistics(priv->ppio, priv->rxq_map[i].tc,
+					    priv->rxq_map[i].inq, NULL, 1);
+		rxq->bytes_recv = 0;
+		rxq->drop_mac = 0;
+	}
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		struct mrvl_txq *txq = dev->data->tx_queues[i];
+
+		if (!txq)
+			continue;
+
+		pp2_ppio_outq_get_statistics(priv->ppio, i, NULL, 1);
+		txq->bytes_sent = 0;
+	}
+
+	pp2_ppio_get_statistics(priv->ppio, NULL, 1);
+}
+
+/**
+ * DPDK callback to get extended statistics.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param stats
+ *   Pointer to xstats table.
+ * @param n
+ *   Number of entries in xstats table.
+ * @return
+ *   Negative value on error, number of read xstats otherwise.
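+ *
+ * Typical application usage (sketch; sizing via the names callback,
+ * which returns the number of available entries when passed NULL):
+ *
+ *   int n = rte_eth_xstats_get_names(port_id, NULL, 0);
+ *   struct rte_eth_xstat *xs = calloc(n, sizeof(*xs));
+ *
+ *   if (xs)
+ *       rte_eth_xstats_get(port_id, xs, n);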
+ */ +static int +mrvl_xstats_get(struct rte_eth_dev *dev, + struct rte_eth_xstat *stats, unsigned int n) +{ + struct mrvl_priv *priv = dev->data->dev_private; + struct pp2_ppio_statistics ppio_stats; + unsigned int i; + + if (!stats) + return 0; + + pp2_ppio_get_statistics(priv->ppio, &ppio_stats, 0); + for (i = 0; i < n && i < RTE_DIM(mrvl_xstats_tbl); i++) { + uint64_t val; + + if (mrvl_xstats_tbl[i].size == sizeof(uint32_t)) + val = *(uint32_t *)((uint8_t *)&ppio_stats + + mrvl_xstats_tbl[i].offset); + else if (mrvl_xstats_tbl[i].size == sizeof(uint64_t)) + val = *(uint64_t *)((uint8_t *)&ppio_stats + + mrvl_xstats_tbl[i].offset); + else + return -EINVAL; + + stats[i].id = i; + stats[i].value = val; + } + + return n; +} + +/** + * DPDK callback to reset extended statistics. + * + * @param dev + * Pointer to Ethernet device structure. + */ +static void +mrvl_xstats_reset(struct rte_eth_dev *dev) +{ + mrvl_stats_reset(dev); +} + +/** + * DPDK callback to get extended statistics names. + * + * @param dev (unused) + * Pointer to Ethernet device structure. + * @param xstats_names + * Pointer to xstats names table. + * @param size + * Size of the xstats names table. + * @return + * Number of read names. + */ +static int +mrvl_xstats_get_names(struct rte_eth_dev *dev __rte_unused, + struct rte_eth_xstat_name *xstats_names, + unsigned int size) +{ + unsigned int i; + + if (!xstats_names) + return RTE_DIM(mrvl_xstats_tbl); + + for (i = 0; i < size && i < RTE_DIM(mrvl_xstats_tbl); i++) + snprintf(xstats_names[i].name, RTE_ETH_XSTATS_NAME_SIZE, "%s", + mrvl_xstats_tbl[i].name); + + return size; +} + +/** + * DPDK callback to get information about the device. + * + * @param dev + * Pointer to Ethernet device structure (unused). + * @param info + * Info structure output buffer. + */ +static void +mrvl_dev_infos_get(struct rte_eth_dev *dev __rte_unused, + struct rte_eth_dev_info *info) +{ + info->speed_capa = ETH_LINK_SPEED_10M | + ETH_LINK_SPEED_100M | + ETH_LINK_SPEED_1G | + ETH_LINK_SPEED_10G; + + info->max_rx_queues = MRVL_PP2_RXQ_MAX; + info->max_tx_queues = MRVL_PP2_TXQ_MAX; + info->max_mac_addrs = MRVL_MAC_ADDRS_MAX; + + info->rx_desc_lim.nb_max = MRVL_PP2_RXD_MAX; + info->rx_desc_lim.nb_min = MRVL_PP2_RXD_MIN; + info->rx_desc_lim.nb_align = MRVL_PP2_RXD_ALIGN; + + info->tx_desc_lim.nb_max = MRVL_PP2_TXD_MAX; + info->tx_desc_lim.nb_min = MRVL_PP2_TXD_MIN; + info->tx_desc_lim.nb_align = MRVL_PP2_TXD_ALIGN; + + info->rx_offload_capa = MRVL_RX_OFFLOADS; + info->rx_queue_offload_capa = MRVL_RX_OFFLOADS; + + info->tx_offload_capa = MRVL_TX_OFFLOADS; + info->tx_queue_offload_capa = MRVL_TX_OFFLOADS; + + info->flow_type_rss_offloads = ETH_RSS_IPV4 | + ETH_RSS_NONFRAG_IPV4_TCP | + ETH_RSS_NONFRAG_IPV4_UDP; + + /* By default packets are dropped if no descriptors are available */ + info->default_rxconf.rx_drop_en = 1; + info->default_rxconf.offloads = DEV_RX_OFFLOAD_CRC_STRIP; + + info->max_rx_pktlen = MRVL_PKT_SIZE_MAX; +} + +/** + * Return supported packet types. + * + * @param dev + * Pointer to Ethernet device structure (unused). + * + * @return + * Const pointer to the table with supported packet types. 
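+ *
+ * Queried through the generic helper; a minimal sketch:
+ *
+ *   uint32_t ptypes[16];
+ *   int n = rte_eth_dev_get_supported_ptypes(port_id,
+ *			RTE_PTYPE_ALL_MASK, ptypes, RTE_DIM(ptypes));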
+ */ +static const uint32_t * +mrvl_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused) +{ + static const uint32_t ptypes[] = { + RTE_PTYPE_L2_ETHER, + RTE_PTYPE_L3_IPV4, + RTE_PTYPE_L3_IPV4_EXT, + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN, + RTE_PTYPE_L3_IPV6, + RTE_PTYPE_L3_IPV6_EXT, + RTE_PTYPE_L2_ETHER_ARP, + RTE_PTYPE_L4_TCP, + RTE_PTYPE_L4_UDP + }; + + return ptypes; +} + +/** + * DPDK callback to get information about specific receive queue. + * + * @param dev + * Pointer to Ethernet device structure. + * @param rx_queue_id + * Receive queue index. + * @param qinfo + * Receive queue information structure. + */ +static void mrvl_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id, + struct rte_eth_rxq_info *qinfo) +{ + struct mrvl_rxq *q = dev->data->rx_queues[rx_queue_id]; + struct mrvl_priv *priv = dev->data->dev_private; + int inq = priv->rxq_map[rx_queue_id].inq; + int tc = priv->rxq_map[rx_queue_id].tc; + struct pp2_ppio_tc_params *tc_params = + &priv->ppio_params.inqs_params.tcs_params[tc]; + + qinfo->mp = q->mp; + qinfo->nb_desc = tc_params->inqs_params[inq].size; +} + +/** + * DPDK callback to get information about specific transmit queue. + * + * @param dev + * Pointer to Ethernet device structure. + * @param tx_queue_id + * Transmit queue index. + * @param qinfo + * Transmit queue information structure. + */ +static void mrvl_txq_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id, + struct rte_eth_txq_info *qinfo) +{ + struct mrvl_priv *priv = dev->data->dev_private; + struct mrvl_txq *txq = dev->data->tx_queues[tx_queue_id]; + + qinfo->nb_desc = + priv->ppio_params.outqs_params.outqs_params[tx_queue_id].size; + qinfo->conf.tx_deferred_start = txq->tx_deferred_start; +} + +/** + * DPDK callback to Configure a VLAN filter. + * + * @param dev + * Pointer to Ethernet device structure. + * @param vlan_id + * VLAN ID to filter. + * @param on + * Toggle filter. + * + * @return + * 0 on success, negative error value otherwise. + */ +static int +mrvl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) +{ + struct mrvl_priv *priv = dev->data->dev_private; + + if (!priv->ppio) + return -EPERM; + + if (priv->isolated) + return -ENOTSUP; + + return on ? pp2_ppio_add_vlan(priv->ppio, vlan_id) : + pp2_ppio_remove_vlan(priv->ppio, vlan_id); +} + +/** + * Release buffers to hardware bpool (buffer-pool) + * + * @param rxq + * Receive queue pointer. + * @param num + * Number of buffers to release to bpool. + * + * @return + * 0 on success, negative error value otherwise. 
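+ *
+ * The thresholds computed in mrvl_dev_start() bound how many buffers
+ * this helper keeps in flight. E.g. with 4 rx queues and MRVL_BURST_SIZE
+ * of 64, bpool_min_size is 4 * 64 * 2 = 512 buffers and bpool_max_size
+ * is 4 * MRVL_PP2_RXD_MAX + 512 = 8704 buffers.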
+ */
+static int
+mrvl_fill_bpool(struct mrvl_rxq *rxq, int num)
+{
+	struct buff_release_entry entries[MRVL_PP2_RXD_MAX];
+	struct rte_mbuf *mbufs[MRVL_PP2_RXD_MAX];
+	int i, ret;
+	unsigned int core_id;
+	struct pp2_hif *hif;
+	struct pp2_bpool *bpool;
+
+	core_id = rte_lcore_id();
+	if (core_id == LCORE_ID_ANY)
+		core_id = 0;
+
+	hif = mrvl_get_hif(rxq->priv, core_id);
+	if (!hif)
+		return -1;
+
+	bpool = rxq->priv->bpool;
+
+	ret = rte_pktmbuf_alloc_bulk(rxq->mp, mbufs, num);
+	if (ret)
+		return ret;
+
+	if (cookie_addr_high == MRVL_COOKIE_ADDR_INVALID)
+		cookie_addr_high =
+			(uint64_t)mbufs[0] & MRVL_COOKIE_HIGH_ADDR_MASK;
+
+	for (i = 0; i < num; i++) {
+		if (((uint64_t)mbufs[i] & MRVL_COOKIE_HIGH_ADDR_MASK)
+			!= cookie_addr_high) {
+			RTE_LOG(ERR, PMD,
+				"mbuf virtual addr high 0x%lx out of range\n",
+				(uint64_t)mbufs[i] >> 32);
+			goto out;
+		}
+
+		entries[i].buff.addr =
+			rte_mbuf_data_iova_default(mbufs[i]);
+		entries[i].buff.cookie = (pp2_cookie_t)(uint64_t)mbufs[i];
+		entries[i].bpool = bpool;
+	}
+
+	pp2_bpool_put_buffs(hif, entries, (uint16_t *)&i);
+	mrvl_port_bpool_size[bpool->pp2_id][bpool->id][core_id] += i;
+
+	if (i != num)
+		goto out;
+
+	return 0;
+out:
+	for (; i < num; i++)
+		rte_pktmbuf_free(mbufs[i]);
+
+	return -1;
+}
+
+/**
+ * DPDK callback to configure the receive queue.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param idx
+ *   RX queue index.
+ * @param desc
+ *   Number of descriptors to configure in queue.
+ * @param socket
+ *   NUMA socket on which memory must be allocated.
+ * @param conf
+ *   Thresholds parameters.
+ * @param mp
+ *   Memory pool for buffer allocations.
+ *
+ * @return
+ *   0 on success, negative error value otherwise.
+ */
+static int
+mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+		    unsigned int socket,
+		    const struct rte_eth_rxconf *conf,
+		    struct rte_mempool *mp)
+{
+	struct mrvl_priv *priv = dev->data->dev_private;
+	struct mrvl_rxq *rxq;
+	uint32_t min_size,
+		max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
+	int ret, tc, inq;
+	uint64_t offloads;
+
+	offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads;
+
+	if (priv->rxq_map[idx].tc == MRVL_UNKNOWN_TC) {
+		/*
+		 * Unknown TC mapping; this queue cannot be mapped to
+		 * a valid in-queue.
+		 */
+		RTE_LOG(ERR, PMD, "Unknown TC mapping for queue %hu eth%hhu\n",
+			idx, priv->ppio_id);
+		return -EFAULT;
+	}
+
+	min_size = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM -
+		   MRVL_PKT_EFFEC_OFFS;
+	if (min_size < max_rx_pkt_len) {
+		RTE_LOG(ERR, PMD,
+			"Mbuf size must be increased to %u bytes to hold up to %u bytes of data.\n",
+			max_rx_pkt_len + RTE_PKTMBUF_HEADROOM +
+			MRVL_PKT_EFFEC_OFFS,
+			max_rx_pkt_len);
+		return -EINVAL;
+	}
+
+	if (dev->data->rx_queues[idx]) {
+		rte_free(dev->data->rx_queues[idx]);
+		dev->data->rx_queues[idx] = NULL;
+	}
+
+	rxq = rte_zmalloc_socket("rxq", sizeof(*rxq), 0, socket);
+	if (!rxq)
+		return -ENOMEM;
+
+	rxq->priv = priv;
+	rxq->mp = mp;
+	rxq->cksum_enabled = offloads & DEV_RX_OFFLOAD_IPV4_CKSUM;
+	rxq->queue_id = idx;
+	rxq->port_id = dev->data->port_id;
+	mrvl_port_to_bpool_lookup[rxq->port_id] = priv->bpool;
+
+	tc = priv->rxq_map[rxq->queue_id].tc;
+	inq = priv->rxq_map[rxq->queue_id].inq;
+	priv->ppio_params.inqs_params.tcs_params[tc].inqs_params[inq].size =
+		desc;
+
+	ret = mrvl_fill_bpool(rxq, desc);
+	if (ret) {
+		rte_free(rxq);
+		return ret;
+	}
+
+	priv->bpool_init_size += desc;
+
+	dev->data->rx_queues[idx] = rxq;
+
+	return 0;
+}
+
+/**
+ * DPDK callback to release the receive queue.
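+ *
+ * Counterpart of mrvl_rx_queue_setup(): every buffer that setup pushed
+ * to the bpool for this queue is pulled back and freed here. A typical
+ * application-side sequence (sketch, assuming a valid mempool mp):
+ *
+ *   rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(), NULL, mp);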
+ *
+ * @param rxq
+ *   Generic receive queue pointer.
+ */
+static void
+mrvl_rx_queue_release(void *rxq)
+{
+	struct mrvl_rxq *q = rxq;
+	struct pp2_ppio_tc_params *tc_params;
+	int i, num, tc, inq;
+	struct pp2_hif *hif;
+	unsigned int core_id = rte_lcore_id();
+
+	if (core_id == LCORE_ID_ANY)
+		core_id = 0;
+
+	if (!q)
+		return;
+
+	hif = mrvl_get_hif(q->priv, core_id);
+	if (!hif)
+		return;
+
+	tc = q->priv->rxq_map[q->queue_id].tc;
+	inq = q->priv->rxq_map[q->queue_id].inq;
+	tc_params = &q->priv->ppio_params.inqs_params.tcs_params[tc];
+	num = tc_params->inqs_params[inq].size;
+	for (i = 0; i < num; i++) {
+		struct pp2_buff_inf inf;
+		uint64_t addr;
+
+		pp2_bpool_get_buff(hif, q->priv->bpool, &inf);
+		addr = cookie_addr_high | inf.cookie;
+		rte_pktmbuf_free((struct rte_mbuf *)addr);
+	}
+
+	rte_free(q);
+}
+
+/**
+ * DPDK callback to configure the transmit queue.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param idx
+ *   Transmit queue index.
+ * @param desc
+ *   Number of descriptors to configure in the queue.
+ * @param socket
+ *   NUMA socket on which memory must be allocated.
+ * @param conf
+ *   Tx queue configuration parameters.
+ *
+ * @return
+ *   0 on success, negative error value otherwise.
+ */
+static int
+mrvl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+		    unsigned int socket,
+		    const struct rte_eth_txconf *conf)
+{
+	struct mrvl_priv *priv = dev->data->dev_private;
+	struct mrvl_txq *txq;
+
+	if (dev->data->tx_queues[idx]) {
+		rte_free(dev->data->tx_queues[idx]);
+		dev->data->tx_queues[idx] = NULL;
+	}
+
+	txq = rte_zmalloc_socket("txq", sizeof(*txq), 0, socket);
+	if (!txq)
+		return -ENOMEM;
+
+	txq->priv = priv;
+	txq->queue_id = idx;
+	txq->port_id = dev->data->port_id;
+	txq->tx_deferred_start = conf->tx_deferred_start;
+	dev->data->tx_queues[idx] = txq;
+
+	priv->ppio_params.outqs_params.outqs_params[idx].size = desc;
+
+	return 0;
+}
+
+/**
+ * DPDK callback to release the transmit queue.
+ *
+ * @param txq
+ *   Generic transmit queue pointer.
+ */
+static void
+mrvl_tx_queue_release(void *txq)
+{
+	struct mrvl_txq *q = txq;
+
+	if (!q)
+		return;
+
+	rte_free(q);
+}
+
+/**
+ * DPDK callback to get flow control configuration.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param fc_conf
+ *   Pointer to the flow control configuration.
+ *
+ * @return
+ *   0 on success, negative error value otherwise.
+ */
+static int
+mrvl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+	struct mrvl_priv *priv = dev->data->dev_private;
+	int ret, en;
+
+	if (!priv->ppio)
+		return -EPERM;
+
+	ret = pp2_ppio_get_rx_pause(priv->ppio, &en);
+	if (ret) {
+		RTE_LOG(ERR, PMD, "Failed to read rx pause state\n");
+		return ret;
+	}
+
+	fc_conf->mode = en ? RTE_FC_RX_PAUSE : RTE_FC_NONE;
+
+	return 0;
+}
+
+/**
+ * DPDK callback to set flow control configuration.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param fc_conf
+ *   Pointer to the flow control configuration.
+ *
+ * @return
+ *   0 on success, negative error value otherwise.
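+ *
+ * Only the mode is honoured and only RTE_FC_NONE/RTE_FC_RX_PAUSE are
+ * accepted; a minimal sketch of a valid request:
+ *
+ *   struct rte_eth_fc_conf fc = { .mode = RTE_FC_RX_PAUSE };
+ *
+ *   rte_eth_dev_flow_ctrl_set(port_id, &fc);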
+ */
+static int
+mrvl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+	struct mrvl_priv *priv = dev->data->dev_private;
+
+	if (!priv->ppio)
+		return -EPERM;
+
+	if (fc_conf->high_water ||
+	    fc_conf->low_water ||
+	    fc_conf->pause_time ||
+	    fc_conf->mac_ctrl_frame_fwd ||
+	    fc_conf->autoneg) {
+		RTE_LOG(ERR, PMD, "Flowctrl parameter is not supported\n");
+
+		return -EINVAL;
+	}
+
+	if (fc_conf->mode == RTE_FC_NONE ||
+	    fc_conf->mode == RTE_FC_RX_PAUSE) {
+		int ret, en;
+
+		en = fc_conf->mode == RTE_FC_NONE ? 0 : 1;
+		ret = pp2_ppio_set_rx_pause(priv->ppio, en);
+		if (ret)
+			RTE_LOG(ERR, PMD,
+				"Failed to change flowctrl on RX side\n");
+
+		return ret;
+	}
+
+	return 0;
+}
+
+/**
+ * Update RSS hash configuration.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param rss_conf
+ *   Pointer to RSS configuration.
+ *
+ * @return
+ *   0 on success, negative error value otherwise.
+ */
+static int
+mrvl_rss_hash_update(struct rte_eth_dev *dev,
+		     struct rte_eth_rss_conf *rss_conf)
+{
+	struct mrvl_priv *priv = dev->data->dev_private;
+
+	if (priv->isolated)
+		return -ENOTSUP;
+
+	return mrvl_configure_rss(priv, rss_conf);
+}
+
+/**
+ * DPDK callback to get RSS hash configuration.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param rss_conf
+ *   Pointer to RSS configuration.
+ *
+ * @return
+ *   Always 0.
+ */
+static int
+mrvl_rss_hash_conf_get(struct rte_eth_dev *dev,
+		       struct rte_eth_rss_conf *rss_conf)
+{
+	struct mrvl_priv *priv = dev->data->dev_private;
+	enum pp2_ppio_hash_type hash_type =
+		priv->ppio_params.inqs_params.hash_type;
+
+	rss_conf->rss_key = NULL;
+
+	if (hash_type == PP2_PPIO_HASH_T_NONE)
+		rss_conf->rss_hf = 0;
+	else if (hash_type == PP2_PPIO_HASH_T_2_TUPLE)
+		rss_conf->rss_hf = ETH_RSS_IPV4;
+	else if (hash_type == PP2_PPIO_HASH_T_5_TUPLE && priv->rss_hf_tcp)
+		rss_conf->rss_hf = ETH_RSS_NONFRAG_IPV4_TCP;
+	else if (hash_type == PP2_PPIO_HASH_T_5_TUPLE && !priv->rss_hf_tcp)
+		rss_conf->rss_hf = ETH_RSS_NONFRAG_IPV4_UDP;
+
+	return 0;
+}
+
+/**
+ * DPDK callback to get rte_flow callbacks.
+ *
+ * @param dev
+ *   Pointer to the device structure.
+ * @param filter_type
+ *   Flow filter type.
+ * @param filter_op
+ *   Flow filter operation.
+ * @param arg
+ *   Pointer to pass the flow ops.
+ *
+ * @return
+ *   0 on success, negative error value otherwise.
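+ *
+ * This is how the generic rte_flow layer discovers mrvl_flow_ops;
+ * applications then use the ordinary flow API, e.g. (sketch, with
+ * attr/pattern/actions prepared by the caller):
+ *
+ *   struct rte_flow_error err;
+ *   struct rte_flow *f = rte_flow_create(port_id, &attr, pattern,
+ *					 actions, &err);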
+ */ +static int +mrvl_eth_filter_ctrl(struct rte_eth_dev *dev __rte_unused, + enum rte_filter_type filter_type, + enum rte_filter_op filter_op, void *arg) +{ + switch (filter_type) { + case RTE_ETH_FILTER_GENERIC: + if (filter_op != RTE_ETH_FILTER_GET) + return -EINVAL; + *(const void **)arg = &mrvl_flow_ops; + return 0; + default: + RTE_LOG(WARNING, PMD, "Filter type (%d) not supported", + filter_type); + return -EINVAL; + } +} + +static const struct eth_dev_ops mrvl_ops = { + .dev_configure = mrvl_dev_configure, + .dev_start = mrvl_dev_start, + .dev_stop = mrvl_dev_stop, + .dev_set_link_up = mrvl_dev_set_link_up, + .dev_set_link_down = mrvl_dev_set_link_down, + .dev_close = mrvl_dev_close, + .link_update = mrvl_link_update, + .promiscuous_enable = mrvl_promiscuous_enable, + .allmulticast_enable = mrvl_allmulticast_enable, + .promiscuous_disable = mrvl_promiscuous_disable, + .allmulticast_disable = mrvl_allmulticast_disable, + .mac_addr_remove = mrvl_mac_addr_remove, + .mac_addr_add = mrvl_mac_addr_add, + .mac_addr_set = mrvl_mac_addr_set, + .mtu_set = mrvl_mtu_set, + .stats_get = mrvl_stats_get, + .stats_reset = mrvl_stats_reset, + .xstats_get = mrvl_xstats_get, + .xstats_reset = mrvl_xstats_reset, + .xstats_get_names = mrvl_xstats_get_names, + .dev_infos_get = mrvl_dev_infos_get, + .dev_supported_ptypes_get = mrvl_dev_supported_ptypes_get, + .rxq_info_get = mrvl_rxq_info_get, + .txq_info_get = mrvl_txq_info_get, + .vlan_filter_set = mrvl_vlan_filter_set, + .tx_queue_start = mrvl_tx_queue_start, + .tx_queue_stop = mrvl_tx_queue_stop, + .rx_queue_setup = mrvl_rx_queue_setup, + .rx_queue_release = mrvl_rx_queue_release, + .tx_queue_setup = mrvl_tx_queue_setup, + .tx_queue_release = mrvl_tx_queue_release, + .flow_ctrl_get = mrvl_flow_ctrl_get, + .flow_ctrl_set = mrvl_flow_ctrl_set, + .rss_hash_update = mrvl_rss_hash_update, + .rss_hash_conf_get = mrvl_rss_hash_conf_get, + .filter_ctrl = mrvl_eth_filter_ctrl, +}; + +/** + * Return packet type information and l3/l4 offsets. + * + * @param desc + * Pointer to the received packet descriptor. + * @param l3_offset + * l3 packet offset. + * @param l4_offset + * l4 packet offset. + * + * @return + * Packet type information. + */ +static inline uint64_t +mrvl_desc_to_packet_type_and_offset(struct pp2_ppio_desc *desc, + uint8_t *l3_offset, uint8_t *l4_offset) +{ + enum pp2_inq_l3_type l3_type; + enum pp2_inq_l4_type l4_type; + uint64_t packet_type; + + pp2_ppio_inq_desc_get_l3_info(desc, &l3_type, l3_offset); + pp2_ppio_inq_desc_get_l4_info(desc, &l4_type, l4_offset); + + packet_type = RTE_PTYPE_L2_ETHER; + + switch (l3_type) { + case PP2_INQ_L3_TYPE_IPV4_NO_OPTS: + packet_type |= RTE_PTYPE_L3_IPV4; + break; + case PP2_INQ_L3_TYPE_IPV4_OK: + packet_type |= RTE_PTYPE_L3_IPV4_EXT; + break; + case PP2_INQ_L3_TYPE_IPV4_TTL_ZERO: + packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN; + break; + case PP2_INQ_L3_TYPE_IPV6_NO_EXT: + packet_type |= RTE_PTYPE_L3_IPV6; + break; + case PP2_INQ_L3_TYPE_IPV6_EXT: + packet_type |= RTE_PTYPE_L3_IPV6_EXT; + break; + case PP2_INQ_L3_TYPE_ARP: + packet_type |= RTE_PTYPE_L2_ETHER_ARP; + /* + * In case of ARP l4_offset is set to wrong value. + * Set it to proper one so that later on mbuf->l3_len can be + * calculated subtracting l4_offset and l3_offset. 
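+		 * E.g. for an untagged ARP frame l3_offset is 14 (the
+		 * Ethernet header length), so l3_len ends up as
+		 * MRVL_ARP_LENGTH after the adjustment below.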
+ */ + *l4_offset = *l3_offset + MRVL_ARP_LENGTH; + break; + default: + RTE_LOG(DEBUG, PMD, "Failed to recognise l3 packet type\n"); + break; + } + + switch (l4_type) { + case PP2_INQ_L4_TYPE_TCP: + packet_type |= RTE_PTYPE_L4_TCP; + break; + case PP2_INQ_L4_TYPE_UDP: + packet_type |= RTE_PTYPE_L4_UDP; + break; + default: + RTE_LOG(DEBUG, PMD, "Failed to recognise l4 packet type\n"); + break; + } + + return packet_type; +} + +/** + * Get offload information from the received packet descriptor. + * + * @param desc + * Pointer to the received packet descriptor. + * + * @return + * Mbuf offload flags. + */ +static inline uint64_t +mrvl_desc_to_ol_flags(struct pp2_ppio_desc *desc) +{ + uint64_t flags; + enum pp2_inq_desc_status status; + + status = pp2_ppio_inq_desc_get_l3_pkt_error(desc); + if (unlikely(status != PP2_DESC_ERR_OK)) + flags = PKT_RX_IP_CKSUM_BAD; + else + flags = PKT_RX_IP_CKSUM_GOOD; + + status = pp2_ppio_inq_desc_get_l4_pkt_error(desc); + if (unlikely(status != PP2_DESC_ERR_OK)) + flags |= PKT_RX_L4_CKSUM_BAD; + else + flags |= PKT_RX_L4_CKSUM_GOOD; + + return flags; +} + +/** + * DPDK callback for receive. + * + * @param rxq + * Generic pointer to the receive queue. + * @param rx_pkts + * Array to store received packets. + * @param nb_pkts + * Maximum number of packets in array. + * + * @return + * Number of packets successfully received. + */ +static uint16_t +mrvl_rx_pkt_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) +{ + struct mrvl_rxq *q = rxq; + struct pp2_ppio_desc descs[nb_pkts]; + struct pp2_bpool *bpool; + int i, ret, rx_done = 0; + int num; + struct pp2_hif *hif; + unsigned int core_id = rte_lcore_id(); + + hif = mrvl_get_hif(q->priv, core_id); + + if (unlikely(!q->priv->ppio || !hif)) + return 0; + + bpool = q->priv->bpool; + + ret = pp2_ppio_recv(q->priv->ppio, q->priv->rxq_map[q->queue_id].tc, + q->priv->rxq_map[q->queue_id].inq, descs, &nb_pkts); + if (unlikely(ret < 0)) { + RTE_LOG(ERR, PMD, "Failed to receive packets\n"); + return 0; + } + mrvl_port_bpool_size[bpool->pp2_id][bpool->id][core_id] -= nb_pkts; + + for (i = 0; i < nb_pkts; i++) { + struct rte_mbuf *mbuf; + uint8_t l3_offset, l4_offset; + enum pp2_inq_desc_status status; + uint64_t addr; + + if (likely(nb_pkts - i > MRVL_MUSDK_PREFETCH_SHIFT)) { + struct pp2_ppio_desc *pref_desc; + u64 pref_addr; + + pref_desc = &descs[i + MRVL_MUSDK_PREFETCH_SHIFT]; + pref_addr = cookie_addr_high | + pp2_ppio_inq_desc_get_cookie(pref_desc); + rte_mbuf_prefetch_part1((struct rte_mbuf *)(pref_addr)); + rte_mbuf_prefetch_part2((struct rte_mbuf *)(pref_addr)); + } + + addr = cookie_addr_high | + pp2_ppio_inq_desc_get_cookie(&descs[i]); + mbuf = (struct rte_mbuf *)addr; + rte_pktmbuf_reset(mbuf); + + /* drop packet in case of mac, overrun or resource error */ + status = pp2_ppio_inq_desc_get_l2_pkt_error(&descs[i]); + if (unlikely(status != PP2_DESC_ERR_OK)) { + struct pp2_buff_inf binf = { + .addr = rte_mbuf_data_iova_default(mbuf), + .cookie = (pp2_cookie_t)(uint64_t)mbuf, + }; + + pp2_bpool_put_buff(hif, bpool, &binf); + mrvl_port_bpool_size + [bpool->pp2_id][bpool->id][core_id]++; + q->drop_mac++; + continue; + } + + mbuf->data_off += MRVL_PKT_EFFEC_OFFS; + mbuf->pkt_len = pp2_ppio_inq_desc_get_pkt_len(&descs[i]); + mbuf->data_len = mbuf->pkt_len; + mbuf->port = q->port_id; + mbuf->packet_type = + mrvl_desc_to_packet_type_and_offset(&descs[i], + &l3_offset, + &l4_offset); + mbuf->l2_len = l3_offset; + mbuf->l3_len = l4_offset - l3_offset; + + if (likely(q->cksum_enabled)) + mbuf->ol_flags = 
mrvl_desc_to_ol_flags(&descs[i]);
+
+		rx_pkts[rx_done++] = mbuf;
+		q->bytes_recv += mbuf->pkt_len;
+	}
+
+	if (rte_spinlock_trylock(&q->priv->lock) == 1) {
+		num = mrvl_get_bpool_size(bpool->pp2_id, bpool->id);
+
+		if (unlikely(num <= q->priv->bpool_min_size ||
+			     (!rx_done && num < q->priv->bpool_init_size))) {
+			ret = mrvl_fill_bpool(q, MRVL_BURST_SIZE);
+			if (ret)
+				RTE_LOG(ERR, PMD, "Failed to fill bpool\n");
+		} else if (unlikely(num > q->priv->bpool_max_size)) {
+			int i;
+			int pkt_to_remove = num - q->priv->bpool_init_size;
+			struct rte_mbuf *mbuf;
+			struct pp2_buff_inf buff;
+
+			RTE_LOG(DEBUG, PMD,
+				"\nport-%d:%d: bpool %d oversize - remove %d buffers (pool size: %d -> %d)\n",
+				bpool->pp2_id, q->priv->ppio->port_id,
+				bpool->id, pkt_to_remove, num,
+				q->priv->bpool_init_size);
+
+			for (i = 0; i < pkt_to_remove; i++) {
+				ret = pp2_bpool_get_buff(hif, bpool, &buff);
+				if (ret)
+					break;
+				mbuf = (struct rte_mbuf *)
+					(cookie_addr_high | buff.cookie);
+				rte_pktmbuf_free(mbuf);
+			}
+			mrvl_port_bpool_size
+				[bpool->pp2_id][bpool->id][core_id] -= i;
+		}
+		rte_spinlock_unlock(&q->priv->lock);
+	}
+
+	return rx_done;
+}
+
+/**
+ * Prepare offload information.
+ *
+ * @param ol_flags
+ *   Offload flags.
+ * @param packet_type
+ *   Packet type bitfield.
+ * @param l3_type
+ *   Pointer to the pp2_outq_l3_type structure.
+ * @param l4_type
+ *   Pointer to the pp2_outq_l4_type structure.
+ * @param gen_l3_cksum
+ *   Will be set to 1 in case l3 checksum is computed.
+ * @param gen_l4_cksum
+ *   Will be set to 1 in case l4 checksum is computed.
+ *
+ * @return
+ *   0 on success, negative error value otherwise.
+ */
+static inline int
+mrvl_prepare_proto_info(uint64_t ol_flags, uint32_t packet_type,
+			enum pp2_outq_l3_type *l3_type,
+			enum pp2_outq_l4_type *l4_type,
+			int *gen_l3_cksum,
+			int *gen_l4_cksum)
+{
+	/*
+	 * Based on ol_flags prepare information
+	 * for pp2_ppio_outq_desc_set_proto_info() which sets up the
+	 * descriptor for offloading.
+	 */
+	if (ol_flags & PKT_TX_IPV4) {
+		*l3_type = PP2_OUTQ_L3_TYPE_IPV4;
+		*gen_l3_cksum = ol_flags & PKT_TX_IP_CKSUM ? 1 : 0;
+	} else if (ol_flags & PKT_TX_IPV6) {
+		*l3_type = PP2_OUTQ_L3_TYPE_IPV6;
+		/* no checksum for ipv6 header */
+		*gen_l3_cksum = 0;
+	} else {
+		/* stop processing for any other l3 type */
+		return -1;
+	}
+
+	ol_flags &= PKT_TX_L4_MASK;
+	if ((packet_type & RTE_PTYPE_L4_TCP) &&
+	    ol_flags == PKT_TX_TCP_CKSUM) {
+		*l4_type = PP2_OUTQ_L4_TYPE_TCP;
+		*gen_l4_cksum = 1;
+	} else if ((packet_type & RTE_PTYPE_L4_UDP) &&
+		   ol_flags == PKT_TX_UDP_CKSUM) {
+		*l4_type = PP2_OUTQ_L4_TYPE_UDP;
+		*gen_l4_cksum = 1;
+	} else {
+		*l4_type = PP2_OUTQ_L4_TYPE_OTHER;
+		/* no checksum for other types */
+		*gen_l4_cksum = 0;
+	}
+
+	return 0;
+}
+
+/**
+ * Release already sent buffers to bpool (buffer-pool).
+ *
+ * @param ppio
+ *   Pointer to the port structure.
+ * @param hif
+ *   Pointer to the MUSDK hardware interface.
+ * @param core_id
+ *   Lcore id selecting the per-lcore shadow queue bookkeeping.
+ * @param sq
+ *   Pointer to the shadow queue.
+ * @param qid
+ *   Queue id number.
+ * @param force
+ *   Force releasing packets.
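+ *
+ * Unless force is set, buffers are returned to the bpool in batches of
+ * at least MRVL_PP2_BUF_RELEASE_BURST_SIZE to amortize the cost of
+ * pp2_bpool_put_buffs().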
+ */ +static inline void +mrvl_free_sent_buffers(struct pp2_ppio *ppio, struct pp2_hif *hif, + unsigned int core_id, struct mrvl_shadow_txq *sq, + int qid, int force) +{ + struct buff_release_entry *entry; + uint16_t nb_done = 0, num = 0, skip_bufs = 0; + int i; + + pp2_ppio_get_num_outq_done(ppio, hif, qid, &nb_done); + + sq->num_to_release += nb_done; + + if (likely(!force && + sq->num_to_release < MRVL_PP2_BUF_RELEASE_BURST_SIZE)) + return; + + nb_done = sq->num_to_release; + sq->num_to_release = 0; + + for (i = 0; i < nb_done; i++) { + entry = &sq->ent[sq->tail + num]; + if (unlikely(!entry->buff.addr)) { + RTE_LOG(ERR, PMD, + "Shadow memory @%d: cookie(%lx), pa(%lx)!\n", + sq->tail, (u64)entry->buff.cookie, + (u64)entry->buff.addr); + skip_bufs = 1; + goto skip; + } + + if (unlikely(!entry->bpool)) { + struct rte_mbuf *mbuf; + + mbuf = (struct rte_mbuf *) + (cookie_addr_high | entry->buff.cookie); + rte_pktmbuf_free(mbuf); + skip_bufs = 1; + goto skip; + } + + mrvl_port_bpool_size + [entry->bpool->pp2_id][entry->bpool->id][core_id]++; + num++; + if (unlikely(sq->tail + num == MRVL_PP2_TX_SHADOWQ_SIZE)) + goto skip; + continue; +skip: + if (likely(num)) + pp2_bpool_put_buffs(hif, &sq->ent[sq->tail], &num); + num += skip_bufs; + sq->tail = (sq->tail + num) & MRVL_PP2_TX_SHADOWQ_MASK; + sq->size -= num; + num = 0; + skip_bufs = 0; + } + + if (likely(num)) { + pp2_bpool_put_buffs(hif, &sq->ent[sq->tail], &num); + sq->tail = (sq->tail + num) & MRVL_PP2_TX_SHADOWQ_MASK; + sq->size -= num; + } +} + +/** + * DPDK callback for transmit. + * + * @param txq + * Generic pointer transmit queue. + * @param tx_pkts + * Packets to transmit. + * @param nb_pkts + * Number of packets in array. + * + * @return + * Number of packets successfully transmitted. + */ +static uint16_t +mrvl_tx_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +{ + struct mrvl_txq *q = txq; + struct mrvl_shadow_txq *sq; + struct pp2_hif *hif; + struct pp2_ppio_desc descs[nb_pkts]; + unsigned int core_id = rte_lcore_id(); + int i, ret, bytes_sent = 0; + uint16_t num, sq_free_size; + uint64_t addr; + + hif = mrvl_get_hif(q->priv, core_id); + sq = &q->shadow_txqs[core_id]; + + if (unlikely(!q->priv->ppio || !hif)) + return 0; + + if (sq->size) + mrvl_free_sent_buffers(q->priv->ppio, hif, core_id, + sq, q->queue_id, 0); + + sq_free_size = MRVL_PP2_TX_SHADOWQ_SIZE - sq->size - 1; + if (unlikely(nb_pkts > sq_free_size)) { + RTE_LOG(DEBUG, PMD, + "No room in shadow queue for %d packets! %d packets will be sent.\n", + nb_pkts, sq_free_size); + nb_pkts = sq_free_size; + } + + for (i = 0; i < nb_pkts; i++) { + struct rte_mbuf *mbuf = tx_pkts[i]; + int gen_l3_cksum, gen_l4_cksum; + enum pp2_outq_l3_type l3_type; + enum pp2_outq_l4_type l4_type; + + if (likely(nb_pkts - i > MRVL_MUSDK_PREFETCH_SHIFT)) { + struct rte_mbuf *pref_pkt_hdr; + + pref_pkt_hdr = tx_pkts[i + MRVL_MUSDK_PREFETCH_SHIFT]; + rte_mbuf_prefetch_part1(pref_pkt_hdr); + rte_mbuf_prefetch_part2(pref_pkt_hdr); + } + + sq->ent[sq->head].buff.cookie = (pp2_cookie_t)(uint64_t)mbuf; + sq->ent[sq->head].buff.addr = + rte_mbuf_data_iova_default(mbuf); + sq->ent[sq->head].bpool = + (unlikely(mbuf->port >= RTE_MAX_ETHPORTS || + mbuf->refcnt > 1)) ? 
NULL : + mrvl_port_to_bpool_lookup[mbuf->port]; + sq->head = (sq->head + 1) & MRVL_PP2_TX_SHADOWQ_MASK; + sq->size++; + + pp2_ppio_outq_desc_reset(&descs[i]); + pp2_ppio_outq_desc_set_phys_addr(&descs[i], + rte_pktmbuf_iova(mbuf)); + pp2_ppio_outq_desc_set_pkt_offset(&descs[i], 0); + pp2_ppio_outq_desc_set_pkt_len(&descs[i], + rte_pktmbuf_pkt_len(mbuf)); + + bytes_sent += rte_pktmbuf_pkt_len(mbuf); + /* + * in case unsupported ol_flags were passed + * do not update descriptor offload information + */ + ret = mrvl_prepare_proto_info(mbuf->ol_flags, mbuf->packet_type, + &l3_type, &l4_type, &gen_l3_cksum, + &gen_l4_cksum); + if (unlikely(ret)) + continue; + + pp2_ppio_outq_desc_set_proto_info(&descs[i], l3_type, l4_type, + mbuf->l2_len, + mbuf->l2_len + mbuf->l3_len, + gen_l3_cksum, gen_l4_cksum); + } + + num = nb_pkts; + pp2_ppio_send(q->priv->ppio, hif, q->queue_id, descs, &nb_pkts); + /* number of packets that were not sent */ + if (unlikely(num > nb_pkts)) { + for (i = nb_pkts; i < num; i++) { + sq->head = (MRVL_PP2_TX_SHADOWQ_SIZE + sq->head - 1) & + MRVL_PP2_TX_SHADOWQ_MASK; + addr = cookie_addr_high | sq->ent[sq->head].buff.cookie; + bytes_sent -= + rte_pktmbuf_pkt_len((struct rte_mbuf *)addr); + } + sq->size -= num - nb_pkts; + } + + q->bytes_sent += bytes_sent; + + return nb_pkts; +} + +/** + * Initialize packet processor. + * + * @return + * 0 on success, negative error value otherwise. + */ +static int +mrvl_init_pp2(void) +{ + struct pp2_init_params init_params; + + memset(&init_params, 0, sizeof(init_params)); + init_params.hif_reserved_map = MRVL_MUSDK_HIFS_RESERVED; + init_params.bm_pool_reserved_map = MRVL_MUSDK_BPOOLS_RESERVED; + init_params.rss_tbl_reserved_map = MRVL_MUSDK_RSS_RESERVED; + + return pp2_init(&init_params); +} + +/** + * Deinitialize packet processor. + * + * @return + * 0 on success, negative error value otherwise. + */ +static void +mrvl_deinit_pp2(void) +{ + pp2_deinit(); +} + +/** + * Create private device structure. + * + * @param dev_name + * Pointer to the port name passed in the initialization parameters. + * + * @return + * Pointer to the newly allocated private device structure. + */ +static struct mrvl_priv * +mrvl_priv_create(const char *dev_name) +{ + struct pp2_bpool_params bpool_params; + char match[MRVL_MATCH_LEN]; + struct mrvl_priv *priv; + int ret, bpool_bit; + + priv = rte_zmalloc_socket(dev_name, sizeof(*priv), 0, rte_socket_id()); + if (!priv) + return NULL; + + ret = pp2_netdev_get_ppio_info((char *)(uintptr_t)dev_name, + &priv->pp_id, &priv->ppio_id); + if (ret) + goto out_free_priv; + + bpool_bit = mrvl_reserve_bit(&used_bpools[priv->pp_id], + PP2_BPOOL_NUM_POOLS); + if (bpool_bit < 0) + goto out_free_priv; + priv->bpool_bit = bpool_bit; + + snprintf(match, sizeof(match), "pool-%d:%d", priv->pp_id, + priv->bpool_bit); + memset(&bpool_params, 0, sizeof(bpool_params)); + bpool_params.match = match; + bpool_params.buff_len = MRVL_PKT_SIZE_MAX + MRVL_PKT_EFFEC_OFFS; + ret = pp2_bpool_init(&bpool_params, &priv->bpool); + if (ret) + goto out_clear_bpool_bit; + + priv->ppio_params.type = PP2_PPIO_T_NIC; + rte_spinlock_init(&priv->lock); + + return priv; +out_clear_bpool_bit: + used_bpools[priv->pp_id] &= ~(1 << priv->bpool_bit); +out_free_priv: + rte_free(priv); + return NULL; +} + +/** + * Create device representing Ethernet port. + * + * @param name + * Pointer to the port's name. + * + * @return + * 0 on success, negative error value otherwise. 
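+ *
+ * The name must refer to an existing kernel netdev: it is resolved to
+ * a ppio via pp2_netdev_get_ppio_info() and its MAC address is read
+ * with the SIOCGIFHWADDR ioctl below.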
+ */ +static int +mrvl_eth_dev_create(struct rte_vdev_device *vdev, const char *name) +{ + int ret, fd = socket(AF_INET, SOCK_DGRAM, 0); + struct rte_eth_dev *eth_dev; + struct mrvl_priv *priv; + struct ifreq req; + + eth_dev = rte_eth_dev_allocate(name); + if (!eth_dev) + return -ENOMEM; + + priv = mrvl_priv_create(name); + if (!priv) { + ret = -ENOMEM; + goto out_free_dev; + } + + eth_dev->data->mac_addrs = + rte_zmalloc("mac_addrs", + ETHER_ADDR_LEN * MRVL_MAC_ADDRS_MAX, 0); + if (!eth_dev->data->mac_addrs) { + RTE_LOG(ERR, PMD, "Failed to allocate space for eth addrs\n"); + ret = -ENOMEM; + goto out_free_priv; + } + + memset(&req, 0, sizeof(req)); + strcpy(req.ifr_name, name); + ret = ioctl(fd, SIOCGIFHWADDR, &req); + if (ret) + goto out_free_mac; + + memcpy(eth_dev->data->mac_addrs[0].addr_bytes, + req.ifr_addr.sa_data, ETHER_ADDR_LEN); + + eth_dev->rx_pkt_burst = mrvl_rx_pkt_burst; + eth_dev->tx_pkt_burst = mrvl_tx_pkt_burst; + eth_dev->data->kdrv = RTE_KDRV_NONE; + eth_dev->data->dev_private = priv; + eth_dev->device = &vdev->device; + eth_dev->dev_ops = &mrvl_ops; + + rte_eth_dev_probing_finish(eth_dev); + return 0; +out_free_mac: + rte_free(eth_dev->data->mac_addrs); +out_free_dev: + rte_eth_dev_release_port(eth_dev); +out_free_priv: + rte_free(priv); + + return ret; +} + +/** + * Cleanup previously created device representing Ethernet port. + * + * @param name + * Pointer to the port name. + */ +static void +mrvl_eth_dev_destroy(const char *name) +{ + struct rte_eth_dev *eth_dev; + struct mrvl_priv *priv; + + eth_dev = rte_eth_dev_allocated(name); + if (!eth_dev) + return; + + priv = eth_dev->data->dev_private; + pp2_bpool_deinit(priv->bpool); + used_bpools[priv->pp_id] &= ~(1 << priv->bpool_bit); + rte_free(priv); + rte_free(eth_dev->data->mac_addrs); + rte_eth_dev_release_port(eth_dev); +} + +/** + * Callback used by rte_kvargs_process() during argument parsing. + * + * @param key + * Pointer to the parsed key (unused). + * @param value + * Pointer to the parsed value. + * @param extra_args + * Pointer to the extra arguments which contains address of the + * table of pointers to parsed interface names. + * + * @return + * Always 0. + */ +static int +mrvl_get_ifnames(const char *key __rte_unused, const char *value, + void *extra_args) +{ + struct mrvl_ifnames *ifnames = extra_args; + + ifnames->names[ifnames->idx++] = value; + + return 0; +} + +/** + * Deinitialize per-lcore MUSDK hardware interfaces (hifs). + */ +static void +mrvl_deinit_hifs(void) +{ + int i; + + for (i = mrvl_lcore_first; i <= mrvl_lcore_last; i++) { + if (hifs[i]) + pp2_hif_deinit(hifs[i]); + } + used_hifs = MRVL_MUSDK_HIFS_RESERVED; + memset(hifs, 0, sizeof(hifs)); +} + +/** + * DPDK callback to register the virtual device. + * + * @param vdev + * Pointer to the virtual device. + * + * @return + * 0 on success, negative error value otherwise. 
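+ *
+ * Example invocation ("iface" and "cfg" being the kvargs parsed below;
+ * the interface names and config path are illustrative):
+ *
+ *   testpmd ... --vdev=net_mvpp2,iface=eth0,iface=eth2,cfg=/tmp/qos.cfg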
+ */
+static int
+rte_pmd_mrvl_probe(struct rte_vdev_device *vdev)
+{
+	struct rte_kvargs *kvlist;
+	struct mrvl_ifnames ifnames;
+	int ret = -EINVAL;
+	uint32_t i, ifnum, cfgnum;
+	const char *params;
+
+	params = rte_vdev_device_args(vdev);
+	if (!params)
+		return -EINVAL;
+
+	kvlist = rte_kvargs_parse(params, valid_args);
+	if (!kvlist)
+		return -EINVAL;
+
+	ifnum = rte_kvargs_count(kvlist, MRVL_IFACE_NAME_ARG);
+	if (ifnum > RTE_DIM(ifnames.names))
+		goto out_free_kvlist;
+
+	ifnames.idx = 0;
+	rte_kvargs_process(kvlist, MRVL_IFACE_NAME_ARG,
+			   mrvl_get_ifnames, &ifnames);
+
+	/*
+	 * The initialization below should be done only once, when the
+	 * first configuration file is provided.
+	 */
+	if (!mrvl_qos_cfg) {
+		cfgnum = rte_kvargs_count(kvlist, MRVL_CFG_ARG);
+		if (cfgnum > 1) {
+			RTE_LOG(ERR, PMD, "Cannot handle more than one config file!\n");
+			goto out_free_kvlist;
+		} else if (cfgnum == 1) {
+			RTE_LOG(INFO, PMD, "Parsing config file\n");
+			rte_kvargs_process(kvlist, MRVL_CFG_ARG,
+					   mrvl_get_qoscfg, &mrvl_qos_cfg);
+		}
+	}
+
+	if (mrvl_dev_num)
+		goto init_devices;
+
+	RTE_LOG(INFO, PMD, "Perform MUSDK initialization\n");
+	/*
+	 * ret == -EEXIST is correct, it means DMA
+	 * has been already initialized (by another PMD).
+	 */
+	ret = mv_sys_dma_mem_init(MRVL_MUSDK_DMA_MEMSIZE);
+	if (ret < 0) {
+		if (ret != -EEXIST)
+			goto out_free_kvlist;
+		else
+			RTE_LOG(INFO, PMD,
+				"DMA memory has already been initialized by a different driver.\n");
+	}
+
+	ret = mrvl_init_pp2();
+	if (ret) {
+		RTE_LOG(ERR, PMD, "Failed to init PP!\n");
+		goto out_deinit_dma;
+	}
+
+	memset(mrvl_port_bpool_size, 0, sizeof(mrvl_port_bpool_size));
+	memset(mrvl_port_to_bpool_lookup, 0, sizeof(mrvl_port_to_bpool_lookup));
+
+	mrvl_lcore_first = RTE_MAX_LCORE;
+	mrvl_lcore_last = 0;
+
+init_devices:
+	for (i = 0; i < ifnum; i++) {
+		RTE_LOG(INFO, PMD, "Creating %s\n", ifnames.names[i]);
+		ret = mrvl_eth_dev_create(vdev, ifnames.names[i]);
+		if (ret)
+			goto out_cleanup;
+	}
+	mrvl_dev_num += ifnum;
+
+	rte_kvargs_free(kvlist);
+
+	return 0;
+out_cleanup:
+	for (; i > 0; i--)
+		mrvl_eth_dev_destroy(ifnames.names[i - 1]);
+
+	if (mrvl_dev_num == 0)
+		mrvl_deinit_pp2();
+out_deinit_dma:
+	if (mrvl_dev_num == 0)
+		mv_sys_dma_mem_destroy();
+out_free_kvlist:
+	rte_kvargs_free(kvlist);
+
+	return ret;
+}
+
+/**
+ * DPDK callback to remove virtual device.
+ *
+ * @param vdev
+ *   Pointer to the removed virtual device.
+ *
+ * @return
+ *   0 on success, negative error value otherwise.
+ */
+static int
+rte_pmd_mrvl_remove(struct rte_vdev_device *vdev)
+{
+	int i;
+	const char *name;
+
+	name = rte_vdev_device_name(vdev);
+	if (!name)
+		return -EINVAL;
+
+	RTE_LOG(INFO, PMD, "Removing %s\n", name);
+
+	RTE_ETH_FOREACH_DEV(i) { /* FIXME: removing all devices!
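(every ethdev in the system is destroyed here, not only the ports created by this vdev)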
*/ + char ifname[RTE_ETH_NAME_MAX_LEN]; + + rte_eth_dev_get_name_by_port(i, ifname); + mrvl_eth_dev_destroy(ifname); + mrvl_dev_num--; + } + + if (mrvl_dev_num == 0) { + RTE_LOG(INFO, PMD, "Perform MUSDK deinit\n"); + mrvl_deinit_hifs(); + mrvl_deinit_pp2(); + mv_sys_dma_mem_destroy(); + } + + return 0; +} + +static struct rte_vdev_driver pmd_mrvl_drv = { + .probe = rte_pmd_mrvl_probe, + .remove = rte_pmd_mrvl_remove, +}; + +RTE_PMD_REGISTER_VDEV(net_mvpp2, pmd_mrvl_drv); +RTE_PMD_REGISTER_ALIAS(net_mvpp2, eth_mvpp2); diff --git a/drivers/net/mvpp2/mrvl_ethdev.h b/drivers/net/mvpp2/mrvl_ethdev.h new file mode 100644 index 00000000..3a428092 --- /dev/null +++ b/drivers/net/mvpp2/mrvl_ethdev.h @@ -0,0 +1,101 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Marvell International Ltd. + * Copyright(c) 2017 Semihalf. + * All rights reserved. + */ + +#ifndef _MRVL_ETHDEV_H_ +#define _MRVL_ETHDEV_H_ + +#include +#include + +#include +#include +#include +#include +#include +#include + +/** Maximum number of rx queues per port */ +#define MRVL_PP2_RXQ_MAX 32 + +/** Maximum number of tx queues per port */ +#define MRVL_PP2_TXQ_MAX 8 + +/** Minimum number of descriptors in tx queue */ +#define MRVL_PP2_TXD_MIN 16 + +/** Maximum number of descriptors in tx queue */ +#define MRVL_PP2_TXD_MAX 2048 + +/** Tx queue descriptors alignment */ +#define MRVL_PP2_TXD_ALIGN 16 + +/** Minimum number of descriptors in rx queue */ +#define MRVL_PP2_RXD_MIN 16 + +/** Maximum number of descriptors in rx queue */ +#define MRVL_PP2_RXD_MAX 2048 + +/** Rx queue descriptors alignment */ +#define MRVL_PP2_RXD_ALIGN 16 + +/** Maximum number of descriptors in tx aggregated queue */ +#define MRVL_PP2_AGGR_TXQD_MAX 2048 + +/** Maximum number of Traffic Classes. */ +#define MRVL_PP2_TC_MAX 8 + +/** Packet offset inside RX buffer. */ +#define MRVL_PKT_OFFS 64 + +/** Maximum number of descriptors in shadow queue. Must be power of 2 */ +#define MRVL_PP2_TX_SHADOWQ_SIZE MRVL_PP2_TXD_MAX + +/** Shadow queue size mask (since shadow queue size is power of 2) */ +#define MRVL_PP2_TX_SHADOWQ_MASK (MRVL_PP2_TX_SHADOWQ_SIZE - 1) + +/** Minimum number of sent buffers to release from shadow queue to BM */ +#define MRVL_PP2_BUF_RELEASE_BURST_SIZE 64 + +struct mrvl_priv { + /* Hot fields, used in fast path. */ + struct pp2_bpool *bpool; /**< BPool pointer */ + struct pp2_ppio *ppio; /**< Port handler pointer */ + rte_spinlock_t lock; /**< Spinlock for checking bpool status */ + uint16_t bpool_max_size; /**< BPool maximum size */ + uint16_t bpool_min_size; /**< BPool minimum size */ + uint16_t bpool_init_size; /**< Configured BPool size */ + + /** Mapping for DPDK rx queue->(TC, MRVL relative inq) */ + struct { + uint8_t tc; /**< Traffic Class */ + uint8_t inq; /**< Relative in-queue number */ + } rxq_map[MRVL_PP2_RXQ_MAX] __rte_cache_aligned; + + /* Configuration data, used sporadically. */ + uint8_t pp_id; + uint8_t ppio_id; + uint8_t bpool_bit; + uint8_t rss_hf_tcp; + uint8_t uc_mc_flushed; + uint8_t vlan_flushed; + uint8_t isolated; + + struct pp2_ppio_params ppio_params; + struct pp2_cls_qos_tbl_params qos_tbl_params; + struct pp2_cls_tbl *qos_tbl; + uint16_t nb_rx_queues; + + struct pp2_cls_tbl_params cls_tbl_params; + struct pp2_cls_tbl *cls_tbl; + uint32_t cls_tbl_pattern; + LIST_HEAD(mrvl_flows, rte_flow) flows; + + struct pp2_cls_plcr *policer; +}; + +/** Flow operations forward declaration. 
*/ +extern const struct rte_flow_ops mrvl_flow_ops; +#endif /* _MRVL_ETHDEV_H_ */ diff --git a/drivers/net/mvpp2/mrvl_flow.c b/drivers/net/mvpp2/mrvl_flow.c new file mode 100644 index 00000000..437c987c --- /dev/null +++ b/drivers/net/mvpp2/mrvl_flow.c @@ -0,0 +1,2779 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Marvell International Ltd. + * Copyright(c) 2018 Semihalf. + * All rights reserved. + */ + +#include +#include +#include +#include + +#include + +#ifdef container_of +#undef container_of +#endif + +#include "mrvl_ethdev.h" +#include "mrvl_qos.h" +#include "env/mv_common.h" /* for BIT() */ + +/** Number of rules in the classifier table. */ +#define MRVL_CLS_MAX_NUM_RULES 20 + +/** Size of the classifier key and mask strings. */ +#define MRVL_CLS_STR_SIZE_MAX 40 + +/** Parsed fields in processed rte_flow_item. */ +enum mrvl_parsed_fields { + /* eth flags */ + F_DMAC = BIT(0), + F_SMAC = BIT(1), + F_TYPE = BIT(2), + /* vlan flags */ + F_VLAN_ID = BIT(3), + F_VLAN_PRI = BIT(4), + F_VLAN_TCI = BIT(5), /* not supported by MUSDK yet */ + /* ip4 flags */ + F_IP4_TOS = BIT(6), + F_IP4_SIP = BIT(7), + F_IP4_DIP = BIT(8), + F_IP4_PROTO = BIT(9), + /* ip6 flags */ + F_IP6_TC = BIT(10), /* not supported by MUSDK yet */ + F_IP6_SIP = BIT(11), + F_IP6_DIP = BIT(12), + F_IP6_FLOW = BIT(13), + F_IP6_NEXT_HDR = BIT(14), + /* tcp flags */ + F_TCP_SPORT = BIT(15), + F_TCP_DPORT = BIT(16), + /* udp flags */ + F_UDP_SPORT = BIT(17), + F_UDP_DPORT = BIT(18), +}; + +/** PMD-specific definition of a flow rule handle. */ +struct rte_flow { + LIST_ENTRY(rte_flow) next; + + enum mrvl_parsed_fields pattern; + + struct pp2_cls_tbl_rule rule; + struct pp2_cls_cos_desc cos; + struct pp2_cls_tbl_action action; +}; + +static const enum rte_flow_item_type pattern_eth[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_END +}; + +static const enum rte_flow_item_type pattern_eth_vlan[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_END +}; + +static const enum rte_flow_item_type pattern_eth_vlan_ip[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_END +}; + +static const enum rte_flow_item_type pattern_eth_vlan_ip6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_END +}; + +static const enum rte_flow_item_type pattern_eth_ip4[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_END +}; + +static const enum rte_flow_item_type pattern_eth_ip4_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END +}; + +static const enum rte_flow_item_type pattern_eth_ip4_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END +}; + +static const enum rte_flow_item_type pattern_eth_ip6[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_END +}; + +static const enum rte_flow_item_type pattern_eth_ip6_tcp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END +}; + +static const enum rte_flow_item_type pattern_eth_ip6_udp[] = { + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END +}; + +static const enum rte_flow_item_type pattern_vlan[] = { + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_END +}; + +static const enum rte_flow_item_type pattern_vlan_ip[] = { + RTE_FLOW_ITEM_TYPE_VLAN, + 
RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_END +}; + +static const enum rte_flow_item_type pattern_vlan_ip_tcp[] = { + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END +}; + +static const enum rte_flow_item_type pattern_vlan_ip_udp[] = { + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END +}; + +static const enum rte_flow_item_type pattern_vlan_ip6[] = { + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_END +}; + +static const enum rte_flow_item_type pattern_vlan_ip6_tcp[] = { + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END +}; + +static const enum rte_flow_item_type pattern_vlan_ip6_udp[] = { + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END +}; + +static const enum rte_flow_item_type pattern_ip[] = { + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_END +}; + +static const enum rte_flow_item_type pattern_ip6[] = { + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_END +}; + +static const enum rte_flow_item_type pattern_ip_tcp[] = { + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END +}; + +static const enum rte_flow_item_type pattern_ip6_tcp[] = { + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END +}; + +static const enum rte_flow_item_type pattern_ip_udp[] = { + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END +}; + +static const enum rte_flow_item_type pattern_ip6_udp[] = { + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END +}; + +static const enum rte_flow_item_type pattern_tcp[] = { + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_END +}; + +static const enum rte_flow_item_type pattern_udp[] = { + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_END +}; + +#define MRVL_VLAN_ID_MASK 0x0fff +#define MRVL_VLAN_PRI_MASK 0x7000 +#define MRVL_IPV4_DSCP_MASK 0xfc +#define MRVL_IPV4_ADDR_MASK 0xffffffff +#define MRVL_IPV6_FLOW_MASK 0x0fffff + +/** + * Given a flow item, return the next non-void one. + * + * @param items Pointer to the item in the table. + * @returns Next not-void item, NULL otherwise. + */ +static const struct rte_flow_item * +mrvl_next_item(const struct rte_flow_item *items) +{ + const struct rte_flow_item *item = items; + + for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { + if (item->type != RTE_FLOW_ITEM_TYPE_VOID) + return item; + } + + return NULL; +} + +/** + * Allocate memory for classifier rule key and mask fields. + * + * @param field Pointer to the classifier rule. + * @returns 0 in case of success, negative value otherwise. + */ +static int +mrvl_alloc_key_mask(struct pp2_cls_rule_key_field *field) +{ + unsigned int id = rte_socket_id(); + + field->key = rte_zmalloc_socket(NULL, MRVL_CLS_STR_SIZE_MAX, 0, id); + if (!field->key) + goto out; + + field->mask = rte_zmalloc_socket(NULL, MRVL_CLS_STR_SIZE_MAX, 0, id); + if (!field->mask) + goto out_mask; + + return 0; +out_mask: + rte_free(field->key); +out: + field->key = NULL; + field->mask = NULL; + return -1; +} + +/** + * Free memory allocated for classifier rule key and mask fields. + * + * @param field Pointer to the classifier rule. 
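+ *
+ * Safe to call on a partially initialized field: rte_free() accepts
+ * NULL and does nothing in that case.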
+ */ +static void +mrvl_free_key_mask(struct pp2_cls_rule_key_field *field) +{ + rte_free(field->key); + rte_free(field->mask); + field->key = NULL; + field->mask = NULL; +} + +/** + * Free memory allocated for all classifier rule key and mask fields. + * + * @param rule Pointer to the classifier table rule. + */ +static void +mrvl_free_all_key_mask(struct pp2_cls_tbl_rule *rule) +{ + int i; + + for (i = 0; i < rule->num_fields; i++) + mrvl_free_key_mask(&rule->fields[i]); + rule->num_fields = 0; +} + +/* + * Initialize rte flow item parsing. + * + * @param item Pointer to the flow item. + * @param spec_ptr Pointer to the specific item pointer. + * @param mask_ptr Pointer to the specific item's mask pointer. + * @def_mask Pointer to the default mask. + * @size Size of the flow item. + * @error Pointer to the rte flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static int +mrvl_parse_init(const struct rte_flow_item *item, + const void **spec_ptr, + const void **mask_ptr, + const void *def_mask, + unsigned int size, + struct rte_flow_error *error) +{ + const uint8_t *spec; + const uint8_t *mask; + const uint8_t *last; + uint8_t zeros[size]; + + memset(zeros, 0, size); + + if (item == NULL) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, NULL, + "NULL item\n"); + return -rte_errno; + } + + if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Mask or last is set without spec\n"); + return -rte_errno; + } + + /* + * If "mask" is not set, default mask is used, + * but if default mask is NULL, "mask" should be set. + */ + if (item->mask == NULL) { + if (def_mask == NULL) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, NULL, + "Mask should be specified\n"); + return -rte_errno; + } + + mask = (const uint8_t *)def_mask; + } else { + mask = (const uint8_t *)item->mask; + } + + spec = (const uint8_t *)item->spec; + last = (const uint8_t *)item->last; + + if (spec == NULL) { + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "Spec should be specified\n"); + return -rte_errno; + } + + /* + * If field values in "last" are either 0 or equal to the corresponding + * values in "spec" then they are ignored. + */ + if (last != NULL && + !memcmp(last, zeros, size) && + memcmp(last, spec, size) != 0) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, NULL, + "Ranging is not supported\n"); + return -rte_errno; + } + + *spec_ptr = spec; + *mask_ptr = mask; + + return 0; +} + +/** + * Parse the eth flow item. + * + * This will create classifier rule that matches either destination or source + * mac. + * + * @param spec Pointer to the specific flow item. + * @param mask Pointer to the specific flow item's mask. + * @param mask Pointer to the flow. + * @return 0 in case of success, negative error value otherwise. 
+ */ +static int +mrvl_parse_mac(const struct rte_flow_item_eth *spec, + const struct rte_flow_item_eth *mask, + int parse_dst, struct rte_flow *flow) +{ + struct pp2_cls_rule_key_field *key_field; + const uint8_t *k, *m; + + if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS) + return -ENOSPC; + + if (parse_dst) { + k = spec->dst.addr_bytes; + m = mask->dst.addr_bytes; + + flow->pattern |= F_DMAC; + } else { + k = spec->src.addr_bytes; + m = mask->src.addr_bytes; + + flow->pattern |= F_SMAC; + } + + key_field = &flow->rule.fields[flow->rule.num_fields]; + mrvl_alloc_key_mask(key_field); + key_field->size = 6; + + snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, + "%02x:%02x:%02x:%02x:%02x:%02x", + k[0], k[1], k[2], k[3], k[4], k[5]); + + snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX, + "%02x:%02x:%02x:%02x:%02x:%02x", + m[0], m[1], m[2], m[3], m[4], m[5]); + + flow->rule.num_fields += 1; + + return 0; +} + +/** + * Helper for parsing the eth flow item destination mac address. + * + * @param spec Pointer to the specific flow item. + * @param mask Pointer to the specific flow item's mask. + * @param flow Pointer to the flow. + * @return 0 in case of success, negative error value otherwise. + */ +static inline int +mrvl_parse_dmac(const struct rte_flow_item_eth *spec, + const struct rte_flow_item_eth *mask, + struct rte_flow *flow) +{ + return mrvl_parse_mac(spec, mask, 1, flow); +} + +/** + * Helper for parsing the eth flow item source mac address. + * + * @param spec Pointer to the specific flow item. + * @param mask Pointer to the specific flow item's mask. + * @param flow Pointer to the flow. + * @return 0 in case of success, negative error value otherwise. + */ +static inline int +mrvl_parse_smac(const struct rte_flow_item_eth *spec, + const struct rte_flow_item_eth *mask, + struct rte_flow *flow) +{ + return mrvl_parse_mac(spec, mask, 0, flow); +} + +/** + * Parse the ether type field of the eth flow item. + * + * @param spec Pointer to the specific flow item. + * @param mask Pointer to the specific flow item's mask. + * @param flow Pointer to the flow. + * @return 0 in case of success, negative error value otherwise. + */ +static int +mrvl_parse_type(const struct rte_flow_item_eth *spec, + const struct rte_flow_item_eth *mask __rte_unused, + struct rte_flow *flow) +{ + struct pp2_cls_rule_key_field *key_field; + uint16_t k; + + if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS) + return -ENOSPC; + + key_field = &flow->rule.fields[flow->rule.num_fields]; + mrvl_alloc_key_mask(key_field); + key_field->size = 2; + + k = rte_be_to_cpu_16(spec->type); + snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k); + + flow->pattern |= F_TYPE; + flow->rule.num_fields += 1; + + return 0; +} + +/** + * Parse the vid field of the vlan rte flow item. + * + * This will create classifier rule that matches vid. + * + * @param spec Pointer to the specific flow item. + * @param mask Pointer to the specific flow item's mask. + * @param flow Pointer to the flow. + * @return 0 in case of success, negative error value otherwise. 
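A standalone sketch of the "xx:xx:xx:xx:xx:xx" key/mask strings built by mrvl_parse_mac() above; the address and the OUI-only mask are made-up example values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        const uint8_t spec[6] = { 0x00, 0x50, 0x43, 0x12, 0x34, 0x56 };
        const uint8_t mask[6] = { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 };
        char key[32], msk[32];

        /* Same format string as the driver uses for both key and mask. */
        snprintf(key, sizeof(key), "%02x:%02x:%02x:%02x:%02x:%02x",
                 spec[0], spec[1], spec[2], spec[3], spec[4], spec[5]);
        snprintf(msk, sizeof(msk), "%02x:%02x:%02x:%02x:%02x:%02x",
                 mask[0], mask[1], mask[2], mask[3], mask[4], mask[5]);

        printf("key=%s mask=%s\n", key, msk);
        return 0;
}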
+ */ +static int +mrvl_parse_vlan_id(const struct rte_flow_item_vlan *spec, + const struct rte_flow_item_vlan *mask __rte_unused, + struct rte_flow *flow) +{ + struct pp2_cls_rule_key_field *key_field; + uint16_t k; + + if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS) + return -ENOSPC; + + key_field = &flow->rule.fields[flow->rule.num_fields]; + mrvl_alloc_key_mask(key_field); + key_field->size = 2; + + k = rte_be_to_cpu_16(spec->tci) & MRVL_VLAN_ID_MASK; + snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k); + + flow->pattern |= F_VLAN_ID; + flow->rule.num_fields += 1; + + return 0; +} + +/** + * Parse the pri field of the vlan rte flow item. + * + * This will create classifier rule that matches pri. + * + * @param spec Pointer to the specific flow item. + * @param mask Pointer to the specific flow item's mask. + * @param flow Pointer to the flow. + * @return 0 in case of success, negative error value otherwise. + */ +static int +mrvl_parse_vlan_pri(const struct rte_flow_item_vlan *spec, + const struct rte_flow_item_vlan *mask __rte_unused, + struct rte_flow *flow) +{ + struct pp2_cls_rule_key_field *key_field; + uint16_t k; + + if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS) + return -ENOSPC; + + key_field = &flow->rule.fields[flow->rule.num_fields]; + mrvl_alloc_key_mask(key_field); + key_field->size = 1; + + k = (rte_be_to_cpu_16(spec->tci) & MRVL_VLAN_PRI_MASK) >> 13; + snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k); + + flow->pattern |= F_VLAN_PRI; + flow->rule.num_fields += 1; + + return 0; +} + +/** + * Parse the dscp field of the ipv4 rte flow item. + * + * This will create classifier rule that matches dscp field. + * + * @param spec Pointer to the specific flow item. + * @param mask Pointer to the specific flow item's mask. + * @param flow Pointer to the flow. + * @return 0 in case of success, negative error value otherwise. + */ +static int +mrvl_parse_ip4_dscp(const struct rte_flow_item_ipv4 *spec, + const struct rte_flow_item_ipv4 *mask, + struct rte_flow *flow) +{ + struct pp2_cls_rule_key_field *key_field; + uint8_t k, m; + + if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS) + return -ENOSPC; + + key_field = &flow->rule.fields[flow->rule.num_fields]; + mrvl_alloc_key_mask(key_field); + key_field->size = 1; + + k = (spec->hdr.type_of_service & MRVL_IPV4_DSCP_MASK) >> 2; + m = (mask->hdr.type_of_service & MRVL_IPV4_DSCP_MASK) >> 2; + snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k); + snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX, "%u", m); + + flow->pattern |= F_IP4_TOS; + flow->rule.num_fields += 1; + + return 0; +} + +/** + * Parse either source or destination ip addresses of the ipv4 flow item. + * + * This will create classifier rule that matches either destination + * or source ip field. + * + * @param spec Pointer to the specific flow item. + * @param mask Pointer to the specific flow item's mask. + * @param flow Pointer to the flow. + * @return 0 in case of success, negative error value otherwise. 
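The two VLAN parsers above split the 16-bit TCI as PCP (bits 15:13), DEI (bit 12) and VID (bits 11:0). A standalone illustration with a made-up TCI value:

#include <stdio.h>
#include <stdint.h>

#define VLAN_ID_MASK  0x0fff
#define VLAN_PRI_MASK 0xe000

int main(void)
{
        uint16_t tci = 0x6123; /* PCP = 3, VID = 0x123 */

        printf("vid = 0x%03x\n", (unsigned)(tci & VLAN_ID_MASK));      /* 0x123 */
        printf("pri = %u\n", (unsigned)((tci & VLAN_PRI_MASK) >> 13)); /* 3 */
        return 0;
}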
+ */ +static int +mrvl_parse_ip4_addr(const struct rte_flow_item_ipv4 *spec, + const struct rte_flow_item_ipv4 *mask, + int parse_dst, struct rte_flow *flow) +{ + struct pp2_cls_rule_key_field *key_field; + struct in_addr k; + uint32_t m; + + if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS) + return -ENOSPC; + + memset(&k, 0, sizeof(k)); + if (parse_dst) { + k.s_addr = spec->hdr.dst_addr; + m = rte_be_to_cpu_32(mask->hdr.dst_addr); + + flow->pattern |= F_IP4_DIP; + } else { + k.s_addr = spec->hdr.src_addr; + m = rte_be_to_cpu_32(mask->hdr.src_addr); + + flow->pattern |= F_IP4_SIP; + } + + key_field = &flow->rule.fields[flow->rule.num_fields]; + mrvl_alloc_key_mask(key_field); + key_field->size = 4; + + inet_ntop(AF_INET, &k, (char *)key_field->key, MRVL_CLS_STR_SIZE_MAX); + snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX, "0x%x", m); + + flow->rule.num_fields += 1; + + return 0; +} + +/** + * Helper for parsing destination ip of the ipv4 flow item. + * + * @param spec Pointer to the specific flow item. + * @param mask Pointer to the specific flow item's mask. + * @param flow Pointer to the flow. + * @return 0 in case of success, negative error value otherwise. + */ +static inline int +mrvl_parse_ip4_dip(const struct rte_flow_item_ipv4 *spec, + const struct rte_flow_item_ipv4 *mask, + struct rte_flow *flow) +{ + return mrvl_parse_ip4_addr(spec, mask, 1, flow); +} + +/** + * Helper for parsing source ip of the ipv4 flow item. + * + * @param spec Pointer to the specific flow item. + * @param mask Pointer to the specific flow item's mask. + * @param flow Pointer to the flow. + * @return 0 in case of success, negative error value otherwise. + */ +static inline int +mrvl_parse_ip4_sip(const struct rte_flow_item_ipv4 *spec, + const struct rte_flow_item_ipv4 *mask, + struct rte_flow *flow) +{ + return mrvl_parse_ip4_addr(spec, mask, 0, flow); +} + +/** + * Parse the proto field of the ipv4 rte flow item. + * + * This will create classifier rule that matches proto field. + * + * @param spec Pointer to the specific flow item. + * @param mask Pointer to the specific flow item's mask. + * @param flow Pointer to the flow. + * @return 0 in case of success, negative error value otherwise. + */ +static int +mrvl_parse_ip4_proto(const struct rte_flow_item_ipv4 *spec, + const struct rte_flow_item_ipv4 *mask __rte_unused, + struct rte_flow *flow) +{ + struct pp2_cls_rule_key_field *key_field; + uint8_t k = spec->hdr.next_proto_id; + + if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS) + return -ENOSPC; + + key_field = &flow->rule.fields[flow->rule.num_fields]; + mrvl_alloc_key_mask(key_field); + key_field->size = 1; + + snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k); + + flow->pattern |= F_IP4_PROTO; + flow->rule.num_fields += 1; + + return 0; +} + +/** + * Parse either source or destination ip addresses of the ipv6 rte flow item. + * + * This will create classifier rule that matches either destination + * or source ip field. + * + * @param spec Pointer to the specific flow item. + * @param mask Pointer to the specific flow item's mask. + * @param flow Pointer to the flow. + * @return 0 in case of success, negative error value otherwise. 
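A standalone model of the string form generated above for an IPv4 field: a dotted-quad key from inet_ntop() and a hex, CPU-order mask (values are made up):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

int main(void)
{
        struct in_addr k;
        char key[32], mask[32];
        uint32_t m_be;

        inet_pton(AF_INET, "10.0.0.1", &k);
        m_be = htonl(0xffffff00); /* /24 prefix, network order */

        inet_ntop(AF_INET, &k, key, sizeof(key));
        snprintf(mask, sizeof(mask), "0x%x", (unsigned)ntohl(m_be));

        printf("key=%s mask=%s\n", key, mask); /* key=10.0.0.1 mask=0xffffff00 */
        return 0;
}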
+ */ +static int +mrvl_parse_ip6_addr(const struct rte_flow_item_ipv6 *spec, + const struct rte_flow_item_ipv6 *mask, + int parse_dst, struct rte_flow *flow) +{ + struct pp2_cls_rule_key_field *key_field; + int size = sizeof(spec->hdr.dst_addr); + struct in6_addr k, m; + + if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS) + return -ENOSPC; + + memset(&k, 0, sizeof(k)); + if (parse_dst) { + memcpy(k.s6_addr, spec->hdr.dst_addr, size); + memcpy(m.s6_addr, mask->hdr.dst_addr, size); + + flow->pattern |= F_IP6_DIP; + } else { + memcpy(k.s6_addr, spec->hdr.src_addr, size); + memcpy(m.s6_addr, mask->hdr.src_addr, size); + + flow->pattern |= F_IP6_SIP; + } + + key_field = &flow->rule.fields[flow->rule.num_fields]; + mrvl_alloc_key_mask(key_field); + key_field->size = 16; + + inet_ntop(AF_INET6, &k, (char *)key_field->key, MRVL_CLS_STR_SIZE_MAX); + inet_ntop(AF_INET6, &m, (char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX); + + flow->rule.num_fields += 1; + + return 0; +} + +/** + * Helper for parsing destination ip of the ipv6 flow item. + * + * @param spec Pointer to the specific flow item. + * @param mask Pointer to the specific flow item's mask. + * @param flow Pointer to the flow. + * @return 0 in case of success, negative error value otherwise. + */ +static inline int +mrvl_parse_ip6_dip(const struct rte_flow_item_ipv6 *spec, + const struct rte_flow_item_ipv6 *mask, + struct rte_flow *flow) +{ + return mrvl_parse_ip6_addr(spec, mask, 1, flow); +} + +/** + * Helper for parsing source ip of the ipv6 flow item. + * + * @param spec Pointer to the specific flow item. + * @param mask Pointer to the specific flow item's mask. + * @param flow Pointer to the flow. + * @return 0 in case of success, negative error value otherwise. + */ +static inline int +mrvl_parse_ip6_sip(const struct rte_flow_item_ipv6 *spec, + const struct rte_flow_item_ipv6 *mask, + struct rte_flow *flow) +{ + return mrvl_parse_ip6_addr(spec, mask, 0, flow); +} + +/** + * Parse the flow label of the ipv6 flow item. + * + * This will create classifier rule that matches flow field. + * + * @param spec Pointer to the specific flow item. + * @param mask Pointer to the specific flow item's mask. + * @param flow Pointer to the flow. + * @return 0 in case of success, negative error value otherwise. + */ +static int +mrvl_parse_ip6_flow(const struct rte_flow_item_ipv6 *spec, + const struct rte_flow_item_ipv6 *mask, + struct rte_flow *flow) +{ + struct pp2_cls_rule_key_field *key_field; + uint32_t k = rte_be_to_cpu_32(spec->hdr.vtc_flow) & MRVL_IPV6_FLOW_MASK, + m = rte_be_to_cpu_32(mask->hdr.vtc_flow) & MRVL_IPV6_FLOW_MASK; + + if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS) + return -ENOSPC; + + key_field = &flow->rule.fields[flow->rule.num_fields]; + mrvl_alloc_key_mask(key_field); + key_field->size = 3; + + snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k); + snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX, "%u", m); + + flow->pattern |= F_IP6_FLOW; + flow->rule.num_fields += 1; + + return 0; +} + +/** + * Parse the next header of the ipv6 flow item. + * + * This will create classifier rule that matches next header field. + * + * @param spec Pointer to the specific flow item. + * @param mask Pointer to the specific flow item's mask. + * @param flow Pointer to the flow. + * @return 0 in case of success, negative error value otherwise. 
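mrvl_parse_ip6_flow() above keys on the low 20 bits of vtc_flow. For reference, a standalone decomposition of the IPv6 version / traffic class / flow label word (example value only):

#include <stdio.h>
#include <stdint.h>

#define IPV6_FLOW_MASK 0x000fffff

int main(void)
{
        uint32_t vtc_flow = 0x600af123; /* version 6, tc 0x00, flow 0xaf123 */

        printf("version = %u\n", (unsigned)(vtc_flow >> 28));               /* 6 */
        printf("tclass  = 0x%02x\n", (unsigned)((vtc_flow >> 20) & 0xff)); /* 0x00 */
        printf("flow    = 0x%05x\n", (unsigned)(vtc_flow & IPV6_FLOW_MASK)); /* 0xaf123 */
        return 0;
}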
+ */ +static int +mrvl_parse_ip6_next_hdr(const struct rte_flow_item_ipv6 *spec, + const struct rte_flow_item_ipv6 *mask __rte_unused, + struct rte_flow *flow) +{ + struct pp2_cls_rule_key_field *key_field; + uint8_t k = spec->hdr.proto; + + if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS) + return -ENOSPC; + + key_field = &flow->rule.fields[flow->rule.num_fields]; + mrvl_alloc_key_mask(key_field); + key_field->size = 1; + + snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k); + + flow->pattern |= F_IP6_NEXT_HDR; + flow->rule.num_fields += 1; + + return 0; +} + +/** + * Parse destination or source port of the tcp flow item. + * + * This will create classifier rule that matches either destination or + * source tcp port. + * + * @param spec Pointer to the specific flow item. + * @param mask Pointer to the specific flow item's mask. + * @param flow Pointer to the flow. + * @return 0 in case of success, negative error value otherwise. + */ +static int +mrvl_parse_tcp_port(const struct rte_flow_item_tcp *spec, + const struct rte_flow_item_tcp *mask __rte_unused, + int parse_dst, struct rte_flow *flow) +{ + struct pp2_cls_rule_key_field *key_field; + uint16_t k; + + if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS) + return -ENOSPC; + + key_field = &flow->rule.fields[flow->rule.num_fields]; + mrvl_alloc_key_mask(key_field); + key_field->size = 2; + + if (parse_dst) { + k = rte_be_to_cpu_16(spec->hdr.dst_port); + + flow->pattern |= F_TCP_DPORT; + } else { + k = rte_be_to_cpu_16(spec->hdr.src_port); + + flow->pattern |= F_TCP_SPORT; + } + + snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k); + + flow->rule.num_fields += 1; + + return 0; +} + +/** + * Helper for parsing the tcp source port of the tcp flow item. + * + * @param spec Pointer to the specific flow item. + * @param mask Pointer to the specific flow item's mask. + * @param flow Pointer to the flow. + * @return 0 in case of success, negative error value otherwise. + */ +static inline int +mrvl_parse_tcp_sport(const struct rte_flow_item_tcp *spec, + const struct rte_flow_item_tcp *mask, + struct rte_flow *flow) +{ + return mrvl_parse_tcp_port(spec, mask, 0, flow); +} + +/** + * Helper for parsing the tcp destination port of the tcp flow item. + * + * @param spec Pointer to the specific flow item. + * @param mask Pointer to the specific flow item's mask. + * @param flow Pointer to the flow. + * @return 0 in case of success, negative error value otherwise. + */ +static inline int +mrvl_parse_tcp_dport(const struct rte_flow_item_tcp *spec, + const struct rte_flow_item_tcp *mask, + struct rte_flow *flow) +{ + return mrvl_parse_tcp_port(spec, mask, 1, flow); +} + +/** + * Parse destination or source port of the udp flow item. + * + * This will create classifier rule that matches either destination or + * source udp port. + * + * @param spec Pointer to the specific flow item. + * @param mask Pointer to the specific flow item's mask. + * @param flow Pointer to the flow. + * @return 0 in case of success, negative error value otherwise. 
+ */ +static int +mrvl_parse_udp_port(const struct rte_flow_item_udp *spec, + const struct rte_flow_item_udp *mask __rte_unused, + int parse_dst, struct rte_flow *flow) +{ + struct pp2_cls_rule_key_field *key_field; + uint16_t k; + + if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS) + return -ENOSPC; + + key_field = &flow->rule.fields[flow->rule.num_fields]; + mrvl_alloc_key_mask(key_field); + key_field->size = 2; + + if (parse_dst) { + k = rte_be_to_cpu_16(spec->hdr.dst_port); + + flow->pattern |= F_UDP_DPORT; + } else { + k = rte_be_to_cpu_16(spec->hdr.src_port); + + flow->pattern |= F_UDP_SPORT; + } + + snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k); + + flow->rule.num_fields += 1; + + return 0; +} + +/** + * Helper for parsing the udp source port of the udp flow item. + * + * @param spec Pointer to the specific flow item. + * @param mask Pointer to the specific flow item's mask. + * @param flow Pointer to the flow. + * @return 0 in case of success, negative error value otherwise. + */ +static inline int +mrvl_parse_udp_sport(const struct rte_flow_item_udp *spec, + const struct rte_flow_item_udp *mask, + struct rte_flow *flow) +{ + return mrvl_parse_udp_port(spec, mask, 0, flow); +} + +/** + * Helper for parsing the udp destination port of the udp flow item. + * + * @param spec Pointer to the specific flow item. + * @param mask Pointer to the specific flow item's mask. + * @param flow Pointer to the flow. + * @return 0 in case of success, negative error value otherwise. + */ +static inline int +mrvl_parse_udp_dport(const struct rte_flow_item_udp *spec, + const struct rte_flow_item_udp *mask, + struct rte_flow *flow) +{ + return mrvl_parse_udp_port(spec, mask, 1, flow); +} + +/** + * Parse eth flow item. + * + * @param item Pointer to the flow item. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 on success, negative value otherwise. + */ +static int +mrvl_parse_eth(const struct rte_flow_item *item, struct rte_flow *flow, + struct rte_flow_error *error) +{ + const struct rte_flow_item_eth *spec = NULL, *mask = NULL; + struct ether_addr zero; + int ret; + + ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask, + &rte_flow_item_eth_mask, + sizeof(struct rte_flow_item_eth), error); + if (ret) + return ret; + + memset(&zero, 0, sizeof(zero)); + + if (memcmp(&mask->dst, &zero, sizeof(mask->dst))) { + ret = mrvl_parse_dmac(spec, mask, flow); + if (ret) + goto out; + } + + if (memcmp(&mask->src, &zero, sizeof(mask->src))) { + ret = mrvl_parse_smac(spec, mask, flow); + if (ret) + goto out; + } + + if (mask->type) { + RTE_LOG(WARNING, PMD, "eth type mask is ignored\n"); + ret = mrvl_parse_type(spec, mask, flow); + if (ret) + goto out; + } + + return 0; +out: + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Reached maximum number of fields in cls tbl key\n"); + return -rte_errno; +} + +/** + * Parse vlan flow item. + * + * @param item Pointer to the flow item. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 on success, negative value otherwise.
+ */ +static int +mrvl_parse_vlan(const struct rte_flow_item *item, + struct rte_flow *flow, + struct rte_flow_error *error) +{ + const struct rte_flow_item_vlan *spec = NULL, *mask = NULL; + uint16_t m; + int ret; + + ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask, + &rte_flow_item_vlan_mask, + sizeof(struct rte_flow_item_vlan), error); + if (ret) + return ret; + + m = rte_be_to_cpu_16(mask->tci); + if (m & MRVL_VLAN_ID_MASK) { + RTE_LOG(WARNING, PMD, "vlan id mask is ignored\n"); + ret = mrvl_parse_vlan_id(spec, mask, flow); + if (ret) + goto out; + } + + if (m & MRVL_VLAN_PRI_MASK) { + RTE_LOG(WARNING, PMD, "vlan pri mask is ignored\n"); + ret = mrvl_parse_vlan_pri(spec, mask, flow); + if (ret) + goto out; + } + + if (flow->pattern & F_TYPE) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "VLAN TPID matching is not supported\n"); + return -rte_errno; + } + if (mask->inner_type) { + struct rte_flow_item_eth spec_eth = { + .type = spec->inner_type, + }; + struct rte_flow_item_eth mask_eth = { + .type = mask->inner_type, + }; + + RTE_LOG(WARNING, PMD, "inner eth type mask is ignored\n"); + ret = mrvl_parse_type(&spec_eth, &mask_eth, flow); + if (ret) + goto out; + } + + return 0; +out: + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Reached maximum number of fields in cls tbl key\n"); + return -rte_errno; +} + +/** + * Parse ipv4 flow item. + * + * @param item Pointer to the flow item. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 on success, negative value otherwise. + */ +static int +mrvl_parse_ip4(const struct rte_flow_item *item, + struct rte_flow *flow, + struct rte_flow_error *error) +{ + const struct rte_flow_item_ipv4 *spec = NULL, *mask = NULL; + int ret; + + ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask, + &rte_flow_item_ipv4_mask, + sizeof(struct rte_flow_item_ipv4), error); + if (ret) + return ret; + + if (mask->hdr.version_ihl || + mask->hdr.total_length || + mask->hdr.packet_id || + mask->hdr.fragment_offset || + mask->hdr.time_to_live || + mask->hdr.hdr_checksum) { + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "Not supported by classifier\n"); + return -rte_errno; + } + + if (mask->hdr.type_of_service & MRVL_IPV4_DSCP_MASK) { + ret = mrvl_parse_ip4_dscp(spec, mask, flow); + if (ret) + goto out; + } + + if (mask->hdr.src_addr) { + ret = mrvl_parse_ip4_sip(spec, mask, flow); + if (ret) + goto out; + } + + if (mask->hdr.dst_addr) { + ret = mrvl_parse_ip4_dip(spec, mask, flow); + if (ret) + goto out; + } + + if (mask->hdr.next_proto_id) { + RTE_LOG(WARNING, PMD, "next proto id mask is ignored\n"); + ret = mrvl_parse_ip4_proto(spec, mask, flow); + if (ret) + goto out; + } + + return 0; +out: + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Reached maximum number of fields in cls tbl key\n"); + return -rte_errno; +} + +/** + * Parse ipv6 flow item. + * + * @param item Pointer to the flow item. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 on success, negative value otherwise.
+ */ +static int +mrvl_parse_ip6(const struct rte_flow_item *item, + struct rte_flow *flow, + struct rte_flow_error *error) +{ + const struct rte_flow_item_ipv6 *spec = NULL, *mask = NULL; + struct ipv6_hdr zero; + uint32_t flow_mask; + int ret; + + ret = mrvl_parse_init(item, (const void **)&spec, + (const void **)&mask, + &rte_flow_item_ipv6_mask, + sizeof(struct rte_flow_item_ipv6), + error); + if (ret) + return ret; + + memset(&zero, 0, sizeof(zero)); + + if (mask->hdr.payload_len || + mask->hdr.hop_limits) { + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "Not supported by classifier\n"); + return -rte_errno; + } + + if (memcmp(mask->hdr.src_addr, + zero.src_addr, sizeof(mask->hdr.src_addr))) { + ret = mrvl_parse_ip6_sip(spec, mask, flow); + if (ret) + goto out; + } + + if (memcmp(mask->hdr.dst_addr, + zero.dst_addr, sizeof(mask->hdr.dst_addr))) { + ret = mrvl_parse_ip6_dip(spec, mask, flow); + if (ret) + goto out; + } + + flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow) & MRVL_IPV6_FLOW_MASK; + if (flow_mask) { + ret = mrvl_parse_ip6_flow(spec, mask, flow); + if (ret) + goto out; + } + + if (mask->hdr.proto) { + RTE_LOG(WARNING, PMD, "next header mask is ignored\n"); + ret = mrvl_parse_ip6_next_hdr(spec, mask, flow); + if (ret) + goto out; + } + + return 0; +out: + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Reached maximum number of fields in cls tbl key\n"); + return -rte_errno; +} + +/** + * Parse tcp flow item. + * + * @param item Pointer to the flow item. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 on success, negative value otherwise. + */ +static int +mrvl_parse_tcp(const struct rte_flow_item *item, + struct rte_flow *flow, + struct rte_flow_error *error) +{ + const struct rte_flow_item_tcp *spec = NULL, *mask = NULL; + int ret; + + ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask, + &rte_flow_item_tcp_mask, + sizeof(struct rte_flow_item_tcp), error); + if (ret) + return ret; + + if (mask->hdr.sent_seq || + mask->hdr.recv_ack || + mask->hdr.data_off || + mask->hdr.tcp_flags || + mask->hdr.rx_win || + mask->hdr.cksum || + mask->hdr.tcp_urp) { + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "Not supported by classifier\n"); + return -rte_errno; + } + + if (mask->hdr.src_port) { + RTE_LOG(WARNING, PMD, "tcp sport mask is ignored\n"); + ret = mrvl_parse_tcp_sport(spec, mask, flow); + if (ret) + goto out; + } + + if (mask->hdr.dst_port) { + RTE_LOG(WARNING, PMD, "tcp dport mask is ignored\n"); + ret = mrvl_parse_tcp_dport(spec, mask, flow); + if (ret) + goto out; + } + + return 0; +out: + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Reached maximum number of fields in cls tbl key\n"); + return -rte_errno; +} + +/** + * Parse udp flow item. + * + * @param item Pointer to the flow item. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 on success, negative value otherwise.
+ */ +static int +mrvl_parse_udp(const struct rte_flow_item *item, + struct rte_flow *flow, + struct rte_flow_error *error) +{ + const struct rte_flow_item_udp *spec = NULL, *mask = NULL; + int ret; + + ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask, + &rte_flow_item_udp_mask, + sizeof(struct rte_flow_item_udp), error); + if (ret) + return ret; + + if (mask->hdr.dgram_len || + mask->hdr.dgram_cksum) { + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "Not supported by classifier\n"); + return -rte_errno; + } + + if (mask->hdr.src_port) { + RTE_LOG(WARNING, PMD, "udp sport mask is ignored\n"); + ret = mrvl_parse_udp_sport(spec, mask, flow); + if (ret) + goto out; + } + + if (mask->hdr.dst_port) { + RTE_LOG(WARNING, PMD, "udp dport mask is ignored\n"); + ret = mrvl_parse_udp_dport(spec, mask, flow); + if (ret) + goto out; + } + + return 0; +out: + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Reached maximum number of fields in cls tbl key\n"); + return -rte_errno; +} + +/** + * Parse flow pattern composed of the eth item. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static int +mrvl_parse_pattern_eth(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + const struct rte_flow_item *item = mrvl_next_item(pattern); + + return mrvl_parse_eth(item, flow, error); +} + +/** + * Parse flow pattern composed of the eth and vlan items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static int +mrvl_parse_pattern_eth_vlan(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + const struct rte_flow_item *item = mrvl_next_item(pattern); + int ret; + + ret = mrvl_parse_eth(item, flow, error); + if (ret) + return ret; + + item = mrvl_next_item(item + 1); + + return mrvl_parse_vlan(item, flow, error); +} + +/** + * Parse flow pattern composed of the eth, vlan and ip4/ip6 items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @param ip6 1 to parse ip6 item, 0 to parse ip4 item. + * @returns 0 in case of success, negative value otherwise. + */ +static int +mrvl_parse_pattern_eth_vlan_ip4_ip6(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error, int ip6) +{ + const struct rte_flow_item *item = mrvl_next_item(pattern); + int ret; + + ret = mrvl_parse_eth(item, flow, error); + if (ret) + return ret; + + item = mrvl_next_item(item + 1); + ret = mrvl_parse_vlan(item, flow, error); + if (ret) + return ret; + + item = mrvl_next_item(item + 1); + + return ip6 ? mrvl_parse_ip6(item, flow, error) : + mrvl_parse_ip4(item, flow, error); +} + +/** + * Parse flow pattern composed of the eth, vlan and ipv4 items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise.
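The pattern helpers that follow all step through the item table with mrvl_next_item(item + 1), skipping VOID items between the ones that matter. A standalone model of that walk (simplified types, no DPDK required):

#include <stdio.h>

enum item_type { ITEM_END, ITEM_VOID, ITEM_ETH, ITEM_IPV4, ITEM_TCP };

struct item { enum item_type type; };

/* Same shape as mrvl_next_item(): return the next non-void item or NULL. */
static const struct item *
next_item(const struct item *it)
{
        for (; it->type != ITEM_END; it++)
                if (it->type != ITEM_VOID)
                        return it;
        return NULL;
}

int main(void)
{
        const struct item pattern[] = {
                { ITEM_VOID }, { ITEM_ETH }, { ITEM_VOID }, { ITEM_IPV4 },
                { ITEM_TCP }, { ITEM_END },
        };
        const struct item *it = next_item(pattern);

        while (it) {
                printf("item type %d\n", it->type); /* prints 2, 3, 4 */
                it = next_item(it + 1);
        }
        return 0;
}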
+ */ +static int +mrvl_parse_pattern_eth_vlan_ip4(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + return mrvl_parse_pattern_eth_vlan_ip4_ip6(pattern, flow, error, 0); +} + +/** + * Parse flow pattern composed of the eth, vlan and ipv6 items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static int +mrvl_parse_pattern_eth_vlan_ip6(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + return mrvl_parse_pattern_eth_vlan_ip4_ip6(pattern, flow, error, 1); +} + +/** + * Parse flow pattern composed of the eth and ip4/ip6 items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @param ip6 1 to parse ip6 item, 0 to parse ip4 item. + * @returns 0 in case of success, negative value otherwise. + */ +static int +mrvl_parse_pattern_eth_ip4_ip6(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error, int ip6) +{ + const struct rte_flow_item *item = mrvl_next_item(pattern); + int ret; + + ret = mrvl_parse_eth(item, flow, error); + if (ret) + return ret; + + item = mrvl_next_item(item + 1); + + return ip6 ? mrvl_parse_ip6(item, flow, error) : + mrvl_parse_ip4(item, flow, error); +} + +/** + * Parse flow pattern composed of the eth and ipv4 items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static inline int +mrvl_parse_pattern_eth_ip4(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + return mrvl_parse_pattern_eth_ip4_ip6(pattern, flow, error, 0); +} + +/** + * Parse flow pattern composed of the eth and ipv6 items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static inline int +mrvl_parse_pattern_eth_ip6(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + return mrvl_parse_pattern_eth_ip4_ip6(pattern, flow, error, 1); +} + +/** + * Parse flow pattern composed of the eth, ip4 and tcp/udp items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @param tcp 1 to parse tcp item, 0 to parse udp item. + * @returns 0 in case of success, negative value otherwise. + */ +static int +mrvl_parse_pattern_eth_ip4_tcp_udp(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error, int tcp) +{ + const struct rte_flow_item *item = mrvl_next_item(pattern); + int ret; + + ret = mrvl_parse_pattern_eth_ip4_ip6(pattern, flow, error, 0); + if (ret) + return ret; + + item = mrvl_next_item(item + 1); + item = mrvl_next_item(item + 1); + + if (tcp) + return mrvl_parse_tcp(item, flow, error); + + return mrvl_parse_udp(item, flow, error); +} + +/** + * Parse flow pattern composed of the eth, ipv4 and tcp items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. 
+ */ +static inline int +mrvl_parse_pattern_eth_ip4_tcp(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + return mrvl_parse_pattern_eth_ip4_tcp_udp(pattern, flow, error, 1); +} + +/** + * Parse flow pattern composed of the eth, ipv4 and udp items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static inline int +mrvl_parse_pattern_eth_ip4_udp(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + return mrvl_parse_pattern_eth_ip4_tcp_udp(pattern, flow, error, 0); +} + +/** + * Parse flow pattern composed of the eth, ipv6 and tcp/udp items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @param tcp 1 to parse tcp item, 0 to parse udp item. + * @returns 0 in case of success, negative value otherwise. + */ +static int +mrvl_parse_pattern_eth_ip6_tcp_udp(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error, int tcp) +{ + const struct rte_flow_item *item = mrvl_next_item(pattern); + int ret; + + ret = mrvl_parse_pattern_eth_ip4_ip6(pattern, flow, error, 1); + if (ret) + return ret; + + item = mrvl_next_item(item + 1); + item = mrvl_next_item(item + 1); + + if (tcp) + return mrvl_parse_tcp(item, flow, error); + + return mrvl_parse_udp(item, flow, error); +} + +/** + * Parse flow pattern composed of the eth, ipv6 and tcp items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static inline int +mrvl_parse_pattern_eth_ip6_tcp(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + return mrvl_parse_pattern_eth_ip6_tcp_udp(pattern, flow, error, 1); +} + +/** + * Parse flow pattern composed of the eth, ipv6 and udp items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static inline int +mrvl_parse_pattern_eth_ip6_udp(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + return mrvl_parse_pattern_eth_ip6_tcp_udp(pattern, flow, error, 0); +} + +/** + * Parse flow pattern composed of the vlan item. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static int +mrvl_parse_pattern_vlan(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + const struct rte_flow_item *item = mrvl_next_item(pattern); + + return mrvl_parse_vlan(item, flow, error); +} + +/** + * Parse flow pattern composed of the vlan and ip4/ip6 items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @param ip6 1 to parse ip6 item, 0 to parse ip4 item. + * @returns 0 in case of success, negative value otherwise. 
+ */ +static int +mrvl_parse_pattern_vlan_ip4_ip6(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error, int ip6) +{ + const struct rte_flow_item *item = mrvl_next_item(pattern); + int ret; + + ret = mrvl_parse_vlan(item, flow, error); + if (ret) + return ret; + + item = mrvl_next_item(item + 1); + + return ip6 ? mrvl_parse_ip6(item, flow, error) : + mrvl_parse_ip4(item, flow, error); +} + +/** + * Parse flow pattern composed of the vlan and ipv4 items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static inline int +mrvl_parse_pattern_vlan_ip4(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + return mrvl_parse_pattern_vlan_ip4_ip6(pattern, flow, error, 0); +} + +/** + * Parse flow pattern composed of the vlan, ipv4 and tcp/udp items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static int +mrvl_parse_pattern_vlan_ip_tcp_udp(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error, int tcp) +{ + const struct rte_flow_item *item = mrvl_next_item(pattern); + int ret; + + ret = mrvl_parse_pattern_vlan_ip4_ip6(pattern, flow, error, 0); + if (ret) + return ret; + + item = mrvl_next_item(item + 1); + item = mrvl_next_item(item + 1); + + if (tcp) + return mrvl_parse_tcp(item, flow, error); + + return mrvl_parse_udp(item, flow, error); +} + +/** + * Parse flow pattern composed of the vlan, ipv4 and tcp items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static inline int +mrvl_parse_pattern_vlan_ip_tcp(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + return mrvl_parse_pattern_vlan_ip_tcp_udp(pattern, flow, error, 1); +} + +/** + * Parse flow pattern composed of the vlan, ipv4 and udp items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static inline int +mrvl_parse_pattern_vlan_ip_udp(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + return mrvl_parse_pattern_vlan_ip_tcp_udp(pattern, flow, error, 0); +} + +/** + * Parse flow pattern composed of the vlan and ipv6 items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static inline int +mrvl_parse_pattern_vlan_ip6(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + return mrvl_parse_pattern_vlan_ip4_ip6(pattern, flow, error, 1); +} + +/** + * Parse flow pattern composed of the vlan, ipv6 and tcp/udp items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. 
+ */ +static int +mrvl_parse_pattern_vlan_ip6_tcp_udp(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error, int tcp) +{ + const struct rte_flow_item *item = mrvl_next_item(pattern); + int ret; + + ret = mrvl_parse_pattern_vlan_ip4_ip6(pattern, flow, error, 1); + if (ret) + return ret; + + item = mrvl_next_item(item + 1); + item = mrvl_next_item(item + 1); + + if (tcp) + return mrvl_parse_tcp(item, flow, error); + + return mrvl_parse_udp(item, flow, error); +} + +/** + * Parse flow pattern composed of the vlan, ipv6 and tcp items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static inline int +mrvl_parse_pattern_vlan_ip6_tcp(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + return mrvl_parse_pattern_vlan_ip6_tcp_udp(pattern, flow, error, 1); +} + +/** + * Parse flow pattern composed of the vlan, ipv6 and udp items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static inline int +mrvl_parse_pattern_vlan_ip6_udp(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + return mrvl_parse_pattern_vlan_ip6_tcp_udp(pattern, flow, error, 0); +} + +/** + * Parse flow pattern composed of the ip4/ip6 item. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @param ip6 1 to parse ip6 item, 0 to parse ip4 item. + * @returns 0 in case of success, negative value otherwise. + */ +static int +mrvl_parse_pattern_ip4_ip6(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error, int ip6) +{ + const struct rte_flow_item *item = mrvl_next_item(pattern); + + return ip6 ? mrvl_parse_ip6(item, flow, error) : + mrvl_parse_ip4(item, flow, error); +} + +/** + * Parse flow pattern composed of the ipv4 item. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static inline int +mrvl_parse_pattern_ip4(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + return mrvl_parse_pattern_ip4_ip6(pattern, flow, error, 0); +} + +/** + * Parse flow pattern composed of the ipv6 item. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static inline int +mrvl_parse_pattern_ip6(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + return mrvl_parse_pattern_ip4_ip6(pattern, flow, error, 1); +} + +/** + * Parse flow pattern composed of the ip4/ip6 and tcp items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @param ip6 1 to parse ip6 item, 0 to parse ip4 item. + * @returns 0 in case of success, negative value otherwise. 
+ */ +static int +mrvl_parse_pattern_ip4_ip6_tcp(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error, int ip6) +{ + const struct rte_flow_item *item = mrvl_next_item(pattern); + int ret; + + ret = ip6 ? mrvl_parse_ip6(item, flow, error) : + mrvl_parse_ip4(item, flow, error); + if (ret) + return ret; + + item = mrvl_next_item(item + 1); + + return mrvl_parse_tcp(item, flow, error); +} + +/** + * Parse flow pattern composed of the ipv4 and tcp items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static inline int +mrvl_parse_pattern_ip4_tcp(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + return mrvl_parse_pattern_ip4_ip6_tcp(pattern, flow, error, 0); +} + +/** + * Parse flow pattern composed of the ipv6 and tcp items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static inline int +mrvl_parse_pattern_ip6_tcp(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + return mrvl_parse_pattern_ip4_ip6_tcp(pattern, flow, error, 1); +} + +/** + * Parse flow pattern composed of the ipv4/ipv6 and udp items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static int +mrvl_parse_pattern_ip4_ip6_udp(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error, int ip6) +{ + const struct rte_flow_item *item = mrvl_next_item(pattern); + int ret; + + ret = ip6 ? mrvl_parse_ip6(item, flow, error) : + mrvl_parse_ip4(item, flow, error); + if (ret) + return ret; + + item = mrvl_next_item(item + 1); + + return mrvl_parse_udp(item, flow, error); +} + +/** + * Parse flow pattern composed of the ipv4 and udp items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static inline int +mrvl_parse_pattern_ip4_udp(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + return mrvl_parse_pattern_ip4_ip6_udp(pattern, flow, error, 0); +} + +/** + * Parse flow pattern composed of the ipv6 and udp items. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static inline int +mrvl_parse_pattern_ip6_udp(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + return mrvl_parse_pattern_ip4_ip6_udp(pattern, flow, error, 1); +} + +/** + * Parse flow pattern composed of the tcp item. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. 
+ */ +static int +mrvl_parse_pattern_tcp(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + const struct rte_flow_item *item = mrvl_next_item(pattern); + + return mrvl_parse_tcp(item, flow, error); +} + +/** + * Parse flow pattern composed of the udp item. + * + * @param pattern Pointer to the flow pattern table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static int +mrvl_parse_pattern_udp(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + const struct rte_flow_item *item = mrvl_next_item(pattern); + + return mrvl_parse_udp(item, flow, error); +} + +/** + * Structure used to map specific flow pattern to the pattern parse callback + * which will iterate over each pattern item and extract relevant data. + */ +static const struct { + const enum rte_flow_item_type *pattern; + int (*parse)(const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error); +} mrvl_patterns[] = { + { pattern_eth, mrvl_parse_pattern_eth }, + { pattern_eth_vlan, mrvl_parse_pattern_eth_vlan }, + { pattern_eth_vlan_ip, mrvl_parse_pattern_eth_vlan_ip4 }, + { pattern_eth_vlan_ip6, mrvl_parse_pattern_eth_vlan_ip6 }, + { pattern_eth_ip4, mrvl_parse_pattern_eth_ip4 }, + { pattern_eth_ip4_tcp, mrvl_parse_pattern_eth_ip4_tcp }, + { pattern_eth_ip4_udp, mrvl_parse_pattern_eth_ip4_udp }, + { pattern_eth_ip6, mrvl_parse_pattern_eth_ip6 }, + { pattern_eth_ip6_tcp, mrvl_parse_pattern_eth_ip6_tcp }, + { pattern_eth_ip6_udp, mrvl_parse_pattern_eth_ip6_udp }, + { pattern_vlan, mrvl_parse_pattern_vlan }, + { pattern_vlan_ip, mrvl_parse_pattern_vlan_ip4 }, + { pattern_vlan_ip_tcp, mrvl_parse_pattern_vlan_ip_tcp }, + { pattern_vlan_ip_udp, mrvl_parse_pattern_vlan_ip_udp }, + { pattern_vlan_ip6, mrvl_parse_pattern_vlan_ip6 }, + { pattern_vlan_ip6_tcp, mrvl_parse_pattern_vlan_ip6_tcp }, + { pattern_vlan_ip6_udp, mrvl_parse_pattern_vlan_ip6_udp }, + { pattern_ip, mrvl_parse_pattern_ip4 }, + { pattern_ip_tcp, mrvl_parse_pattern_ip4_tcp }, + { pattern_ip_udp, mrvl_parse_pattern_ip4_udp }, + { pattern_ip6, mrvl_parse_pattern_ip6 }, + { pattern_ip6_tcp, mrvl_parse_pattern_ip6_tcp }, + { pattern_ip6_udp, mrvl_parse_pattern_ip6_udp }, + { pattern_tcp, mrvl_parse_pattern_tcp }, + { pattern_udp, mrvl_parse_pattern_udp } +}; + +/** + * Check whether provided pattern matches any of the supported ones. + * + * @param type_pattern Pointer to the pattern type. + * @param item_pattern Pointer to the flow pattern. + * @returns 1 in case of success, 0 value otherwise. + */ +static int +mrvl_patterns_match(const enum rte_flow_item_type *type_pattern, + const struct rte_flow_item *item_pattern) +{ + const enum rte_flow_item_type *type = type_pattern; + const struct rte_flow_item *item = item_pattern; + + for (;;) { + if (item->type == RTE_FLOW_ITEM_TYPE_VOID) { + item++; + continue; + } + + if (*type == RTE_FLOW_ITEM_TYPE_END || + item->type == RTE_FLOW_ITEM_TYPE_END) + break; + + if (*type != item->type) + break; + + item++; + type++; + } + + return *type == item->type; +} + +/** + * Parse flow attribute. + * + * This will check whether the provided attribute's flags are supported. + * + * @param priv Unused + * @param attr Pointer to the flow attribute. + * @param flow Unused + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. 
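A standalone model of the matching rule implemented by mrvl_patterns_match() above: template and pattern must agree item for item, VOID items in the pattern are skipped, and both sequences must end together (simplified types for illustration):

#include <stdio.h>

enum t { T_END, T_VOID, T_ETH, T_IPV4, T_TCP };

static int
match(const enum t *tmpl, const enum t *pat)
{
        for (;;) {
                if (*pat == T_VOID) { pat++; continue; }
                if (*tmpl == T_END || *pat == T_END)
                        break;
                if (*tmpl != *pat)
                        break;
                tmpl++;
                pat++;
        }
        /* True only if both hit END at the same position. */
        return *tmpl == *pat;
}

int main(void)
{
        const enum t tmpl[]  = { T_ETH, T_IPV4, T_TCP, T_END };
        const enum t ok[]    = { T_VOID, T_ETH, T_IPV4, T_TCP, T_END };
        const enum t shrt[]  = { T_ETH, T_IPV4, T_END };

        printf("%d %d\n", match(tmpl, ok), match(tmpl, shrt)); /* 1 0 */
        return 0;
}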
+ */ +static int +mrvl_flow_parse_attr(struct mrvl_priv *priv __rte_unused, + const struct rte_flow_attr *attr, + struct rte_flow *flow __rte_unused, + struct rte_flow_error *error) +{ + if (!attr) { + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR, + NULL, "NULL attribute"); + return -rte_errno; + } + + if (attr->group) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_GROUP, NULL, + "Groups are not supported"); + return -rte_errno; + } + if (attr->priority) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, NULL, + "Priorities are not supported"); + return -rte_errno; + } + if (!attr->ingress) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, NULL, + "Only ingress is supported"); + return -rte_errno; + } + if (attr->egress) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, + "Egress is not supported"); + return -rte_errno; + } + if (attr->transfer) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, NULL, + "Transfer is not supported"); + return -rte_errno; + } + + return 0; +} + +/** + * Parse flow pattern. + * + * Specific classifier rule will be created as well. + * + * @param priv Unused + * @param pattern Pointer to the flow pattern. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static int +mrvl_flow_parse_pattern(struct mrvl_priv *priv __rte_unused, + const struct rte_flow_item pattern[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + unsigned int i; + int ret; + + for (i = 0; i < RTE_DIM(mrvl_patterns); i++) { + if (!mrvl_patterns_match(mrvl_patterns[i].pattern, pattern)) + continue; + + ret = mrvl_patterns[i].parse(pattern, flow, error); + if (ret) + mrvl_free_all_key_mask(&flow->rule); + + return ret; + } + + rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, NULL, + "Unsupported pattern"); + + return -rte_errno; +} + +/** + * Parse flow actions. + * + * @param priv Pointer to the port's private data. + * @param actions Pointer to the action table. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 in case of success, negative value otherwise. + */ +static int +mrvl_flow_parse_actions(struct mrvl_priv *priv, + const struct rte_flow_action actions[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + const struct rte_flow_action *action = actions; + int specified = 0; + + for (; action->type != RTE_FLOW_ACTION_TYPE_END; action++) { + if (action->type == RTE_FLOW_ACTION_TYPE_VOID) + continue; + + if (action->type == RTE_FLOW_ACTION_TYPE_DROP) { + flow->cos.ppio = priv->ppio; + flow->cos.tc = 0; + flow->action.type = PP2_CLS_TBL_ACT_DROP; + flow->action.cos = &flow->cos; + specified++; + } else if (action->type == RTE_FLOW_ACTION_TYPE_QUEUE) { + const struct rte_flow_action_queue *q = + (const struct rte_flow_action_queue *) + action->conf; + + if (q->index >= priv->nb_rx_queues) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "Queue index out of range"); + return -rte_errno; + } + + if (priv->rxq_map[q->index].tc == MRVL_UNKNOWN_TC) { + /* + * Unknown TC mapping, mapping will not have + * a correct queue.
+ */ + RTE_LOG(ERR, PMD, + "Unknown TC mapping for queue %hu eth%hhu\n", + q->index, priv->ppio_id); + + rte_flow_error_set(error, EFAULT, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, NULL); + return -rte_errno; + } + + RTE_LOG(DEBUG, PMD, + "Action: Assign packets to queue %d, tc:%d, q:%d\n", + q->index, priv->rxq_map[q->index].tc, + priv->rxq_map[q->index].inq); + + flow->cos.ppio = priv->ppio; + flow->cos.tc = priv->rxq_map[q->index].tc; + flow->action.type = PP2_CLS_TBL_ACT_DONE; + flow->action.cos = &flow->cos; + specified++; + } else { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "Action not supported"); + return -rte_errno; + } + + } + + if (!specified) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "Action not specified"); + return -rte_errno; + } + + return 0; +} + +/** + * Parse flow attribute, pattern and actions. + * + * @param priv Pointer to the port's private data. + * @param attr Pointer to the flow attribute. + * @param pattern Pointer to the flow pattern. + * @param actions Pointer to the flow actions. + * @param flow Pointer to the flow. + * @param error Pointer to the flow error. + * @returns 0 on success, negative value otherwise. + */ +static int +mrvl_flow_parse(struct mrvl_priv *priv, const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow *flow, + struct rte_flow_error *error) +{ + int ret; + + ret = mrvl_flow_parse_attr(priv, attr, flow, error); + if (ret) + return ret; + + ret = mrvl_flow_parse_pattern(priv, pattern, flow, error); + if (ret) + return ret; + + return mrvl_flow_parse_actions(priv, actions, flow, error); +} + +static inline enum pp2_cls_tbl_type +mrvl_engine_type(const struct rte_flow *flow) +{ + int i, size = 0; + + for (i = 0; i < flow->rule.num_fields; i++) + size += flow->rule.fields[i].size; + + /* + * For maskable engine type the key size must be up to 8 bytes. + * For keys with size bigger than 8 bytes, engine type must + * be set to exact match. + */ + if (size > 8) + return PP2_CLS_TBL_EXACT_MATCH; + + return PP2_CLS_TBL_MASKABLE; +} + +static int +mrvl_create_cls_table(struct rte_eth_dev *dev, struct rte_flow *first_flow) +{ + struct mrvl_priv *priv = dev->data->dev_private; + struct pp2_cls_tbl_key *key = &priv->cls_tbl_params.key; + int ret; + + if (priv->cls_tbl) { + pp2_cls_tbl_deinit(priv->cls_tbl); + priv->cls_tbl = NULL; + } + + memset(&priv->cls_tbl_params, 0, sizeof(priv->cls_tbl_params)); + + priv->cls_tbl_params.type = mrvl_engine_type(first_flow); + RTE_LOG(INFO, PMD, "Setting cls search engine type to %s\n", + priv->cls_tbl_params.type == PP2_CLS_TBL_EXACT_MATCH ? 
+ "exact" : "maskable"); + priv->cls_tbl_params.max_num_rules = MRVL_CLS_MAX_NUM_RULES; + priv->cls_tbl_params.default_act.type = PP2_CLS_TBL_ACT_DONE; + priv->cls_tbl_params.default_act.cos = &first_flow->cos; + + if (first_flow->pattern & F_DMAC) { + key->proto_field[key->num_fields].proto = MV_NET_PROTO_ETH; + key->proto_field[key->num_fields].field.eth = MV_NET_ETH_F_DA; + key->key_size += 6; + key->num_fields += 1; + } + + if (first_flow->pattern & F_SMAC) { + key->proto_field[key->num_fields].proto = MV_NET_PROTO_ETH; + key->proto_field[key->num_fields].field.eth = MV_NET_ETH_F_SA; + key->key_size += 6; + key->num_fields += 1; + } + + if (first_flow->pattern & F_TYPE) { + key->proto_field[key->num_fields].proto = MV_NET_PROTO_ETH; + key->proto_field[key->num_fields].field.eth = MV_NET_ETH_F_TYPE; + key->key_size += 2; + key->num_fields += 1; + } + + if (first_flow->pattern & F_VLAN_ID) { + key->proto_field[key->num_fields].proto = MV_NET_PROTO_VLAN; + key->proto_field[key->num_fields].field.vlan = MV_NET_VLAN_F_ID; + key->key_size += 2; + key->num_fields += 1; + } + + if (first_flow->pattern & F_VLAN_PRI) { + key->proto_field[key->num_fields].proto = MV_NET_PROTO_VLAN; + key->proto_field[key->num_fields].field.vlan = + MV_NET_VLAN_F_PRI; + key->key_size += 1; + key->num_fields += 1; + } + + if (first_flow->pattern & F_IP4_TOS) { + key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP4; + key->proto_field[key->num_fields].field.ipv4 = MV_NET_IP4_F_TOS; + key->key_size += 1; + key->num_fields += 1; + } + + if (first_flow->pattern & F_IP4_SIP) { + key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP4; + key->proto_field[key->num_fields].field.ipv4 = MV_NET_IP4_F_SA; + key->key_size += 4; + key->num_fields += 1; + } + + if (first_flow->pattern & F_IP4_DIP) { + key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP4; + key->proto_field[key->num_fields].field.ipv4 = MV_NET_IP4_F_DA; + key->key_size += 4; + key->num_fields += 1; + } + + if (first_flow->pattern & F_IP4_PROTO) { + key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP4; + key->proto_field[key->num_fields].field.ipv4 = + MV_NET_IP4_F_PROTO; + key->key_size += 1; + key->num_fields += 1; + } + + if (first_flow->pattern & F_IP6_SIP) { + key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP6; + key->proto_field[key->num_fields].field.ipv6 = MV_NET_IP6_F_SA; + key->key_size += 16; + key->num_fields += 1; + } + + if (first_flow->pattern & F_IP6_DIP) { + key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP6; + key->proto_field[key->num_fields].field.ipv6 = MV_NET_IP6_F_DA; + key->key_size += 16; + key->num_fields += 1; + } + + if (first_flow->pattern & F_IP6_FLOW) { + key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP6; + key->proto_field[key->num_fields].field.ipv6 = + MV_NET_IP6_F_FLOW; + key->key_size += 3; + key->num_fields += 1; + } + + if (first_flow->pattern & F_IP6_NEXT_HDR) { + key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP6; + key->proto_field[key->num_fields].field.ipv6 = + MV_NET_IP6_F_NEXT_HDR; + key->key_size += 1; + key->num_fields += 1; + } + + if (first_flow->pattern & F_TCP_SPORT) { + key->proto_field[key->num_fields].proto = MV_NET_PROTO_TCP; + key->proto_field[key->num_fields].field.tcp = MV_NET_TCP_F_SP; + key->key_size += 2; + key->num_fields += 1; + } + + if (first_flow->pattern & F_TCP_DPORT) { + key->proto_field[key->num_fields].proto = MV_NET_PROTO_TCP; + key->proto_field[key->num_fields].field.tcp = MV_NET_TCP_F_DP; + key->key_size += 2; + key->num_fields += 1; + } 
+
+	if (first_flow->pattern & F_UDP_SPORT) {
+		key->proto_field[key->num_fields].proto = MV_NET_PROTO_UDP;
+		key->proto_field[key->num_fields].field.udp = MV_NET_UDP_F_SP;
+		key->key_size += 2;
+		key->num_fields += 1;
+	}
+
+	if (first_flow->pattern & F_UDP_DPORT) {
+		key->proto_field[key->num_fields].proto = MV_NET_PROTO_UDP;
+		key->proto_field[key->num_fields].field.udp = MV_NET_UDP_F_DP;
+		key->key_size += 2;
+		key->num_fields += 1;
+	}
+
+	ret = pp2_cls_tbl_init(&priv->cls_tbl_params, &priv->cls_tbl);
+	if (!ret)
+		priv->cls_tbl_pattern = first_flow->pattern;
+
+	return ret;
+}
+
+/**
+ * Check whether a new flow can be added to the table
+ *
+ * @param priv Pointer to the port's private data.
+ * @param flow Pointer to the new flow.
+ * @return 1 in case flow can be added, 0 otherwise.
+ */
+static inline int
+mrvl_flow_can_be_added(struct mrvl_priv *priv, const struct rte_flow *flow)
+{
+	return flow->pattern == priv->cls_tbl_pattern &&
+	       mrvl_engine_type(flow) == priv->cls_tbl_params.type;
+}
+
+/**
+ * DPDK flow create callback called when flow is to be created.
+ *
+ * @param dev Pointer to the device.
+ * @param attr Pointer to the flow attribute.
+ * @param pattern Pointer to the flow pattern.
+ * @param actions Pointer to the flow actions.
+ * @param error Pointer to the flow error.
+ * @returns Pointer to the created flow in case of success, NULL otherwise.
+ */
+static struct rte_flow *
+mrvl_flow_create(struct rte_eth_dev *dev,
+		 const struct rte_flow_attr *attr,
+		 const struct rte_flow_item pattern[],
+		 const struct rte_flow_action actions[],
+		 struct rte_flow_error *error)
+{
+	struct mrvl_priv *priv = dev->data->dev_private;
+	struct rte_flow *flow, *first;
+	int ret;
+
+	if (!dev->data->dev_started) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "Port must be started first\n");
+		return NULL;
+	}
+
+	flow = rte_zmalloc_socket(NULL, sizeof(*flow), 0, rte_socket_id());
+	if (!flow)
+		return NULL;
+
+	ret = mrvl_flow_parse(priv, attr, pattern, actions, flow, error);
+	if (ret)
+		goto out;
+
+	/*
+	 * Four cases here:
+	 *
+	 * 1. In case the table does not exist - create one.
+	 * 2. In case the table exists, is empty and the new flow cannot be
+	 *    added, recreate the table.
+	 * 3. In case the table is not empty and the new flow matches the
+	 *    table format, add it.
+	 * 4. Otherwise the flow cannot be added.
+	 */
+	first = LIST_FIRST(&priv->flows);
+	if (!priv->cls_tbl) {
+		ret = mrvl_create_cls_table(dev, flow);
+	} else if (!first && !mrvl_flow_can_be_added(priv, flow)) {
+		ret = mrvl_create_cls_table(dev, flow);
+	} else if (mrvl_flow_can_be_added(priv, flow)) {
+		ret = 0;
+	} else {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "Pattern does not match cls table format\n");
+		goto out;
+	}
+
+	if (ret) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "Failed to create cls table\n");
+		goto out;
+	}
+
+	ret = pp2_cls_tbl_add_rule(priv->cls_tbl, &flow->rule, &flow->action);
+	if (ret) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "Failed to add rule\n");
+		goto out;
+	}
+
+	LIST_INSERT_HEAD(&priv->flows, flow, next);
+
+	return flow;
+out:
+	rte_free(flow);
+	return NULL;
+}
+
+/**
+ * Remove classifier rule associated with given flow.
+ *
+ * @param priv Pointer to the port's private data.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
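+ *
+ * Expected call pattern (sketch only, mirroring the destroy and flush
+ * callbacks below):
+ *   ret = mrvl_flow_remove(priv, flow, error);
+ *   if (!ret)
+ *           rte_free(flow);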
+ */
+static int
+mrvl_flow_remove(struct mrvl_priv *priv, struct rte_flow *flow,
+		 struct rte_flow_error *error)
+{
+	int ret;
+
+	if (!priv->cls_tbl) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "Classifier table not initialized");
+		return -rte_errno;
+	}
+
+	ret = pp2_cls_tbl_remove_rule(priv->cls_tbl, &flow->rule);
+	if (ret) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "Failed to remove rule");
+		return -rte_errno;
+	}
+
+	mrvl_free_all_key_mask(&flow->rule);
+
+	return 0;
+}
+
+/**
+ * DPDK flow destroy callback called when flow is to be removed.
+ *
+ * @param dev Pointer to the device.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
+		  struct rte_flow_error *error)
+{
+	struct mrvl_priv *priv = dev->data->dev_private;
+	struct rte_flow *f;
+	int ret;
+
+	LIST_FOREACH(f, &priv->flows, next) {
+		if (f == flow)
+			break;
+	}
+
+	if (!f) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "Rule was not found");
+		return -rte_errno;
+	}
+
+	LIST_REMOVE(f, next);
+
+	ret = mrvl_flow_remove(priv, flow, error);
+	if (ret)
+		return ret;
+
+	rte_free(flow);
+
+	return 0;
+}
+
+/**
+ * DPDK flow callback called to verify given attribute, pattern and actions.
+ *
+ * @param dev Pointer to the device.
+ * @param attr Pointer to the flow attribute.
+ * @param pattern Pointer to the flow pattern.
+ * @param actions Pointer to the flow actions.
+ * @param error Pointer to the flow error.
+ * @returns 0 on success, negative value otherwise.
+ */
+static int
+mrvl_flow_validate(struct rte_eth_dev *dev,
+		   const struct rte_flow_attr *attr,
+		   const struct rte_flow_item pattern[],
+		   const struct rte_flow_action actions[],
+		   struct rte_flow_error *error)
+{
+	struct rte_flow *flow;
+
+	flow = mrvl_flow_create(dev, attr, pattern, actions, error);
+	if (!flow)
+		return -rte_errno;
+
+	mrvl_flow_destroy(dev, flow, error);
+
+	return 0;
+}
+
+/**
+ * DPDK flow flush callback called when flows are to be flushed.
+ *
+ * @param dev Pointer to the device.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
+{
+	struct mrvl_priv *priv = dev->data->dev_private;
+
+	while (!LIST_EMPTY(&priv->flows)) {
+		struct rte_flow *flow = LIST_FIRST(&priv->flows);
+		int ret = mrvl_flow_remove(priv, flow, error);
+		if (ret)
+			return ret;
+
+		LIST_REMOVE(flow, next);
+		rte_free(flow);
+	}
+
+	return 0;
+}
+
+/**
+ * DPDK flow isolate callback called to isolate port.
+ *
+ * @param dev Pointer to the device.
+ * @param enable Pass 0/1 to disable/enable port isolation.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
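+ *
+ * Hypothetical use via the generic API (illustration only): isolation is
+ * toggled while the port is stopped, e.g.
+ *   rte_flow_isolate(port_id, 1, &error);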
+ */
+static int
+mrvl_flow_isolate(struct rte_eth_dev *dev, int enable,
+		  struct rte_flow_error *error)
+{
+	struct mrvl_priv *priv = dev->data->dev_private;
+
+	if (dev->data->dev_started) {
+		rte_flow_error_set(error, EBUSY,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				   NULL, "Port must be stopped first\n");
+		return -rte_errno;
+	}
+
+	priv->isolated = enable;
+
+	return 0;
+}
+
+const struct rte_flow_ops mrvl_flow_ops = {
+	.validate = mrvl_flow_validate,
+	.create = mrvl_flow_create,
+	.destroy = mrvl_flow_destroy,
+	.flush = mrvl_flow_flush,
+	.isolate = mrvl_flow_isolate
+};
diff --git a/drivers/net/mvpp2/mrvl_qos.c b/drivers/net/mvpp2/mrvl_qos.c
new file mode 100644
index 00000000..70d000ca
--- /dev/null
+++ b/drivers/net/mvpp2/mrvl_qos.c
@@ -0,0 +1,894 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Marvell International Ltd.
+ * Copyright(c) 2017 Semihalf.
+ * All rights reserved.
+ */
+
+#include <stdint.h>
+#include <string.h>
+#include <stdio.h>
+
+#include <rte_common.h>
+#include <rte_cfgfile.h>
+#include <rte_log.h>
+#include <rte_lcore.h>
+#include <rte_malloc.h>
+#include <rte_string_fns.h>
+
+/* Unfortunately, container_of is defined by both DPDK and MUSDK,
+ * so we keep only one version.
+ *
+ * Note that it is not used in this PMD anyway.
+ */
+#ifdef container_of
+#undef container_of
+#endif
+
+#include "mrvl_qos.h"
+
+/* Parsing tokens. Defined conveniently, so that any correction is easy. */
+#define MRVL_TOK_DEFAULT "default"
+#define MRVL_TOK_DEFAULT_TC "default_tc"
+#define MRVL_TOK_DSCP "dscp"
+#define MRVL_TOK_MAPPING_PRIORITY "mapping_priority"
+#define MRVL_TOK_IP "ip"
+#define MRVL_TOK_IP_VLAN "ip/vlan"
+#define MRVL_TOK_PCP "pcp"
+#define MRVL_TOK_PORT "port"
+#define MRVL_TOK_RXQ "rxq"
+#define MRVL_TOK_TC "tc"
+#define MRVL_TOK_TXQ "txq"
+#define MRVL_TOK_VLAN "vlan"
+#define MRVL_TOK_VLAN_IP "vlan/ip"
+
+/* egress specific configuration tokens */
+#define MRVL_TOK_BURST_SIZE "burst_size"
+#define MRVL_TOK_RATE_LIMIT "rate_limit"
+#define MRVL_TOK_RATE_LIMIT_ENABLE "rate_limit_enable"
+#define MRVL_TOK_SCHED_MODE "sched_mode"
+#define MRVL_TOK_SCHED_MODE_SP "sp"
+#define MRVL_TOK_SCHED_MODE_WRR "wrr"
+#define MRVL_TOK_WRR_WEIGHT "wrr_weight"
+
+/* policer specific configuration tokens */
+#define MRVL_TOK_PLCR_ENABLE "policer_enable"
+#define MRVL_TOK_PLCR_UNIT "token_unit"
+#define MRVL_TOK_PLCR_UNIT_BYTES "bytes"
+#define MRVL_TOK_PLCR_UNIT_PACKETS "packets"
+#define MRVL_TOK_PLCR_COLOR "color_mode"
+#define MRVL_TOK_PLCR_COLOR_BLIND "blind"
+#define MRVL_TOK_PLCR_COLOR_AWARE "aware"
+#define MRVL_TOK_PLCR_CIR "cir"
+#define MRVL_TOK_PLCR_CBS "cbs"
+#define MRVL_TOK_PLCR_EBS "ebs"
+#define MRVL_TOK_PLCR_DEFAULT_COLOR "default_color"
+#define MRVL_TOK_PLCR_DEFAULT_COLOR_GREEN "green"
+#define MRVL_TOK_PLCR_DEFAULT_COLOR_YELLOW "yellow"
+#define MRVL_TOK_PLCR_DEFAULT_COLOR_RED "red"
+
+/** Number of tokens in range a-b = 2. */
+#define MAX_RNG_TOKENS 2
+
+/** Maximum possible value of PCP. */
+#define MAX_PCP 7
+
+/** Maximum possible value of DSCP. */
+#define MAX_DSCP 63
+
+/** Global QoS configuration. */
+struct mrvl_qos_cfg *mrvl_qos_cfg;
+
+/**
+ * Convert string to uint32_t with extra checks for result correctness.
+ *
+ * @param string String to convert.
+ * @param val Conversion result.
+ * @returns 0 in case of success, negative value otherwise.
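+ *
+ * Behaviour sketch (illustrative values, not part of the driver):
+ *   get_val_securely("0x10", &v) returns 0 and sets v to 16,
+ *   get_val_securely("12ab", &v) returns -2 (trailing garbage),
+ *   get_val_securely("", &v)     returns -1 (empty string).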
+ */
+static int
+get_val_securely(const char *string, uint32_t *val)
+{
+	char *endptr;
+	size_t len = strlen(string);
+
+	if (len == 0)
+		return -1;
+
+	errno = 0;
+	*val = strtoul(string, &endptr, 0);
+	if (errno != 0 || RTE_PTR_DIFF(endptr, string) != len)
+		return -2;
+
+	return 0;
+}
+
+/**
+ * Read out-queue configuration from file.
+ *
+ * @param file Config file handle.
+ * @param port Port number.
+ * @param outq Out queue number.
+ * @param cfg Pointer to the Marvell QoS configuration structure.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+get_outq_cfg(struct rte_cfgfile *file, int port, int outq,
+	     struct mrvl_qos_cfg *cfg)
+{
+	char sec_name[32];
+	const char *entry;
+	uint32_t val;
+
+	snprintf(sec_name, sizeof(sec_name), "%s %d %s %d",
+		 MRVL_TOK_PORT, port, MRVL_TOK_TXQ, outq);
+
+	/* Skip non-existing */
+	if (rte_cfgfile_num_sections(file, sec_name, strlen(sec_name)) <= 0)
+		return 0;
+
+	/* Read scheduling mode */
+	entry = rte_cfgfile_get_entry(file, sec_name, MRVL_TOK_SCHED_MODE);
+	if (entry) {
+		if (!strncmp(entry, MRVL_TOK_SCHED_MODE_SP,
+			     strlen(MRVL_TOK_SCHED_MODE_SP))) {
+			cfg->port[port].outq[outq].sched_mode =
+				PP2_PPIO_SCHED_M_SP;
+		} else if (!strncmp(entry, MRVL_TOK_SCHED_MODE_WRR,
+				    strlen(MRVL_TOK_SCHED_MODE_WRR))) {
+			cfg->port[port].outq[outq].sched_mode =
+				PP2_PPIO_SCHED_M_WRR;
+		} else {
+			RTE_LOG(ERR, PMD, "Unknown token: %s\n", entry);
+			return -1;
+		}
+	}
+
+	/* Read wrr weight */
+	if (cfg->port[port].outq[outq].sched_mode == PP2_PPIO_SCHED_M_WRR) {
+		entry = rte_cfgfile_get_entry(file, sec_name,
+					      MRVL_TOK_WRR_WEIGHT);
+		if (entry) {
+			if (get_val_securely(entry, &val) < 0)
+				return -1;
+			cfg->port[port].outq[outq].weight = val;
+		}
+	}
+
+	/*
+	 * There's no point in setting rate limiting for a specific outq as
+	 * global port rate limiting has priority.
+	 */
+	if (cfg->port[port].rate_limit_enable) {
+		RTE_LOG(WARNING, PMD, "Port %d rate limiting already enabled\n",
+			port);
+		return 0;
+	}
+
+	entry = rte_cfgfile_get_entry(file, sec_name,
+				      MRVL_TOK_RATE_LIMIT_ENABLE);
+	if (entry) {
+		if (get_val_securely(entry, &val) < 0)
+			return -1;
+		cfg->port[port].outq[outq].rate_limit_enable = val;
+	}
+
+	if (!cfg->port[port].outq[outq].rate_limit_enable)
+		return 0;
+
+	/* Read CBS (in kB) */
+	entry = rte_cfgfile_get_entry(file, sec_name, MRVL_TOK_BURST_SIZE);
+	if (entry) {
+		if (get_val_securely(entry, &val) < 0)
+			return -1;
+		cfg->port[port].outq[outq].rate_limit_params.cbs = val;
+	}
+
+	/* Read CIR (in kbps) */
+	entry = rte_cfgfile_get_entry(file, sec_name, MRVL_TOK_RATE_LIMIT);
+	if (entry) {
+		if (get_val_securely(entry, &val) < 0)
+			return -1;
+		cfg->port[port].outq[outq].rate_limit_params.cir = val;
+	}
+
+	return 0;
+}
+
+/**
+ * Gets multiple-entry values and places them in a table.
+ *
+ * Entry can be anything, e.g. "1 2-3 5 6 7-9". This needs to be converted to
+ * table entries, respectively: {1, 2, 3, 5, 6, 7, 8, 9}.
+ * As all result table elements are always 1 byte long, we do not
+ * overcomplicate the function; we keep the API generic, verify that the
+ * element size has not changed, and keep it simple to extend to other sizes.
+ *
+ * This function is purely a utility; it does not print any errors and only
+ * returns distinct error codes.
+ *
+ * @param entry[in] Values string to parse.
+ * @param tab[out] Results table.
+ * @param elem_sz[in] Element size (in bytes).
+ * @param max_elems[in] Number of results table elements available.
+ * @param max_val[in] Maximum value allowed.
+ * @returns Number of correctly parsed elements in case of success.
+ * @retval -1 Wrong element size.
+ * @retval -2 More tokens than result table allows.
+ * @retval -3 Wrong range syntax.
+ * @retval -4 Wrong range values.
+ * @retval -5 Maximum value exceeded.
+ */
+static int
+get_entry_values(const char *entry, uint8_t *tab,
+		 size_t elem_sz, uint8_t max_elems, uint8_t max_val)
+{
+	/* There should not be more tokens than max elements.
+	 * Add 1 for error trap.
+	 */
+	char *tokens[max_elems + 1];
+
+	/* Begin, End + error trap = 3. */
+	char *rng_tokens[MAX_RNG_TOKENS + 1];
+	long beg, end;
+	uint32_t token_val;
+	int nb_tokens, nb_rng_tokens;
+	int i;
+	int values = 0;
+	long val;
+	char entry_cpy[CFG_VALUE_LEN];
+
+	if (elem_sz != 1)
+		return -1;
+
+	/* Copy the entry to safely use rte_strsplit(). */
+	strlcpy(entry_cpy, entry, RTE_DIM(entry_cpy));
+
+	/*
+	 * If there are more tokens than array size, rte_strsplit will
+	 * not return error, just array size.
+	 */
+	nb_tokens = rte_strsplit(entry_cpy, strlen(entry_cpy),
+				 tokens, max_elems + 1, ' ');
+
+	/* Quick check, will be refined later. */
+	if (nb_tokens > max_elems)
+		return -2;
+
+	for (i = 0; i < nb_tokens; ++i) {
+		if (strchr(tokens[i], '-') != NULL) {
+			/*
+			 * Split to begin and end tokens.
+			 * We want to catch error cases too, thus we leave
+			 * option for number of tokens to be more than 2.
+			 */
+			nb_rng_tokens = rte_strsplit(tokens[i],
+					strlen(tokens[i]), rng_tokens,
+					RTE_DIM(rng_tokens), '-');
+			if (nb_rng_tokens != 2)
+				return -3;
+
+			/* Range and sanity checks. */
+			if (get_val_securely(rng_tokens[0], &token_val) < 0)
+				return -4;
+			beg = token_val;
+			if (get_val_securely(rng_tokens[1], &token_val) < 0)
+				return -4;
+			end = token_val;
+			if (beg < 0 || beg > UCHAR_MAX ||
+			    end < 0 || end > UCHAR_MAX || end < beg)
+				return -4;
+
+			for (val = beg; val <= end; ++val) {
+				if (val > max_val)
+					return -5;
+
+				*tab = val;
+				tab = RTE_PTR_ADD(tab, elem_sz);
+				++values;
+				if (values >= max_elems)
+					return -2;
+			}
+		} else {
+			/* Single values. */
+			if (get_val_securely(tokens[i], &token_val) < 0)
+				return -5;
+			val = token_val;
+			if (val > max_val)
+				return -5;
+
+			*tab = val;
+			tab = RTE_PTR_ADD(tab, elem_sz);
+			++values;
+			if (values >= max_elems)
+				return -2;
+		}
+	}
+
+	return values;
+}
+
+/**
+ * Parse Traffic Class mapping configuration.
+ *
+ * @param file Config file handle.
+ * @param port Which port to look for.
+ * @param tc Which Traffic Class to look for.
+ * @param cfg[out] Parsing results.
+ * @returns 0 in case of success, negative value otherwise.
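+ *
+ * A hypothetical section this function consumes (illustration only):
+ *   [port 0 tc 1]
+ *   rxq = 1 2-3
+ *   pcp = 5 6
+ *   dscp = 40-47
+ *   default_color = yellow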
+ */ +static int +parse_tc_cfg(struct rte_cfgfile *file, int port, int tc, + struct mrvl_qos_cfg *cfg) +{ + char sec_name[32]; + const char *entry; + int n; + + snprintf(sec_name, sizeof(sec_name), "%s %d %s %d", + MRVL_TOK_PORT, port, MRVL_TOK_TC, tc); + + /* Skip non-existing */ + if (rte_cfgfile_num_sections(file, sec_name, strlen(sec_name)) <= 0) + return 0; + + entry = rte_cfgfile_get_entry(file, sec_name, MRVL_TOK_RXQ); + if (entry) { + n = get_entry_values(entry, + cfg->port[port].tc[tc].inq, + sizeof(cfg->port[port].tc[tc].inq[0]), + RTE_DIM(cfg->port[port].tc[tc].inq), + MRVL_PP2_RXQ_MAX); + if (n < 0) { + RTE_LOG(ERR, PMD, "Error %d while parsing: %s\n", + n, entry); + return n; + } + cfg->port[port].tc[tc].inqs = n; + } + + entry = rte_cfgfile_get_entry(file, sec_name, MRVL_TOK_PCP); + if (entry) { + n = get_entry_values(entry, + cfg->port[port].tc[tc].pcp, + sizeof(cfg->port[port].tc[tc].pcp[0]), + RTE_DIM(cfg->port[port].tc[tc].pcp), + MAX_PCP); + if (n < 0) { + RTE_LOG(ERR, PMD, "Error %d while parsing: %s\n", + n, entry); + return n; + } + cfg->port[port].tc[tc].pcps = n; + } + + entry = rte_cfgfile_get_entry(file, sec_name, MRVL_TOK_DSCP); + if (entry) { + n = get_entry_values(entry, + cfg->port[port].tc[tc].dscp, + sizeof(cfg->port[port].tc[tc].dscp[0]), + RTE_DIM(cfg->port[port].tc[tc].dscp), + MAX_DSCP); + if (n < 0) { + RTE_LOG(ERR, PMD, "Error %d while parsing: %s\n", + n, entry); + return n; + } + cfg->port[port].tc[tc].dscps = n; + } + + entry = rte_cfgfile_get_entry(file, sec_name, + MRVL_TOK_PLCR_DEFAULT_COLOR); + if (entry) { + if (!strncmp(entry, MRVL_TOK_PLCR_DEFAULT_COLOR_GREEN, + sizeof(MRVL_TOK_PLCR_DEFAULT_COLOR_GREEN))) { + cfg->port[port].tc[tc].color = PP2_PPIO_COLOR_GREEN; + } else if (!strncmp(entry, MRVL_TOK_PLCR_DEFAULT_COLOR_YELLOW, + sizeof(MRVL_TOK_PLCR_DEFAULT_COLOR_YELLOW))) { + cfg->port[port].tc[tc].color = PP2_PPIO_COLOR_YELLOW; + } else if (!strncmp(entry, MRVL_TOK_PLCR_DEFAULT_COLOR_RED, + sizeof(MRVL_TOK_PLCR_DEFAULT_COLOR_RED))) { + cfg->port[port].tc[tc].color = PP2_PPIO_COLOR_RED; + } else { + RTE_LOG(ERR, PMD, "Error while parsing: %s\n", entry); + return -1; + } + } + + return 0; +} + +/** + * Parse QoS configuration - rte_kvargs_process handler. + * + * Opens configuration file and parses its content. + * + * @param key Unused. + * @param path Path to config file. + * @param extra_args Pointer to configuration structure. + * @returns 0 in case of success, exits otherwise. + */ +int +mrvl_get_qoscfg(const char *key __rte_unused, const char *path, + void *extra_args) +{ + struct mrvl_qos_cfg **cfg = extra_args; + struct rte_cfgfile *file = rte_cfgfile_load(path, 0); + uint32_t val; + int n, i, ret; + const char *entry; + char sec_name[32]; + + if (file == NULL) + rte_exit(EXIT_FAILURE, "Cannot load configuration %s\n", path); + + /* Create configuration. This is never accessed on the fast path, + * so we can ignore socket. + */ + *cfg = rte_zmalloc("mrvl_qos_cfg", sizeof(struct mrvl_qos_cfg), 0); + if (*cfg == NULL) + rte_exit(EXIT_FAILURE, "Cannot allocate configuration %s\n", + path); + + n = rte_cfgfile_num_sections(file, MRVL_TOK_PORT, + sizeof(MRVL_TOK_PORT) - 1); + + if (n == 0) { + /* This is weird, but not bad. */ + RTE_LOG(WARNING, PMD, "Empty configuration file?\n"); + return 0; + } + + /* Use the number of ports given as vdev parameters. 
+	 */
+	for (n = 0; n < (PP2_NUM_ETH_PPIO * PP2_NUM_PKT_PROC); ++n) {
+		snprintf(sec_name, sizeof(sec_name), "%s %d %s",
+			 MRVL_TOK_PORT, n, MRVL_TOK_DEFAULT);
+
+		/* Skip ports non-existing in configuration. */
+		if (rte_cfgfile_num_sections(file, sec_name,
+					     strlen(sec_name)) <= 0) {
+			(*cfg)->port[n].use_global_defaults = 1;
+			(*cfg)->port[n].mapping_priority =
+				PP2_CLS_QOS_TBL_VLAN_IP_PRI;
+			continue;
+		}
+
+		entry = rte_cfgfile_get_entry(file, sec_name,
+					      MRVL_TOK_DEFAULT_TC);
+		if (entry) {
+			if (get_val_securely(entry, &val) < 0 ||
+			    val > UCHAR_MAX)
+				return -1;
+			(*cfg)->port[n].default_tc = (uint8_t)val;
+		} else {
+			RTE_LOG(ERR, PMD,
+				"Default Traffic Class required in custom configuration!\n");
+			return -1;
+		}
+
+		entry = rte_cfgfile_get_entry(file, sec_name,
+					      MRVL_TOK_PLCR_ENABLE);
+		if (entry) {
+			if (get_val_securely(entry, &val) < 0)
+				return -1;
+			(*cfg)->port[n].policer_enable = val;
+		}
+
+		if ((*cfg)->port[n].policer_enable) {
+			enum pp2_cls_plcr_token_unit unit;
+
+			/* Read policer token unit */
+			entry = rte_cfgfile_get_entry(file, sec_name,
+						      MRVL_TOK_PLCR_UNIT);
+			if (entry) {
+				if (!strncmp(entry, MRVL_TOK_PLCR_UNIT_BYTES,
+					sizeof(MRVL_TOK_PLCR_UNIT_BYTES))) {
+					unit = PP2_CLS_PLCR_BYTES_TOKEN_UNIT;
+				} else if (!strncmp(entry,
+						MRVL_TOK_PLCR_UNIT_PACKETS,
+					sizeof(MRVL_TOK_PLCR_UNIT_PACKETS))) {
+					unit = PP2_CLS_PLCR_PACKETS_TOKEN_UNIT;
+				} else {
+					RTE_LOG(ERR, PMD, "Unknown token: %s\n",
+						entry);
+					return -1;
+				}
+				(*cfg)->port[n].policer_params.token_unit =
+					unit;
+			}
+
+			/* Read policer color mode */
+			entry = rte_cfgfile_get_entry(file, sec_name,
+						      MRVL_TOK_PLCR_COLOR);
+			if (entry) {
+				enum pp2_cls_plcr_color_mode mode;
+
+				if (!strncmp(entry, MRVL_TOK_PLCR_COLOR_BLIND,
+					sizeof(MRVL_TOK_PLCR_COLOR_BLIND))) {
+					mode = PP2_CLS_PLCR_COLOR_BLIND_MODE;
+				} else if (!strncmp(entry,
+						MRVL_TOK_PLCR_COLOR_AWARE,
+					sizeof(MRVL_TOK_PLCR_COLOR_AWARE))) {
+					mode = PP2_CLS_PLCR_COLOR_AWARE_MODE;
+				} else {
+					RTE_LOG(ERR, PMD,
+						"Error in parsing: %s\n",
+						entry);
+					return -1;
+				}
+				(*cfg)->port[n].policer_params.color_mode =
+					mode;
+			}
+
+			/* Read policer cir */
+			entry = rte_cfgfile_get_entry(file, sec_name,
+						      MRVL_TOK_PLCR_CIR);
+			if (entry) {
+				if (get_val_securely(entry, &val) < 0)
+					return -1;
+				(*cfg)->port[n].policer_params.cir = val;
+			}
+
+			/* Read policer cbs */
+			entry = rte_cfgfile_get_entry(file, sec_name,
+						      MRVL_TOK_PLCR_CBS);
+			if (entry) {
+				if (get_val_securely(entry, &val) < 0)
+					return -1;
+				(*cfg)->port[n].policer_params.cbs = val;
+			}
+
+			/* Read policer ebs */
+			entry = rte_cfgfile_get_entry(file, sec_name,
+						      MRVL_TOK_PLCR_EBS);
+			if (entry) {
+				if (get_val_securely(entry, &val) < 0)
+					return -1;
+				(*cfg)->port[n].policer_params.ebs = val;
+			}
+		}
+
+		/*
+		 * Read per-port rate limiting. Setting that will
+		 * disable per-queue rate limiting.
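+		 *
+		 * A hypothetical "port <n> default" section using the tokens
+		 * read below (values are examples only):
+		 *   rate_limit_enable = 1
+		 *   rate_limit = 1000   (CIR, in kbps)
+		 *   burst_size = 64     (CBS, in kB)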
+		 */
+		entry = rte_cfgfile_get_entry(file, sec_name,
+					      MRVL_TOK_RATE_LIMIT_ENABLE);
+		if (entry) {
+			if (get_val_securely(entry, &val) < 0)
+				return -1;
+			(*cfg)->port[n].rate_limit_enable = val;
+		}
+
+		if ((*cfg)->port[n].rate_limit_enable) {
+			entry = rte_cfgfile_get_entry(file, sec_name,
+						      MRVL_TOK_BURST_SIZE);
+			if (entry) {
+				if (get_val_securely(entry, &val) < 0)
+					return -1;
+				(*cfg)->port[n].rate_limit_params.cbs = val;
+			}
+
+			entry = rte_cfgfile_get_entry(file, sec_name,
+						      MRVL_TOK_RATE_LIMIT);
+			if (entry) {
+				if (get_val_securely(entry, &val) < 0)
+					return -1;
+				(*cfg)->port[n].rate_limit_params.cir = val;
+			}
+		}
+
+		entry = rte_cfgfile_get_entry(file, sec_name,
+					      MRVL_TOK_MAPPING_PRIORITY);
+		if (entry) {
+			if (!strncmp(entry, MRVL_TOK_VLAN_IP,
+				     sizeof(MRVL_TOK_VLAN_IP)))
+				(*cfg)->port[n].mapping_priority =
+					PP2_CLS_QOS_TBL_VLAN_IP_PRI;
+			else if (!strncmp(entry, MRVL_TOK_IP_VLAN,
+					  sizeof(MRVL_TOK_IP_VLAN)))
+				(*cfg)->port[n].mapping_priority =
+					PP2_CLS_QOS_TBL_IP_VLAN_PRI;
+			else if (!strncmp(entry, MRVL_TOK_IP,
+					  sizeof(MRVL_TOK_IP)))
+				(*cfg)->port[n].mapping_priority =
+					PP2_CLS_QOS_TBL_IP_PRI;
+			else if (!strncmp(entry, MRVL_TOK_VLAN,
+					  sizeof(MRVL_TOK_VLAN)))
+				(*cfg)->port[n].mapping_priority =
+					PP2_CLS_QOS_TBL_VLAN_PRI;
+			else
+				rte_exit(EXIT_FAILURE,
+					 "Error in parsing %s value (%s)!\n",
+					 MRVL_TOK_MAPPING_PRIORITY, entry);
+		} else {
+			(*cfg)->port[n].mapping_priority =
+				PP2_CLS_QOS_TBL_VLAN_IP_PRI;
+		}
+
+		for (i = 0; i < MRVL_PP2_RXQ_MAX; ++i) {
+			ret = get_outq_cfg(file, n, i, *cfg);
+			if (ret < 0)
+				rte_exit(EXIT_FAILURE,
+					 "Error %d parsing port %d outq %d!\n",
+					 ret, n, i);
+		}
+
+		for (i = 0; i < MRVL_PP2_TC_MAX; ++i) {
+			ret = parse_tc_cfg(file, n, i, *cfg);
+			if (ret < 0)
+				rte_exit(EXIT_FAILURE,
+					 "Error %d parsing port %d tc %d!\n",
+					 ret, n, i);
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * Setup Traffic Class.
+ *
+ * Fill in TC parameters in single MUSDK TC config entry.
+ * @param param TC parameters entry.
+ * @param inqs Number of MUSDK in-queues in this TC.
+ * @param bpool Bpool for this TC.
+ * @param color Default color for this TC.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+setup_tc(struct pp2_ppio_tc_params *param, uint8_t inqs,
+	 struct pp2_bpool *bpool, enum pp2_ppio_color color)
+{
+	struct pp2_ppio_inq_params *inq_params;
+
+	param->pkt_offset = MRVL_PKT_OFFS;
+	param->pools[0] = bpool;
+	param->default_color = color;
+
+	inq_params = rte_zmalloc_socket("inq_params",
+					inqs * sizeof(*inq_params),
+					0, rte_socket_id());
+	if (!inq_params)
+		return -ENOMEM;
+
+	param->num_in_qs = inqs;
+
+	/* Release old config if necessary. */
+	if (param->inqs_params)
+		rte_free(param->inqs_params);
+
+	param->inqs_params = inq_params;
+
+	return 0;
+}
+
+/**
+ * Setup ingress policer.
+ *
+ * @param priv Port's private data.
+ * @param params Pointer to the policer's configuration.
+ * @returns 0 in case of success, negative values otherwise.
+ */
+static int
+setup_policer(struct mrvl_priv *priv, struct pp2_cls_plcr_params *params)
+{
+	char match[16];
+	int ret;
+
+	snprintf(match, sizeof(match), "policer-%d:%d",
+		 priv->pp_id, priv->ppio_id);
+	params->match = match;
+
+	ret = pp2_cls_plcr_init(params, &priv->policer);
+	if (ret) {
+		RTE_LOG(ERR, PMD, "Failed to setup %s\n", match);
+		return -1;
+	}
+
+	priv->ppio_params.inqs_params.plcr = priv->policer;
+
+	return 0;
+}
+
+/**
+ * Configure RX Queues in a given port.
+ *
+ * Sets up RX queues, their Traffic Classes and DPDK rxq->(TC,inq) mapping.
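+ *
+ * Illustration (hypothetical numbers, not from any particular config): with
+ * default_tc = 0 and a TC 1 that lists pcp = 5 6, the reverse map built
+ * here ends up with pcp_cos_map[5].tc = pcp_cos_map[6].tc = 1 while every
+ * other PCP stays mapped to TC 0.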
+ *
+ * @param priv Port's private data
+ * @param portid DPDK port ID
+ * @param max_queues Maximum number of queues to configure.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+int
+mrvl_configure_rxqs(struct mrvl_priv *priv, uint16_t portid,
+		    uint16_t max_queues)
+{
+	size_t i, tc;
+
+	if (mrvl_qos_cfg == NULL ||
+	    mrvl_qos_cfg->port[portid].use_global_defaults) {
+		/*
+		 * No port configuration, use default: 1 TC, no QoS,
+		 * TC color set to green.
+		 */
+		priv->ppio_params.inqs_params.num_tcs = 1;
+		setup_tc(&priv->ppio_params.inqs_params.tcs_params[0],
+			 max_queues, priv->bpool, PP2_PPIO_COLOR_GREEN);
+
+		/* Direct mapping of queues i.e. 0->0, 1->1 etc. */
+		for (i = 0; i < max_queues; ++i) {
+			priv->rxq_map[i].tc = 0;
+			priv->rxq_map[i].inq = i;
+		}
+		return 0;
+	}
+
+	/* We need only a subset of configuration. */
+	struct port_cfg *port_cfg = &mrvl_qos_cfg->port[portid];
+
+	priv->qos_tbl_params.type = port_cfg->mapping_priority;
+
+	/*
+	 * We need to reverse mapping, from tc->pcp (better from usability
+	 * point of view) to pcp->tc (configurable in MUSDK).
+	 * First, set all map elements to "default".
+	 */
+	for (i = 0; i < RTE_DIM(priv->qos_tbl_params.pcp_cos_map); ++i)
+		priv->qos_tbl_params.pcp_cos_map[i].tc = port_cfg->default_tc;
+
+	/* Then, fill in all known values. */
+	for (tc = 0; tc < RTE_DIM(port_cfg->tc); ++tc) {
+		if (port_cfg->tc[tc].pcps > RTE_DIM(port_cfg->tc[0].pcp)) {
+			/* Better safe than sorry. */
+			RTE_LOG(ERR, PMD,
+				"Too many PCPs configured in TC %zu!\n", tc);
+			return -1;
+		}
+		for (i = 0; i < port_cfg->tc[tc].pcps; ++i) {
+			priv->qos_tbl_params.pcp_cos_map[
+				port_cfg->tc[tc].pcp[i]].tc = tc;
+		}
+	}
+
+	/*
+	 * The same logic goes with DSCP.
+	 * First, set all map elements to "default".
+	 */
+	for (i = 0; i < RTE_DIM(priv->qos_tbl_params.dscp_cos_map); ++i)
+		priv->qos_tbl_params.dscp_cos_map[i].tc =
+			port_cfg->default_tc;
+
+	/* Fill in all known values. */
+	for (tc = 0; tc < RTE_DIM(port_cfg->tc); ++tc) {
+		if (port_cfg->tc[tc].dscps > RTE_DIM(port_cfg->tc[0].dscp)) {
+			/* Better safe than sorry. */
+			RTE_LOG(ERR, PMD,
+				"Too many DSCPs configured in TC %zu!\n", tc);
+			return -1;
+		}
+		for (i = 0; i < port_cfg->tc[tc].dscps; ++i) {
+			priv->qos_tbl_params.dscp_cos_map[
+				port_cfg->tc[tc].dscp[i]].tc = tc;
+		}
+	}
+
+	/*
+	 * Similar logic applies to queue mapping.
+	 * We need only to store qid->tc mapping,
+	 * to know TC when queue is read.
+	 */
+	for (i = 0; i < RTE_DIM(priv->rxq_map); ++i)
+		priv->rxq_map[i].tc = MRVL_UNKNOWN_TC;
+
+	/* Set up DPDKq->(TC,inq) mapping. */
+	for (tc = 0; tc < RTE_DIM(port_cfg->tc); ++tc) {
+		if (port_cfg->tc[tc].inqs > RTE_DIM(port_cfg->tc[0].inq)) {
+			/* Overflow. */
+			RTE_LOG(ERR, PMD,
+				"Too many RX queues configured per TC %zu!\n",
+				tc);
+			return -1;
+		}
+		for (i = 0; i < port_cfg->tc[tc].inqs; ++i) {
+			uint8_t idx = port_cfg->tc[tc].inq[i];
+
+			if (idx >= RTE_DIM(priv->rxq_map)) {
+				RTE_LOG(ERR, PMD, "Bad queue index %d!\n",
+					idx);
+				return -1;
+			}
+
+			priv->rxq_map[idx].tc = tc;
+			priv->rxq_map[idx].inq = i;
+		}
+	}
+
+	/*
+	 * Set up TC configuration. TCs need to be sequenced: 0, 1, 2
+	 * with no gaps. Empty TC means end of processing.
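+	 * E.g. (illustration) if tc[0] and tc[1] are populated and tc[2]
+	 * has no in-queues, the loop below stops at i == 2 and num_tcs
+	 * becomes 2.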
+ */ + for (i = 0; i < MRVL_PP2_TC_MAX; ++i) { + if (port_cfg->tc[i].inqs == 0) + break; + setup_tc(&priv->ppio_params.inqs_params.tcs_params[i], + port_cfg->tc[i].inqs, + priv->bpool, port_cfg->tc[i].color); + } + + priv->ppio_params.inqs_params.num_tcs = i; + + if (port_cfg->policer_enable) + return setup_policer(priv, &port_cfg->policer_params); + + return 0; +} + +/** + * Configure TX Queues in a given port. + * + * Sets up TX queues egress scheduler and limiter. + * + * @param priv Port's private data + * @param portid DPDK port ID + * @param max_queues Maximum number of queues to configure. + * @returns 0 in case of success, negative value otherwise. + */ +int +mrvl_configure_txqs(struct mrvl_priv *priv, uint16_t portid, + uint16_t max_queues) +{ + /* We need only a subset of configuration. */ + struct port_cfg *port_cfg = &mrvl_qos_cfg->port[portid]; + int i; + + if (mrvl_qos_cfg == NULL) + return 0; + + priv->ppio_params.rate_limit_enable = port_cfg->rate_limit_enable; + if (port_cfg->rate_limit_enable) + priv->ppio_params.rate_limit_params = + port_cfg->rate_limit_params; + + for (i = 0; i < max_queues; i++) { + struct pp2_ppio_outq_params *params = + &priv->ppio_params.outqs_params.outqs_params[i]; + + params->sched_mode = port_cfg->outq[i].sched_mode; + params->weight = port_cfg->outq[i].weight; + params->rate_limit_enable = port_cfg->outq[i].rate_limit_enable; + params->rate_limit_params = port_cfg->outq[i].rate_limit_params; + } + + return 0; +} + +/** + * Start QoS mapping. + * + * Finalize QoS table configuration and initialize it in SDK. It can be done + * only after port is started, so we have a valid ppio reference. + * + * @param priv Port's private (configuration) data. + * @returns 0 in case of success, exits otherwise. + */ +int +mrvl_start_qos_mapping(struct mrvl_priv *priv) +{ + size_t i; + + if (priv->ppio == NULL) { + RTE_LOG(ERR, PMD, "ppio must not be NULL here!\n"); + return -1; + } + + for (i = 0; i < RTE_DIM(priv->qos_tbl_params.pcp_cos_map); ++i) + priv->qos_tbl_params.pcp_cos_map[i].ppio = priv->ppio; + + for (i = 0; i < RTE_DIM(priv->qos_tbl_params.dscp_cos_map); ++i) + priv->qos_tbl_params.dscp_cos_map[i].ppio = priv->ppio; + + /* Initialize Classifier QoS table. */ + + return pp2_cls_qos_tbl_init(&priv->qos_tbl_params, &priv->qos_tbl); +} diff --git a/drivers/net/mvpp2/mrvl_qos.h b/drivers/net/mvpp2/mrvl_qos.h new file mode 100644 index 00000000..fa9ddecb --- /dev/null +++ b/drivers/net/mvpp2/mrvl_qos.h @@ -0,0 +1,107 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Marvell International Ltd. + * Copyright(c) 2017 Semihalf. + * All rights reserved. + */ + +#ifndef _MRVL_QOS_H_ +#define _MRVL_QOS_H_ + +#include + +#include "mrvl_ethdev.h" + +/** Code Points per Traffic Class. Equals max(DSCP, PCP). */ +#define MRVL_CP_PER_TC (64) + +/** Value used as "unknown". */ +#define MRVL_UNKNOWN_TC (0xFF) + +/* QoS config. 
+ */
+struct mrvl_qos_cfg {
+	struct port_cfg {
+		int rate_limit_enable;
+		struct pp2_ppio_rate_limit_params rate_limit_params;
+		struct {
+			uint8_t inq[MRVL_PP2_RXQ_MAX];
+			uint8_t dscp[MRVL_CP_PER_TC];
+			uint8_t pcp[MRVL_CP_PER_TC];
+			uint8_t inqs;
+			uint8_t dscps;
+			uint8_t pcps;
+			enum pp2_ppio_color color;
+		} tc[MRVL_PP2_TC_MAX];
+		struct {
+			enum pp2_ppio_outq_sched_mode sched_mode;
+			uint8_t weight;
+			int rate_limit_enable;
+			struct pp2_ppio_rate_limit_params rate_limit_params;
+		} outq[MRVL_PP2_RXQ_MAX];
+		enum pp2_cls_qos_tbl_type mapping_priority;
+		uint16_t inqs;
+		uint16_t outqs;
+		uint8_t default_tc;
+		uint8_t use_global_defaults;
+		struct pp2_cls_plcr_params policer_params;
+		uint8_t policer_enable;
+	} port[RTE_MAX_ETHPORTS];
+};
+
+/** Global QoS configuration. */
+extern struct mrvl_qos_cfg *mrvl_qos_cfg;
+
+/**
+ * Parse QoS configuration - rte_kvargs_process handler.
+ *
+ * Opens configuration file and parses its content.
+ *
+ * @param key Unused.
+ * @param path Path to config file.
+ * @param extra_args Pointer to configuration structure.
+ * @returns 0 in case of success, exits otherwise.
+ */
+int
+mrvl_get_qoscfg(const char *key __rte_unused, const char *path,
+		void *extra_args);
+
+/**
+ * Configure RX Queues in a given port.
+ *
+ * Sets up RX queues, their Traffic Classes and DPDK rxq->(TC,inq) mapping.
+ *
+ * @param priv Port's private data
+ * @param portid DPDK port ID
+ * @param max_queues Maximum number of queues to configure.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+int
+mrvl_configure_rxqs(struct mrvl_priv *priv, uint16_t portid,
+		    uint16_t max_queues);
+
+/**
+ * Configure TX Queues in a given port.
+ *
+ * Sets up TX queues egress scheduler and limiter.
+ *
+ * @param priv Port's private data
+ * @param portid DPDK port ID
+ * @param max_queues Maximum number of queues to configure.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+int
+mrvl_configure_txqs(struct mrvl_priv *priv, uint16_t portid,
+		    uint16_t max_queues);
+
+/**
+ * Start QoS mapping.
+ *
+ * Finalize QoS table configuration and initialize it in SDK. It can be done
+ * only after port is started, so we have a valid ppio reference.
+ *
+ * @param priv Port's private (configuration) data.
+ * @returns 0 in case of success, negative value otherwise.
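+ *
+ * Hypothetical call site (illustration only): called from the PMD's
+ * dev_start path once priv->ppio is valid, e.g.
+ *   ret = mrvl_start_qos_mapping(priv);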
+ */ +int +mrvl_start_qos_mapping(struct mrvl_priv *priv); + +#endif /* _MRVL_QOS_H_ */ diff --git a/drivers/net/mvpp2/rte_pmd_mvpp2_version.map b/drivers/net/mvpp2/rte_pmd_mvpp2_version.map new file mode 100644 index 00000000..a7530317 --- /dev/null +++ b/drivers/net/mvpp2/rte_pmd_mvpp2_version.map @@ -0,0 +1,3 @@ +DPDK_17.11 { + local: *; +}; diff --git a/drivers/net/nfp/Makefile b/drivers/net/nfp/Makefile index aa3b68a4..ab4e0a7d 100644 --- a/drivers/net/nfp/Makefile +++ b/drivers/net/nfp/Makefile @@ -20,11 +20,24 @@ EXPORT_MAP := rte_pmd_nfp_version.map LIBABIVER := 1 +VPATH += $(SRCDIR)/nfpcore + +SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_cppcore.c +SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_cpp_pcie_ops.c +SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_mutex.c +SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_resource.c +SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_crc.c +SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_mip.c +SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_nffw.c +SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_hwinfo.c +SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_rtsym.c +SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_nsp.c +SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_nsp_cmds.c +SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_nsp_eth.c + # # all source are stored in SRCS-y # SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_net.c -SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_nfpu.c -SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_nspu.c include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/net/nfp/nfp_net.c b/drivers/net/nfp/nfp_net.c index e5bfde62..faad1ee9 100644 --- a/drivers/net/nfp/nfp_net.c +++ b/drivers/net/nfp/nfp_net.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2015 Netronome Systems, Inc. + * Copyright (c) 2014-2018 Netronome Systems, Inc. * All rights reserved. * * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation. @@ -55,7 +55,13 @@ #include #include -#include "nfp_nfpu.h" +#include "nfpcore/nfp_cpp.h" +#include "nfpcore/nfp_nffw.h" +#include "nfpcore/nfp_hwinfo.h" +#include "nfpcore/nfp_mip.h" +#include "nfpcore/nfp_rtsym.h" +#include "nfpcore/nfp_nsp.h" + #include "nfp_net_pmd.h" #include "nfp_net_logs.h" #include "nfp_net_ctrl.h" @@ -103,13 +109,11 @@ static int nfp_net_rss_reta_write(struct rte_eth_dev *dev, uint16_t reta_size); static int nfp_net_rss_hash_write(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf); +static int nfp_set_mac_addr(struct rte_eth_dev *dev, + struct ether_addr *mac_addr); -/* - * The offset of the queue controller queues in the PCIe Target. These - * happen to be at the same offset on the NFP6000 and the NFP3200 so - * we use a single macro here. - */ -#define NFP_PCIE_QUEUE(_q) (0x800 * ((_q) & 0xff)) +/* The offset of the queue controller queues in the PCIe Target */ +#define NFP_PCIE_QUEUE(_q) (0x80000 + (NFP_QCP_QUEUE_ADDR_SZ * ((_q) & 0xff))) /* Maximum value which can be added to a queue with one transaction */ #define NFP_QCP_MAX_ADD 0x7f @@ -213,57 +217,6 @@ nn_cfg_writeq(struct nfp_net_hw *hw, int off, uint64_t val) nn_writeq(rte_cpu_to_le_64(val), hw->ctrl_bar + off); } -/* - * Atomically reads link status information from global structure rte_eth_dev. - * - * @param dev - * - Pointer to the structure rte_eth_dev to read from. - * - Pointer to the buffer to be saved with the link status. - * - * @return - * - On success, zero. - * - On failure, negative value. 
- */ -static inline int -nfp_net_dev_atomic_read_link_status(struct rte_eth_dev *dev, - struct rte_eth_link *link) -{ - struct rte_eth_link *dst = link; - struct rte_eth_link *src = &dev->data->dev_link; - - if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, - *(uint64_t *)src) == 0) - return -1; - - return 0; -} - -/* - * Atomically writes the link status information into global - * structure rte_eth_dev. - * - * @param dev - * - Pointer to the structure rte_eth_dev to read from. - * - Pointer to the buffer to be saved with the link status. - * - * @return - * - On success, zero. - * - On failure, negative value. - */ -static inline int -nfp_net_dev_atomic_write_link_status(struct rte_eth_dev *dev, - struct rte_eth_link *link) -{ - struct rte_eth_link *dst = &dev->data->dev_link; - struct rte_eth_link *src = link; - - if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, - *(uint64_t *)src) == 0) - return -1; - - return 0; -} - static void nfp_net_rx_queue_release_mbufs(struct nfp_net_rxq *rxq) { @@ -310,7 +263,7 @@ nfp_net_tx_queue_release_mbufs(struct nfp_net_txq *txq) for (i = 0; i < txq->tx_count; i++) { if (txq->txbufs[i].mbuf) { - rte_pktmbuf_free(txq->txbufs[i].mbuf); + rte_pktmbuf_free_seg(txq->txbufs[i].mbuf); txq->txbufs[i].mbuf = NULL; } } @@ -343,7 +296,7 @@ __nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t update) uint32_t new; struct timespec wait; - PMD_DRV_LOG(DEBUG, "Writing to the configuration queue (%p)...\n", + PMD_DRV_LOG(DEBUG, "Writing to the configuration queue (%p)...", hw->qcp_cfg); if (hw->qcp_cfg == NULL) @@ -354,7 +307,7 @@ __nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t update) wait.tv_sec = 0; wait.tv_nsec = 1000000; - PMD_DRV_LOG(DEBUG, "Polling for update ack...\n"); + PMD_DRV_LOG(DEBUG, "Polling for update ack..."); /* Poll update field, waiting for NFP to ack the config */ for (cnt = 0; ; cnt++) { @@ -372,7 +325,7 @@ __nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t update) } nanosleep(&wait, 0); /* waiting for a 1ms */ } - PMD_DRV_LOG(DEBUG, "Ack DONE\n"); + PMD_DRV_LOG(DEBUG, "Ack DONE"); return 0; } @@ -390,7 +343,7 @@ nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t ctrl, uint32_t update) { uint32_t err; - PMD_DRV_LOG(DEBUG, "nfp_net_reconfig: ctrl=%08x update=%08x\n", + PMD_DRV_LOG(DEBUG, "nfp_net_reconfig: ctrl=%08x update=%08x", ctrl, update); rte_spinlock_lock(&hw->reconfig_lock); @@ -427,8 +380,6 @@ nfp_net_configure(struct rte_eth_dev *dev) struct rte_eth_conf *dev_conf; struct rte_eth_rxmode *rxmode; struct rte_eth_txmode *txmode; - uint32_t new_ctrl = 0; - uint32_t update = 0; struct nfp_net_hw *hw; hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -454,96 +405,15 @@ nfp_net_configure(struct rte_eth_dev *dev) } /* Checking RX mode */ - if (rxmode->mq_mode & ETH_MQ_RX_RSS) { - if (hw->cap & NFP_NET_CFG_CTRL_RSS) { - update = NFP_NET_CFG_UPDATE_RSS; - new_ctrl = NFP_NET_CFG_CTRL_RSS; - } else { - PMD_INIT_LOG(INFO, "RSS not supported"); - return -EINVAL; - } - } - - if (rxmode->split_hdr_size) { - PMD_INIT_LOG(INFO, "rxmode does not support split header"); + if (rxmode->mq_mode & ETH_MQ_RX_RSS && + !(hw->cap & NFP_NET_CFG_CTRL_RSS)) { + PMD_INIT_LOG(INFO, "RSS not supported"); return -EINVAL; } - if (rxmode->hw_ip_checksum) { - if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM) { - new_ctrl |= NFP_NET_CFG_CTRL_RXCSUM; - } else { - PMD_INIT_LOG(INFO, "RXCSUM not supported"); - return -EINVAL; - } - } - - if (rxmode->hw_vlan_filter) { - PMD_INIT_LOG(INFO, "VLAN filter not supported"); - return -EINVAL; - } - - if 
(rxmode->hw_vlan_strip) {
-		if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN) {
-			new_ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
-		} else {
-			PMD_INIT_LOG(INFO, "hw vlan strip not supported");
-			return -EINVAL;
-		}
-	}
-
-	if (rxmode->hw_vlan_extend) {
-		PMD_INIT_LOG(INFO, "VLAN extended not supported");
-		return -EINVAL;
-	}
-
-	if (rxmode->jumbo_frame)
-		hw->mtu = rxmode->max_rx_pkt_len;
-
-	if (!rxmode->hw_strip_crc)
-		PMD_INIT_LOG(INFO, "HW does strip CRC and it is not configurable");
-
-	if (rxmode->enable_scatter) {
-		PMD_INIT_LOG(INFO, "Scatter not supported");
-		return -EINVAL;
-	}
-
-	/* If next capabilities are supported, configure them by default */
-
-	/* VLAN insertion */
-	if (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)
-		new_ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
-
-	/* L2 broadcast */
-	if (hw->cap & NFP_NET_CFG_CTRL_L2BC)
-		new_ctrl |= NFP_NET_CFG_CTRL_L2BC;
-
-	/* L2 multicast */
-	if (hw->cap & NFP_NET_CFG_CTRL_L2MC)
-		new_ctrl |= NFP_NET_CFG_CTRL_L2MC;
-
-	/* TX checksum offload */
-	if (hw->cap & NFP_NET_CFG_CTRL_TXCSUM)
-		new_ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
-
-	/* LSO offload */
-	if (hw->cap & NFP_NET_CFG_CTRL_LSO)
-		new_ctrl |= NFP_NET_CFG_CTRL_LSO;
-
-	/* RX gather */
-	if (hw->cap & NFP_NET_CFG_CTRL_GATHER)
-		new_ctrl |= NFP_NET_CFG_CTRL_GATHER;
-
-	if (!new_ctrl)
-		return 0;
-
-	update |= NFP_NET_CFG_UPDATE_GEN;
-
-	nn_cfg_writel(hw, NFP_NET_CFG_CTRL, new_ctrl);
-	if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
-		return -EIO;
-
-	hw->ctrl = new_ctrl;
+	/* Checking RX offloads */
+	if (!(rxmode->offloads & DEV_RX_OFFLOAD_CRC_STRIP))
+		PMD_INIT_LOG(INFO, "HW always strips CRC; it is not configurable");
 
 	return 0;
 }
@@ -625,47 +495,29 @@ nfp_net_cfg_queue_setup(struct nfp_net_hw *hw)
 #define ETH_ADDR_LEN 6
 
 static void
-nfp_eth_copy_mac_reverse(uint8_t *dst, const uint8_t *src)
+nfp_eth_copy_mac(uint8_t *dst, const uint8_t *src)
 {
 	int i;
 
 	for (i = 0; i < ETH_ADDR_LEN; i++)
-		dst[ETH_ADDR_LEN - i - 1] = src[i];
+		dst[i] = src[i];
 }
 
 static int
 nfp_net_pf_read_mac(struct nfp_net_hw *hw, int port)
 {
-	union eth_table_entry *entry;
-	int idx, i;
-
-	idx = port;
-	entry = hw->eth_table;
-
-	/* Reading NFP ethernet table obtained before */
-	for (i = 0; i < NSP_ETH_MAX_COUNT; i++) {
-		if (!(entry->port & NSP_ETH_PORT_LANES_MASK)) {
-			/* port not in use */
-			entry++;
-			continue;
-		}
-		if (idx == 0)
-			break;
-		idx--;
-		entry++;
-	}
-
-	if (i == NSP_ETH_MAX_COUNT)
-		return -EINVAL;
+	struct nfp_eth_table *nfp_eth_table;
 
+	nfp_eth_table = nfp_eth_read_ports(hw->cpp);
 	/*
 	 * hw points to port0 private data. We need hw now pointing to
 	 * right port.
 	 */
 	hw += port;
-	nfp_eth_copy_mac_reverse((uint8_t *)&hw->mac_addr,
-				 (uint8_t *)&entry->mac_addr);
+	nfp_eth_copy_mac((uint8_t *)&hw->mac_addr,
+			 (uint8_t *)&nfp_eth_table->ports[port].mac_addr);
 
+	free(nfp_eth_table);
 	return 0;
 }
@@ -675,7 +527,7 @@ nfp_net_vf_read_mac(struct nfp_net_hw *hw)
 	uint32_t tmp;
 
 	tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR));
-	memcpy(&hw->mac_addr[0], &tmp, sizeof(struct ether_addr));
+	memcpy(&hw->mac_addr[0], &tmp, 4);
 
 	tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR + 4));
 	memcpy(&hw->mac_addr[4], &tmp, 2);
@@ -695,6 +547,37 @@ nfp_net_write_mac(struct nfp_net_hw *hw, uint8_t *mac)
 		  hw->ctrl_bar + NFP_NET_CFG_MACADDR + 6);
 }
 
+static int
+nfp_set_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
+{
+	struct nfp_net_hw *hw;
+	uint32_t update, ctrl;
+
+	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) &&
+	    !(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR)) {
+		PMD_INIT_LOG(INFO, "MAC address cannot be changed while"
+				   " the port is enabled");
+		return -EBUSY;
+	}
+
+	/* Writing new MAC to the specific port BAR address */
+	nfp_net_write_mac(hw, (uint8_t *)mac_addr);
+
+	/* Signal the NIC about the change */
+	update = NFP_NET_CFG_UPDATE_MACADDR;
+	ctrl = hw->ctrl | NFP_NET_CFG_CTRL_LIVE_ADDR;
+	if (nfp_net_reconfig(hw, ctrl, update) < 0) {
+		PMD_INIT_LOG(INFO, "MAC address update failed");
+		return -EIO;
+	}
+	return 0;
+}
+
 static int
 nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
 			   struct rte_intr_handle *intr_handle)
@@ -729,7 +612,7 @@ nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
 	 */
 		nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(i), i + 1);
 		intr_handle->intr_vec[i] = i + 1;
-		PMD_INIT_LOG(DEBUG, "intr_vec[%d]= %d\n", i,
+		PMD_INIT_LOG(DEBUG, "intr_vec[%d]= %d", i,
 			     intr_handle->intr_vec[i]);
 	}
 }
@@ -739,15 +622,75 @@ nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
 	return 0;
 }
 
+static uint32_t
+nfp_check_offloads(struct rte_eth_dev *dev)
+{
+	struct nfp_net_hw *hw;
+	struct rte_eth_conf *dev_conf;
+	struct rte_eth_rxmode *rxmode;
+	struct rte_eth_txmode *txmode;
+	uint32_t ctrl = 0;
+
+	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	dev_conf = &dev->data->dev_conf;
+	rxmode = &dev_conf->rxmode;
+	txmode = &dev_conf->txmode;
+
+	if (rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) {
+		if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM)
+			ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
+	}
+
+	if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+		if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN)
+			ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
+	}
+
+	if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
+		hw->mtu = rxmode->max_rx_pkt_len;
+
+	if (txmode->offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
+		ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
+
+	/* L2 broadcast */
+	if (hw->cap & NFP_NET_CFG_CTRL_L2BC)
+		ctrl |= NFP_NET_CFG_CTRL_L2BC;
+
+	/* L2 multicast */
+	if (hw->cap & NFP_NET_CFG_CTRL_L2MC)
+		ctrl |= NFP_NET_CFG_CTRL_L2MC;
+
+	/* TX checksum offload */
+	if (txmode->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM ||
+	    txmode->offloads & DEV_TX_OFFLOAD_UDP_CKSUM ||
+	    txmode->offloads & DEV_TX_OFFLOAD_TCP_CKSUM)
+		ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
+
+	/* LSO offload */
+	if (txmode->offloads & DEV_TX_OFFLOAD_TCP_TSO) {
+		if (hw->cap & NFP_NET_CFG_CTRL_LSO)
+			ctrl |= NFP_NET_CFG_CTRL_LSO;
+		else
+			ctrl |= NFP_NET_CFG_CTRL_LSO2;
+	}
+
+	/* RX gather */
+	if (txmode->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+		ctrl |= NFP_NET_CFG_CTRL_GATHER;
+
+	return ctrl;
+}
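+
+/*
+ * Illustration (not part of the upstream change): if the application asks
+ * for DEV_RX_OFFLOAD_IPV4_CKSUM and DEV_TX_OFFLOAD_TCP_TSO and hw->cap
+ * advertises NFP_NET_CFG_CTRL_RXCSUM and NFP_NET_CFG_CTRL_LSO, the word
+ * returned above is RXCSUM | LSO (plus L2BC/L2MC when advertised);
+ * nfp_net_start() below then or's in NFP_NET_CFG_CTRL_ENABLE before the
+ * value is written back via nfp_net_reconfig().
+ */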
+ static int nfp_net_start(struct rte_eth_dev *dev) { struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; - struct rte_eth_conf *dev_conf; - struct rte_eth_rxmode *rxmode; uint32_t new_ctrl, update = 0; struct nfp_net_hw *hw; + struct rte_eth_conf *dev_conf; + struct rte_eth_rxmode *rxmode; uint32_t intr_vector; int ret; @@ -758,9 +701,6 @@ nfp_net_start(struct rte_eth_dev *dev) /* Disabling queues just in case... */ nfp_net_disable_queues(dev); - /* Writing configuration parameters in the device */ - nfp_net_params_setup(hw); - /* Enabling the required queues in the device */ nfp_net_enable_queues(dev); @@ -795,21 +735,22 @@ nfp_net_start(struct rte_eth_dev *dev) rte_intr_enable(intr_handle); + new_ctrl = nfp_check_offloads(dev); + + /* Writing configuration parameters in the device */ + nfp_net_params_setup(hw); + dev_conf = &dev->data->dev_conf; rxmode = &dev_conf->rxmode; - /* Checking RX mode */ if (rxmode->mq_mode & ETH_MQ_RX_RSS) { - if (hw->cap & NFP_NET_CFG_CTRL_RSS) { - if (!nfp_net_rss_config_default(dev)) - update |= NFP_NET_CFG_UPDATE_RSS; - } else { - PMD_INIT_LOG(INFO, "RSS not supported"); - return -EINVAL; - } + nfp_net_rss_config_default(dev); + update |= NFP_NET_CFG_UPDATE_RSS; + new_ctrl |= NFP_NET_CFG_CTRL_RSS; } + /* Enable device */ - new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_ENABLE; + new_ctrl |= NFP_NET_CFG_CTRL_ENABLE; update |= NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING; @@ -831,7 +772,7 @@ nfp_net_start(struct rte_eth_dev *dev) if (hw->is_pf) /* Configure the physical port up */ - nfp_nsp_eth_config(hw->nspu_desc, hw->pf_port_idx, 1); + nfp_eth_set_configured(hw->cpp, hw->pf_port_idx, 1); hw->ctrl = new_ctrl; @@ -882,7 +823,7 @@ nfp_net_stop(struct rte_eth_dev *dev) if (hw->is_pf) /* Configure the physical port down */ - nfp_nsp_eth_config(hw->nspu_desc, hw->pf_port_idx, 0); + nfp_eth_set_configured(hw->cpp, hw->pf_port_idx, 0); } /* Reset and stop device. The device can not be restarted. 
*/ @@ -936,7 +877,7 @@ nfp_net_promisc_enable(struct rte_eth_dev *dev) uint32_t new_ctrl, update = 0; struct nfp_net_hw *hw; - PMD_DRV_LOG(DEBUG, "Promiscuous mode enable\n"); + PMD_DRV_LOG(DEBUG, "Promiscuous mode enable"); hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -946,7 +887,7 @@ nfp_net_promisc_enable(struct rte_eth_dev *dev) } if (hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) { - PMD_DRV_LOG(INFO, "Promiscuous mode already enabled\n"); + PMD_DRV_LOG(INFO, "Promiscuous mode already enabled"); return; } @@ -972,7 +913,7 @@ nfp_net_promisc_disable(struct rte_eth_dev *dev) hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); if ((hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) == 0) { - PMD_DRV_LOG(INFO, "Promiscuous mode already disabled\n"); + PMD_DRV_LOG(INFO, "Promiscuous mode already disabled"); return; } @@ -999,8 +940,9 @@ static int nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete) { struct nfp_net_hw *hw; - struct rte_eth_link link, old; + struct rte_eth_link link; uint32_t nn_link_status; + int ret; static const uint32_t ls_to_ethtool[] = { [NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED] = ETH_SPEED_NUM_NONE, @@ -1013,13 +955,10 @@ nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete) [NFP_NET_CFG_STS_LINK_RATE_100G] = ETH_SPEED_NUM_100G, }; - PMD_DRV_LOG(DEBUG, "Link update\n"); + PMD_DRV_LOG(DEBUG, "Link update"); hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); - memset(&old, 0, sizeof(old)); - nfp_net_dev_atomic_read_link_status(dev, &old); - nn_link_status = nn_cfg_readl(hw, NFP_NET_CFG_STS); memset(&link, 0, sizeof(struct rte_eth_link)); @@ -1037,16 +976,14 @@ nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete) else link.link_speed = ls_to_ethtool[nn_link_status]; - if (old.link_status != link.link_status) { - nfp_net_dev_atomic_write_link_status(dev, &link); + ret = rte_eth_linkstatus_set(dev, &link); + if (ret == 0) { if (link.link_status) - PMD_DRV_LOG(INFO, "NIC Link is Up\n"); + PMD_DRV_LOG(INFO, "NIC Link is Up"); else - PMD_DRV_LOG(INFO, "NIC Link is Down\n"); - return 0; + PMD_DRV_LOG(INFO, "NIC Link is Down"); } - - return -1; + return ret; } static int @@ -1214,7 +1151,6 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); - dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev); dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues; dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues; dev_info->min_rx_bufsize = ETHER_MIN_MTU; @@ -1230,6 +1166,8 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM; + dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME; + if (hw->cap & NFP_NET_CFG_CTRL_TXVLAN) dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT; @@ -1238,6 +1176,12 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) DEV_TX_OFFLOAD_UDP_CKSUM | DEV_TX_OFFLOAD_TCP_CKSUM; + if (hw->cap & NFP_NET_CFG_CTRL_LSO_ANY) + dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO; + + if (hw->cap & NFP_NET_CFG_CTRL_GATHER) + dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_MULTI_SEGS; + dev_info->default_rxconf = (struct rte_eth_rxconf) { .rx_thresh = { .pthresh = DEFAULT_RX_PTHRESH, @@ -1256,8 +1200,6 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) }, .tx_free_thresh = DEFAULT_TX_FREE_THRESH, .tx_rs_thresh = DEFAULT_TX_RSBIT_THRESH, - .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | - 
ETH_TXQ_FLAGS_NOOFFLOADS, }; dev_info->flow_type_rss_offloads = ETH_RSS_NONFRAG_IPV4_TCP | @@ -1268,12 +1210,9 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->reta_size = NFP_NET_CFG_RSS_ITBL_SZ; dev_info->hash_key_size = NFP_NET_CFG_RSS_KEY_SZ; - dev_info->speed_capa = ETH_SPEED_NUM_1G | ETH_LINK_SPEED_10G | - ETH_SPEED_NUM_25G | ETH_SPEED_NUM_40G | - ETH_SPEED_NUM_50G | ETH_LINK_SPEED_100G; - - if (hw->cap & NFP_NET_CFG_CTRL_LSO) - dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO; + dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G | + ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G | + ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G; } static const uint32_t * @@ -1376,18 +1315,17 @@ nfp_net_dev_link_status_print(struct rte_eth_dev *dev) struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_eth_link link; - memset(&link, 0, sizeof(link)); - nfp_net_dev_atomic_read_link_status(dev, &link); + rte_eth_linkstatus_get(dev, &link); if (link.link_status) - RTE_LOG(INFO, PMD, "Port %d: Link Up - speed %u Mbps - %s\n", - dev->data->port_id, link.link_speed, - link.link_duplex == ETH_LINK_FULL_DUPLEX - ? "full-duplex" : "half-duplex"); + PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s", + dev->data->port_id, link.link_speed, + link.link_duplex == ETH_LINK_FULL_DUPLEX + ? "full-duplex" : "half-duplex"); else - RTE_LOG(INFO, PMD, " Port %d: Link Down\n", - dev->data->port_id); + PMD_DRV_LOG(INFO, " Port %d: Link Down", + dev->data->port_id); - RTE_LOG(INFO, PMD, "PCI Address: %04d:%02d:%02d:%d\n", + PMD_DRV_LOG(INFO, "PCI Address: %04d:%02d:%02d:%d", pci_dev->addr.domain, pci_dev->addr.bus, pci_dev->addr.devid, pci_dev->addr.function); } @@ -1428,11 +1366,9 @@ nfp_net_dev_interrupt_handler(void *param) struct rte_eth_link link; struct rte_eth_dev *dev = (struct rte_eth_dev *)param; - PMD_DRV_LOG(DEBUG, "We got a LSC interrupt!!!\n"); + PMD_DRV_LOG(DEBUG, "We got a LSC interrupt!!!"); - /* get the link status */ - memset(&link, 0, sizeof(link)); - nfp_net_dev_atomic_read_link_status(dev, &link); + rte_eth_linkstatus_get(dev, &link); nfp_net_link_update(dev, 0); @@ -1449,7 +1385,7 @@ nfp_net_dev_interrupt_handler(void *param) if (rte_eal_alarm_set(timeout * 1000, nfp_net_dev_interrupt_delayed_handler, (void *)dev) < 0) { - RTE_LOG(ERR, PMD, "Error setting alarm"); + PMD_INIT_LOG(ERR, "Error setting alarm"); /* Unmasking */ nfp_net_irq_unmask(dev); } @@ -1534,7 +1470,7 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev, if (((nb_desc * sizeof(struct nfp_net_rx_desc)) % 128) != 0 || (nb_desc > NFP_NET_MAX_RX_DESC) || (nb_desc < NFP_NET_MIN_RX_DESC)) { - RTE_LOG(ERR, PMD, "Wrong nb_desc value\n"); + PMD_DRV_LOG(ERR, "Wrong nb_desc value"); return -EINVAL; } @@ -1572,8 +1508,6 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev, rxq->rx_count = nb_desc; rxq->port_id = dev->data->port_id; rxq->rx_free_thresh = rx_conf->rx_free_thresh; - rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 
0
-				  : ETHER_CRC_LEN);
 	rxq->drop_en = rx_conf->rx_drop_en;
 
 	/*
@@ -1587,7 +1521,7 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
 				   socket_id);
 
 	if (tz == NULL) {
-		RTE_LOG(ERR, PMD, "Error allocatig rx dma\n");
+		PMD_DRV_LOG(ERR, "Error allocating rx dma");
 		nfp_net_rx_queue_release(rxq);
 		return -ENOMEM;
 	}
@@ -1605,7 +1539,7 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
 		return -ENOMEM;
 	}
 
-	PMD_RX_LOG(DEBUG, "rxbufs=%p hw_ring=%p dma_addr=0x%" PRIx64 "\n",
+	PMD_RX_LOG(DEBUG, "rxbufs=%p hw_ring=%p dma_addr=0x%" PRIx64,
 		   rxq->rxbufs, rxq->rxds, (unsigned long int)rxq->dma);
 
 	nfp_net_reset_rx_queue(rxq);
@@ -1630,7 +1564,7 @@ nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq)
 	uint64_t dma_addr;
 	unsigned i;
 
-	PMD_RX_LOG(DEBUG, "nfp_net_rx_fill_freelist for %u descriptors\n",
+	PMD_RX_LOG(DEBUG, "nfp_net_rx_fill_freelist for %u descriptors",
 		   rxq->rx_count);
 
 	for (i = 0; i < rxq->rx_count; i++) {
@@ -1638,7 +1572,7 @@ nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq)
 		struct rte_mbuf *mbuf = rte_pktmbuf_alloc(rxq->mem_pool);
 
 		if (mbuf == NULL) {
-			RTE_LOG(ERR, PMD, "RX mbuf alloc failed queue_id=%u\n",
+			PMD_DRV_LOG(ERR, "RX mbuf alloc failed queue_id=%u",
 				(unsigned)rxq->qidx);
 			return -ENOMEM;
 		}
@@ -1650,14 +1584,14 @@ nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq)
 		rxd->fld.dma_addr_hi = (dma_addr >> 32) & 0xff;
 		rxd->fld.dma_addr_lo = dma_addr & 0xffffffff;
 		rxe[i].mbuf = mbuf;
-		PMD_RX_LOG(DEBUG, "[%d]: %" PRIx64 "\n", i, dma_addr);
+		PMD_RX_LOG(DEBUG, "[%d]: %" PRIx64, i, dma_addr);
 	}
 
 	/* Make sure all writes are flushed before telling the hardware */
 	rte_wmb();
 
 	/* Not advertising the whole ring as the firmware gets confused if so */
-	PMD_RX_LOG(DEBUG, "Increment FL write pointer in %u\n",
+	PMD_RX_LOG(DEBUG, "Increment FL write pointer in %u",
 		   rxq->rx_count - 1);
 
 	nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, rxq->rx_count - 1);
@@ -1683,7 +1617,7 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	if (((nb_desc * sizeof(struct nfp_net_tx_desc)) % 128) != 0 ||
 	    (nb_desc > NFP_NET_MAX_TX_DESC) ||
 	    (nb_desc < NFP_NET_MIN_TX_DESC)) {
-		RTE_LOG(ERR, PMD, "Wrong nb_desc value\n");
+		PMD_DRV_LOG(ERR, "Wrong nb_desc value");
 		return -EINVAL;
 	}
@@ -1692,10 +1626,10 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 					   DEFAULT_TX_FREE_THRESH);
 
 	if (tx_free_thresh > (nb_desc)) {
-		RTE_LOG(ERR, PMD,
+		PMD_DRV_LOG(ERR,
 			"tx_free_thresh must be less than the number of TX "
 			"descriptors. 
(tx_free_thresh=%u port=%d " - "queue=%d)\n", (unsigned int)tx_free_thresh, + "queue=%d)", (unsigned int)tx_free_thresh, dev->data->port_id, (int)queue_idx); return -(EINVAL); } @@ -1705,7 +1639,7 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, * calling nfp_net_stop */ if (dev->data->tx_queues[queue_idx]) { - PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d\n", + PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d", queue_idx); nfp_net_tx_queue_release(dev->data->tx_queues[queue_idx]); dev->data->tx_queues[queue_idx] = NULL; @@ -1715,7 +1649,7 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct nfp_net_txq), RTE_CACHE_LINE_SIZE, socket_id); if (txq == NULL) { - RTE_LOG(ERR, PMD, "Error allocating tx dma\n"); + PMD_DRV_LOG(ERR, "Error allocating tx dma"); return -ENOMEM; } @@ -1729,7 +1663,7 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, NFP_NET_MAX_TX_DESC, NFP_MEMZONE_ALIGN, socket_id); if (tz == NULL) { - RTE_LOG(ERR, PMD, "Error allocating tx dma\n"); + PMD_DRV_LOG(ERR, "Error allocating tx dma"); nfp_net_tx_queue_release(txq); return -ENOMEM; } @@ -1746,7 +1680,6 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, txq->qcp_q = hw->tx_bar + NFP_QCP_QUEUE_OFF(txq->tx_qcidx); txq->port_id = dev->data->port_id; - txq->txq_flags = tx_conf->txq_flags; /* Saving physical and virtual addresses for the TX ring */ txq->dma = (uint64_t)tz->iova; @@ -1760,7 +1693,7 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, nfp_net_tx_queue_release(txq); return -ENOMEM; } - PMD_TX_LOG(DEBUG, "txbufs=%p hw_ring=%p dma_addr=0x%" PRIx64 "\n", + PMD_TX_LOG(DEBUG, "txbufs=%p hw_ring=%p dma_addr=0x%" PRIx64, txq->txbufs, txq->txds, (unsigned long int)txq->dma); nfp_net_reset_tx_queue(txq); @@ -1786,7 +1719,7 @@ nfp_net_tx_tso(struct nfp_net_txq *txq, struct nfp_net_tx_desc *txd, uint64_t ol_flags; struct nfp_net_hw *hw = txq->hw; - if (!(hw->cap & NFP_NET_CFG_CTRL_LSO)) + if (!(hw->cap & NFP_NET_CFG_CTRL_LSO_ANY)) goto clean_txd; ol_flags = mb->ol_flags; @@ -1794,15 +1727,19 @@ nfp_net_tx_tso(struct nfp_net_txq *txq, struct nfp_net_tx_desc *txd, if (!(ol_flags & PKT_TX_TCP_SEG)) goto clean_txd; - txd->l4_offset = mb->l2_len + mb->l3_len + mb->l4_len; - txd->lso = rte_cpu_to_le_16(mb->tso_segsz); + txd->l3_offset = mb->l2_len; + txd->l4_offset = mb->l2_len + mb->l3_len; + txd->lso_hdrlen = mb->l2_len + mb->l3_len + mb->l4_len; + txd->mss = rte_cpu_to_le_16(mb->tso_segsz); txd->flags = PCIE_DESC_TX_LSO; return; clean_txd: txd->flags = 0; + txd->l3_offset = 0; txd->l4_offset = 0; - txd->lso = 0; + txd->lso_hdrlen = 0; + txd->mss = 0; } /* nfp_net_tx_cksum - Set TX CSUM offload flags in TX descriptor */ @@ -1888,14 +1825,10 @@ nfp_net_set_hash(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd, if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS)) return; - if (NFD_CFG_MAJOR_VERSION_of(hw->ver) <= 3) { - if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS)) - return; - - hash = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_OFFSET); - hash_type = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_TYPE_OFFSET); - - } else if (NFP_DESC_META_LEN(rxd)) { + /* this is true for new firmwares */ + if (likely(((hw->cap & NFP_NET_CFG_CTRL_RSS2) || + (NFD_CFG_MAJOR_VERSION_of(hw->ver) == 4)) && + NFP_DESC_META_LEN(rxd))) { /* * new metadata api: * <---- 32 bit -----> @@ -1928,7 +1861,11 @@ nfp_net_set_hash(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd, return; } } else { - return; + 
if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS)) + return; + + hash = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_OFFSET); + hash_type = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_TYPE_OFFSET); } mbuf->hash.rss = hash; @@ -2019,16 +1956,16 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) break; } + rxds = &rxq->rxds[rxq->rd_p]; + if ((rxds->rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0) + break; + /* * Memory barrier to ensure that we won't do other * reads before the DD bit. */ rte_rmb(); - rxds = &rxq->rxds[rxq->rd_p]; - if ((rxds->rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0) - break; - /* * We got a packet. Let's alloc a new mbuff for refilling the * free descriptor ring as soon as possible @@ -2051,7 +1988,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) mb = rxb->mbuf; rxb->mbuf = new_mb; - PMD_RX_LOG(DEBUG, "Packet len: %u, mbuf_size: %u\n", + PMD_RX_LOG(DEBUG, "Packet len: %u, mbuf_size: %u", rxds->rxd.data_len, rxq->mbuf_size); /* Size of this segment */ @@ -2089,6 +2026,8 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) mb->nb_segs = 1; mb->next = NULL; + mb->port = rxq->port_id; + /* Checking the RSS flag */ nfp_net_set_hash(rxq, rxds, mb); @@ -2120,7 +2059,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) if (nb_hold == 0) return nb_hold; - PMD_RX_LOG(DEBUG, "RX port_id=%u queue_id=%u, %d packets received\n", + PMD_RX_LOG(DEBUG, "RX port_id=%u queue_id=%u, %d packets received", rxq->port_id, (unsigned int)rxq->qidx, nb_hold); nb_hold += rxq->nb_rx_hold; @@ -2131,7 +2070,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) */ rte_wmb(); if (nb_hold > rxq->rx_free_thresh) { - PMD_RX_LOG(DEBUG, "port=%u queue=%u nb_hold=%u avail=%u\n", + PMD_RX_LOG(DEBUG, "port=%u queue=%u nb_hold=%u avail=%u", rxq->port_id, (unsigned int)rxq->qidx, (unsigned)nb_hold, (unsigned)avail); nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, nb_hold); @@ -2155,14 +2094,14 @@ nfp_net_tx_free_bufs(struct nfp_net_txq *txq) int todo; PMD_TX_LOG(DEBUG, "queue %u. Check for descriptor with a complete" - " status\n", txq->qidx); + " status", txq->qidx); /* Work out how many packets have been sent */ qcp_rd_p = nfp_qcp_read(txq->qcp_q, NFP_QCP_READ_PTR); if (qcp_rd_p == txq->rd_p) { PMD_TX_LOG(DEBUG, "queue %u: It seems harrier is not sending " - "packets (%u, %u)\n", txq->qidx, + "packets (%u, %u)", txq->qidx, qcp_rd_p, txq->rd_p); return 0; } @@ -2172,7 +2111,7 @@ nfp_net_tx_free_bufs(struct nfp_net_txq *txq) else todo = qcp_rd_p + txq->tx_count - txq->rd_p; - PMD_TX_LOG(DEBUG, "qcp_rd_p %u, txq->rd_p: %u, qcp->rd_p: %u\n", + PMD_TX_LOG(DEBUG, "qcp_rd_p %u, txq->rd_p: %u, qcp->rd_p: %u", qcp_rd_p, txq->rd_p, txq->rd_p); if (todo == 0) @@ -2226,7 +2165,7 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) hw = txq->hw; txds = &txq->txds[txq->wr_p]; - PMD_TX_LOG(DEBUG, "working for queue %u at pos %d and %u packets\n", + PMD_TX_LOG(DEBUG, "working for queue %u at pos %d and %u packets", txq->qidx, txq->wr_p, nb_pkts); if ((nfp_free_tx_desc(txq) < nb_pkts) || (nfp_net_txq_full(txq))) @@ -2240,7 +2179,7 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) i = 0; issued_descs = 0; - PMD_TX_LOG(DEBUG, "queue: %u. Sending %u packets\n", + PMD_TX_LOG(DEBUG, "queue: %u. 
Sending %u packets", txq->qidx, nb_pkts); /* Sending packets */ while ((i < nb_pkts) && free_descs) { @@ -2299,7 +2238,7 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) dma_size = pkt->data_len; dma_addr = rte_mbuf_data_iova(pkt); PMD_TX_LOG(DEBUG, "Working with mbuf at dma address:" - "%" PRIx64 "\n", dma_addr); + "%" PRIx64 "", dma_addr); /* Filling descriptors fields */ txds->dma_len = dma_size; @@ -2349,7 +2288,7 @@ nfp_net_vlan_offload_set(struct rte_eth_dev *dev, int mask) if ((mask & ETH_VLAN_FILTER_OFFLOAD) || (mask & ETH_VLAN_EXTEND_OFFLOAD)) - RTE_LOG(INFO, PMD, "No support for ETH_VLAN_FILTER_OFFLOAD or" + PMD_DRV_LOG(INFO, "No support for ETH_VLAN_FILTER_OFFLOAD or" " ETH_VLAN_EXTEND_OFFLOAD"); /* Enable vlan strip if it is not configured yet */ @@ -2386,9 +2325,9 @@ nfp_net_rss_reta_write(struct rte_eth_dev *dev, NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) { - RTE_LOG(ERR, PMD, "The size of hash lookup table configured " + PMD_DRV_LOG(ERR, "The size of hash lookup table configured " "(%d) doesn't match the number hardware can supported " - "(%d)\n", reta_size, NFP_NET_CFG_RSS_ITBL_SZ); + "(%d)", reta_size, NFP_NET_CFG_RSS_ITBL_SZ); return -EINVAL; } @@ -2467,9 +2406,9 @@ nfp_net_reta_query(struct rte_eth_dev *dev, return -EINVAL; if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) { - RTE_LOG(ERR, PMD, "The size of hash lookup table configured " + PMD_DRV_LOG(ERR, "The size of hash lookup table configured " "(%d) doesn't match the number hardware can supported " - "(%d)\n", reta_size, NFP_NET_CFG_RSS_ITBL_SZ); + "(%d)", reta_size, NFP_NET_CFG_RSS_ITBL_SZ); return -EINVAL; } @@ -2555,14 +2494,14 @@ nfp_net_rss_hash_update(struct rte_eth_dev *dev, /* Checking if RSS is enabled */ if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS)) { if (rss_hf != 0) { /* Enable RSS? 
*/ - RTE_LOG(ERR, PMD, "RSS unsupported\n"); + PMD_DRV_LOG(ERR, "RSS unsupported"); return -EINVAL; } return 0; /* Nothing to do */ } if (rss_conf->rss_key_len > NFP_NET_CFG_RSS_KEY_SZ) { - RTE_LOG(ERR, PMD, "hash key too long\n"); + PMD_DRV_LOG(ERR, "hash key too long"); return -EINVAL; } @@ -2634,7 +2573,7 @@ nfp_net_rss_config_default(struct rte_eth_dev *dev) uint16_t queue; int i, j, ret; - RTE_LOG(INFO, PMD, "setting default RSS conf for %u queues\n", + PMD_DRV_LOG(INFO, "setting default RSS conf for %u queues", rx_queues); nfp_reta_conf[0].mask = ~0x0; @@ -2654,7 +2593,7 @@ nfp_net_rss_config_default(struct rte_eth_dev *dev) dev_conf = &dev->data->dev_conf; if (!dev_conf) { - RTE_LOG(INFO, PMD, "wrong rss conf"); + PMD_DRV_LOG(INFO, "wrong rss conf"); return -EINVAL; } rss_conf = dev_conf->rx_adv_conf.rss_conf; @@ -2679,6 +2618,7 @@ static const struct eth_dev_ops nfp_net_eth_dev_ops = { .dev_infos_get = nfp_net_infos_get, .dev_supported_ptypes_get = nfp_net_supported_ptypes_get, .mtu_set = nfp_net_dev_mtu_set, + .mac_addr_set = nfp_set_mac_addr, .vlan_offload_set = nfp_net_vlan_offload_set, .reta_update = nfp_net_reta_update, .reta_query = nfp_net_reta_query, @@ -2734,10 +2674,8 @@ nfp_net_init(struct rte_eth_dev *eth_dev) uint64_t tx_bar_off = 0, rx_bar_off = 0; uint32_t start_q; int stride = 4; - - nspu_desc_t *nspu_desc = NULL; - uint64_t bar_offset; int port = 0; + int err; PMD_INIT_FUNC_TRACE(); @@ -2747,18 +2685,17 @@ nfp_net_init(struct rte_eth_dev *eth_dev) (pci_dev->id.device_id == PCI_DEVICE_ID_NFP6000_PF_NIC)) { port = get_pf_port_number(eth_dev->data->name); if (port < 0 || port > 7) { - RTE_LOG(ERR, PMD, "Port value is wrong\n"); + PMD_DRV_LOG(ERR, "Port value is wrong"); return -ENODEV; } - PMD_INIT_LOG(DEBUG, "Working with PF port value %d\n", port); + PMD_INIT_LOG(DEBUG, "Working with PF port value %d", port); /* This points to port 0 private data */ hwport0 = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); /* This points to the specific port private data */ hw = &hwport0[port]; - hw->pf_port_idx = port; } else { hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); hwport0 = 0; @@ -2786,26 +2723,21 @@ nfp_net_init(struct rte_eth_dev *eth_dev) hw->ctrl_bar = (uint8_t *)pci_dev->mem_resource[0].addr; if (hw->ctrl_bar == NULL) { - RTE_LOG(ERR, PMD, - "hw->ctrl_bar is NULL. BAR0 not configured\n"); + PMD_DRV_LOG(ERR, + "hw->ctrl_bar is NULL. BAR0 not configured"); return -ENODEV; } if (hw->is_pf && port == 0) { - nspu_desc = hw->nspu_desc; - - if (nfp_nsp_map_ctrl_bar(nspu_desc, &bar_offset) != 0) { - /* - * A firmware should be there after PF probe so this - * should not happen. 
- */ - RTE_LOG(ERR, PMD, "PF BAR symbol resolution failed\n"); - return -ENODEV; + hw->ctrl_bar = nfp_rtsym_map(hw->sym_tbl, "_pf0_net_bar0", + hw->total_ports * 32768, + &hw->ctrl_area); + if (!hw->ctrl_bar) { + printf("nfp_rtsym_map fails for _pf0_net_bar0"); + return -EIO; } - /* vNIC PF control BAR is a subset of PF PCI device BAR */ - hw->ctrl_bar += bar_offset; - PMD_INIT_LOG(DEBUG, "ctrl bar: %p\n", hw->ctrl_bar); + PMD_INIT_LOG(DEBUG, "ctrl bar: %p", hw->ctrl_bar); } if (port > 0) { @@ -2817,7 +2749,7 @@ nfp_net_init(struct rte_eth_dev *eth_dev) (port * NFP_PF_CSR_SLICE_SIZE); } - PMD_INIT_LOG(DEBUG, "ctrl bar: %p\n", hw->ctrl_bar); + PMD_INIT_LOG(DEBUG, "ctrl bar: %p", hw->ctrl_bar); hw->max_rx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_RXRINGS); hw->max_tx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_TXRINGS); @@ -2828,31 +2760,34 @@ nfp_net_init(struct rte_eth_dev *eth_dev) case PCI_DEVICE_ID_NFP6000_PF_NIC: case PCI_DEVICE_ID_NFP6000_VF_NIC: start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ); - tx_bar_off = NFP_PCIE_QUEUE(start_q); + tx_bar_off = start_q * NFP_QCP_QUEUE_ADDR_SZ; start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ); - rx_bar_off = NFP_PCIE_QUEUE(start_q); + rx_bar_off = start_q * NFP_QCP_QUEUE_ADDR_SZ; break; default: - RTE_LOG(ERR, PMD, "nfp_net: no device ID matching\n"); - return -ENODEV; + PMD_DRV_LOG(ERR, "nfp_net: no device ID matching"); + err = -ENODEV; + goto dev_err_ctrl_map; } - PMD_INIT_LOG(DEBUG, "tx_bar_off: 0x%" PRIx64 "\n", tx_bar_off); - PMD_INIT_LOG(DEBUG, "rx_bar_off: 0x%" PRIx64 "\n", rx_bar_off); + PMD_INIT_LOG(DEBUG, "tx_bar_off: 0x%" PRIx64 "", tx_bar_off); + PMD_INIT_LOG(DEBUG, "rx_bar_off: 0x%" PRIx64 "", rx_bar_off); if (hw->is_pf && port == 0) { /* configure access to tx/rx vNIC BARs */ - nfp_nsp_map_queues_bar(nspu_desc, &bar_offset); - PMD_INIT_LOG(DEBUG, "tx/rx bar_offset: %" PRIx64 "\n", - bar_offset); - hwport0->hw_queues = (uint8_t *)pci_dev->mem_resource[0].addr; - - /* vNIC PF tx/rx BARs are a subset of PF PCI device */ - hwport0->hw_queues += bar_offset; + hwport0->hw_queues = nfp_cpp_map_area(hw->cpp, 0, 0, + NFP_PCIE_QUEUE(0), + NFP_QCP_QUEUE_AREA_SZ, + &hw->hwqueues_area); + + if (!hwport0->hw_queues) { + printf("nfp_cpp_map_area fails for net.qc"); + err = -EIO; + goto dev_err_ctrl_map; + } - /* Lets seize the chance to read eth table from hw */ - if (nfp_nsp_eth_read_table(nspu_desc, &hw->eth_table)) - return -ENODEV; + PMD_INIT_LOG(DEBUG, "tx/rx bar address: 0x%p", + hwport0->hw_queues); } if (hw->is_pf) { @@ -2877,14 +2812,20 @@ nfp_net_init(struct rte_eth_dev *eth_dev) hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU); hw->mtu = ETHER_MTU; + /* VLAN insertion is incompatible with LSOv2 */ + if (hw->cap & NFP_NET_CFG_CTRL_LSO2) + hw->cap &= ~NFP_NET_CFG_CTRL_TXVLAN; + if (NFD_CFG_MAJOR_VERSION_of(hw->ver) < 2) hw->rx_offset = NFP_NET_RX_OFFSET; else hw->rx_offset = nn_cfg_readl(hw, NFP_NET_CFG_RX_OFFSET_ADDR); - PMD_INIT_LOG(INFO, "VER: %#x, Maximum supported MTU: %d", - hw->ver, hw->max_mtu); - PMD_INIT_LOG(INFO, "CAP: %#x, %s%s%s%s%s%s%s%s%s%s%s", hw->cap, + PMD_INIT_LOG(INFO, "VER: %u.%u, Maximum supported MTU: %d", + NFD_CFG_MAJOR_VERSION_of(hw->ver), + NFD_CFG_MINOR_VERSION_of(hw->ver), hw->max_mtu); + + PMD_INIT_LOG(INFO, "CAP: %#x, %s%s%s%s%s%s%s%s%s%s%s%s%s%s", hw->cap, hw->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "", hw->cap & NFP_NET_CFG_CTRL_L2BC ? "L2BCFILT " : "", hw->cap & NFP_NET_CFG_CTRL_L2MC ?
"L2MCFILT " : "", @@ -2894,8 +2835,11 @@ nfp_net_init(struct rte_eth_dev *eth_dev) hw->cap & NFP_NET_CFG_CTRL_TXVLAN ? "TXVLAN " : "", hw->cap & NFP_NET_CFG_CTRL_SCATTER ? "SCATTER " : "", hw->cap & NFP_NET_CFG_CTRL_GATHER ? "GATHER " : "", + hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR ? "LIVE_ADDR " : "", hw->cap & NFP_NET_CFG_CTRL_LSO ? "TSO " : "", - hw->cap & NFP_NET_CFG_CTRL_RSS ? "RSS " : ""); + hw->cap & NFP_NET_CFG_CTRL_LSO2 ? "TSOv2 " : "", + hw->cap & NFP_NET_CFG_CTRL_RSS ? "RSS " : "", + hw->cap & NFP_NET_CFG_CTRL_RSS2 ? "RSSv2 " : ""); hw->ctrl = 0; @@ -2912,7 +2856,8 @@ nfp_net_init(struct rte_eth_dev *eth_dev) eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", ETHER_ADDR_LEN, 0); if (eth_dev->data->mac_addrs == NULL) { PMD_INIT_LOG(ERR, "Failed to space for MAC address"); - return -ENOMEM; + err = -ENOMEM; + goto dev_err_queues_map; } if (hw->is_pf) { @@ -2923,6 +2868,8 @@ nfp_net_init(struct rte_eth_dev *eth_dev) } if (!is_valid_assigned_ether_addr((struct ether_addr *)&hw->mac_addr)) { + PMD_INIT_LOG(INFO, "Using random mac address for port %d", + port); /* Using random mac addresses for VFs */ eth_random_addr(&hw->mac_addr[0]); nfp_net_write_mac(hw, (uint8_t *)&hw->mac_addr); @@ -2951,11 +2898,19 @@ nfp_net_init(struct rte_eth_dev *eth_dev) nfp_net_stats_reset(eth_dev); return 0; + +dev_err_queues_map: + nfp_cpp_area_free(hw->hwqueues_area); +dev_err_ctrl_map: + nfp_cpp_area_free(hw->ctrl_area); + + return err; } static int nfp_pf_create_dev(struct rte_pci_device *dev, int port, int ports, - nfpu_desc_t *nfpu_desc, void **priv) + struct nfp_cpp *cpp, struct nfp_hwinfo *hwinfo, + int phys_port, struct nfp_rtsym_table *sym_tbl, void **priv) { struct rte_eth_dev *eth_dev; struct nfp_net_hw *hw; @@ -2993,12 +2948,16 @@ nfp_pf_create_dev(struct rte_pci_device *dev, int port, int ports, * Then dev_private is adjusted per port. 
*/ hw = (struct nfp_net_hw *)(eth_dev->data->dev_private) + port; - hw->nspu_desc = nfpu_desc->nspu; - hw->nfpu_desc = nfpu_desc; + hw->cpp = cpp; + hw->hwinfo = hwinfo; + hw->sym_tbl = sym_tbl; + hw->pf_port_idx = phys_port; hw->is_pf = 1; if (ports > 1) hw->pf_multiport_enabled = 1; + hw->total_ports = ports; + eth_dev->device = &dev->device; rte_eth_copy_pci_info(eth_dev, dev); @@ -3006,82 +2965,228 @@ nfp_pf_create_dev(struct rte_pci_device *dev, int port, int ports, if (ret) rte_eth_dev_release_port(eth_dev); + else + rte_eth_dev_probing_finish(eth_dev); rte_free(port_name); return ret; } +#define DEFAULT_FW_PATH "/lib/firmware/netronome" + +static int +nfp_fw_upload(struct rte_pci_device *dev, struct nfp_nsp *nsp, char *card) +{ + struct nfp_cpp *cpp = nsp->cpp; + int fw_f; + char *fw_buf; + char fw_name[125]; + char serial[40]; + struct stat file_stat; + off_t fsize, bytes; + + /* Looking for firmware file in order of priority */ + + /* First try to find a firmware image specific for this device */ + sprintf(serial, "serial-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x", + cpp->serial[0], cpp->serial[1], cpp->serial[2], cpp->serial[3], + cpp->serial[4], cpp->serial[5], cpp->interface >> 8, + cpp->interface & 0xff); + + sprintf(fw_name, "%s/%s.nffw", DEFAULT_FW_PATH, serial); + + PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name); + fw_f = open(fw_name, O_RDONLY); + if (fw_f > 0) + goto read_fw; + + /* Then try the PCI name */ + sprintf(fw_name, "%s/pci-%s.nffw", DEFAULT_FW_PATH, dev->device.name); + + PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name); + fw_f = open(fw_name, O_RDONLY); + if (fw_f > 0) + goto read_fw; + + /* Finally try the card type and media */ + sprintf(fw_name, "%s/%s", DEFAULT_FW_PATH, card); + PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name); + fw_f = open(fw_name, O_RDONLY); + if (fw_f < 0) { + PMD_DRV_LOG(INFO, "Firmware file %s not found.", fw_name); + return -ENOENT; + } + +read_fw: + if (fstat(fw_f, &file_stat) < 0) { + PMD_DRV_LOG(INFO, "Firmware file %s size is unknown", fw_name); + close(fw_f); + return -ENOENT; + } + + fsize = file_stat.st_size; + PMD_DRV_LOG(INFO, "Firmware file found at %s with size: %" PRIu64 "", + fw_name, (uint64_t)fsize); + + fw_buf = malloc((size_t)fsize); + if (!fw_buf) { + PMD_DRV_LOG(INFO, "malloc failed for fw buffer"); + close(fw_f); + return -ENOMEM; + } + memset(fw_buf, 0, fsize); + + bytes = read(fw_f, fw_buf, fsize); + if (bytes != fsize) { + PMD_DRV_LOG(INFO, "Reading fw to buffer failed." 
+ "Just %" PRIu64 " of %" PRIu64 " bytes read", + (uint64_t)bytes, (uint64_t)fsize); + free(fw_buf); + close(fw_f); + return -EIO; + } + + PMD_DRV_LOG(INFO, "Uploading the firmware ..."); + nfp_nsp_load_fw(nsp, fw_buf, bytes); + PMD_DRV_LOG(INFO, "Done"); + + free(fw_buf); + close(fw_f); + + return 0; +} + +static int +nfp_fw_setup(struct rte_pci_device *dev, struct nfp_cpp *cpp, + struct nfp_eth_table *nfp_eth_table, struct nfp_hwinfo *hwinfo) +{ + struct nfp_nsp *nsp; + const char *nfp_fw_model; + char card_desc[100]; + int err = 0; + + nfp_fw_model = nfp_hwinfo_lookup(hwinfo, "assembly.partno"); + + if (nfp_fw_model) { + PMD_DRV_LOG(INFO, "firmware model found: %s", nfp_fw_model); + } else { + PMD_DRV_LOG(ERR, "firmware model NOT found"); + return -EIO; + } + + if (nfp_eth_table->count == 0 || nfp_eth_table->count > 8) { + PMD_DRV_LOG(ERR, "NFP ethernet table reports wrong ports: %u", + nfp_eth_table->count); + return -EIO; + } + + PMD_DRV_LOG(INFO, "NFP ethernet port table reports %u ports", + nfp_eth_table->count); + + PMD_DRV_LOG(INFO, "Port speed: %u", nfp_eth_table->ports[0].speed); + + sprintf(card_desc, "nic_%s_%dx%d.nffw", nfp_fw_model, + nfp_eth_table->count, nfp_eth_table->ports[0].speed / 1000); + + nsp = nfp_nsp_open(cpp); + if (!nsp) { + PMD_DRV_LOG(ERR, "NFP error when obtaining NSP handle"); + return -EIO; + } + + nfp_nsp_device_soft_reset(nsp); + err = nfp_fw_upload(dev, nsp, card_desc); + + nfp_nsp_close(nsp); + return err; +} + static int nfp_pf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, struct rte_pci_device *dev) { - nfpu_desc_t *nfpu_desc; - nspu_desc_t *nspu_desc; - uint64_t offset_symbol; - uint8_t *bar_offset; - int major, minor; + struct nfp_cpp *cpp; + struct nfp_hwinfo *hwinfo; + struct nfp_rtsym_table *sym_tbl; + struct nfp_eth_table *nfp_eth_table = NULL; int total_ports; void *priv = 0; int ret = -ENODEV; + int err; int i; if (!dev) return ret; - nfpu_desc = rte_malloc("nfp nfpu", sizeof(nfpu_desc_t), 0); - if (!nfpu_desc) - return -ENOMEM; + /* + * When device bound to UIO, the device could be used, by mistake, + * by two DPDK apps, and the UIO driver does not avoid it. This + * could lead to a serious problem when configuring the NFP CPP + * interface. Here we avoid this telling to the CPP init code to + * use a lock file if UIO is being used. + */ + if (dev->kdrv == RTE_KDRV_VFIO) + cpp = nfp_cpp_from_device_name(dev->device.name, 0); + else + cpp = nfp_cpp_from_device_name(dev->device.name, 1); - if (nfpu_open(dev, nfpu_desc, 0) < 0) { - RTE_LOG(ERR, PMD, - "nfpu_open failed\n"); - goto nfpu_error; + if (!cpp) { + PMD_DRV_LOG(ERR, "A CPP handle can not be obtained"); + ret = -EIO; + goto error; } - nspu_desc = nfpu_desc->nspu; + hwinfo = nfp_hwinfo_read(cpp); + if (!hwinfo) { + PMD_DRV_LOG(ERR, "Error reading hwinfo table"); + return -EIO; + } + nfp_eth_table = nfp_eth_read_ports(cpp); + if (!nfp_eth_table) { + PMD_DRV_LOG(ERR, "Error reading NFP ethernet table"); + return -EIO; + } - /* Check NSP ABI version */ - if (nfp_nsp_get_abi_version(nspu_desc, &major, &minor) < 0) { - RTE_LOG(INFO, PMD, "NFP NSP not present\n"); + if (nfp_fw_setup(dev, cpp, nfp_eth_table, hwinfo)) { + PMD_DRV_LOG(INFO, "Error when uploading firmware"); + ret = -EIO; goto error; } - PMD_INIT_LOG(INFO, "nspu ABI version: %d.%d\n", major, minor); - if ((major == 0) && (minor < 20)) { - RTE_LOG(INFO, PMD, "NFP NSP ABI version too old. 
Required 0.20 or higher\n"); + /* Now the symbol table should be there */ + sym_tbl = nfp_rtsym_table_read(cpp); + if (!sym_tbl) { + PMD_DRV_LOG(ERR, "Something is wrong with the firmware" + " symbol table"); + ret = -EIO; goto error; } - ret = nfp_nsp_fw_setup(nspu_desc, "nfd_cfg_pf0_num_ports", - &offset_symbol); - if (ret) + total_ports = nfp_rtsym_read_le(sym_tbl, "nfd_cfg_pf0_num_ports", &err); + if (total_ports != (int)nfp_eth_table->count) { + PMD_DRV_LOG(ERR, "Inconsistent number of ports"); + ret = -EIO; goto error; - - bar_offset = (uint8_t *)dev->mem_resource[0].addr; - bar_offset += offset_symbol; - total_ports = (uint32_t)*bar_offset; - PMD_INIT_LOG(INFO, "Total pf ports: %d\n", total_ports); + } + PMD_INIT_LOG(INFO, "Total pf ports: %d", total_ports); if (total_ports <= 0 || total_ports > 8) { - RTE_LOG(ERR, PMD, "nfd_cfg_pf0_num_ports symbol with wrong value"); + PMD_DRV_LOG(ERR, "nfd_cfg_pf0_num_ports symbol with wrong value"); ret = -ENODEV; goto error; } for (i = 0; i < total_ports; i++) { - ret = nfp_pf_create_dev(dev, i, total_ports, nfpu_desc, &priv); + ret = nfp_pf_create_dev(dev, i, total_ports, cpp, hwinfo, + nfp_eth_table->ports[i].index, + sym_tbl, &priv); if (ret) - goto error; + break; } - return 0; - error: - nfpu_close(nfpu_desc); -nfpu_error: - rte_free(nfpu_desc); - + free(nfp_eth_table); return ret; } @@ -3129,8 +3234,19 @@ static int eth_nfp_pci_remove(struct rte_pci_device *pci_dev) if ((pci_dev->id.device_id == PCI_DEVICE_ID_NFP4000_PF_NIC) || (pci_dev->id.device_id == PCI_DEVICE_ID_NFP6000_PF_NIC)) { port = get_pf_port_number(eth_dev->data->name); + /* + * hotplug is not possible with multiport PF although freeing + * data structures can be done for first port. + */ + if (port != 0) + return -ENOTSUP; hwport0 = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); hw = &hwport0[port]; + nfp_cpp_area_free(hw->ctrl_area); + nfp_cpp_area_free(hw->hwqueues_area); + free(hw->hwinfo); + free(hw->sym_tbl); + nfp_cpp_free(hw->cpp); } else { hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); } diff --git a/drivers/net/nfp/nfp_net_ctrl.h b/drivers/net/nfp/nfp_net_ctrl.h index 1ebd99ca..21e17da1 100644 --- a/drivers/net/nfp/nfp_net_ctrl.h +++ b/drivers/net/nfp/nfp_net_ctrl.h @@ -120,6 +120,9 @@ #define NFP_NET_CFG_CTRL_VXLAN (0x1 << 24) /* Enable VXLAN */ #define NFP_NET_CFG_CTRL_NVGRE (0x1 << 25) /* Enable NVGRE */ #define NFP_NET_CFG_CTRL_MSIX_TX_OFF (0x1 << 26) /* Disable MSIX for TX */ +#define NFP_NET_CFG_CTRL_LSO2 (0x1 << 28) /* LSO/TSO (version 2) */ +#define NFP_NET_CFG_CTRL_RSS2 (0x1 << 29) /* RSS (version 2) */ +#define NFP_NET_CFG_CTRL_LIVE_ADDR (0x1 << 31) /* live MAC addr change */ #define NFP_NET_CFG_UPDATE 0x0004 #define NFP_NET_CFG_UPDATE_GEN (0x1 << 0) /* General update */ #define NFP_NET_CFG_UPDATE_RING (0x1 << 1) /* Ring config change */ @@ -131,6 +134,7 @@ #define NFP_NET_CFG_UPDATE_RESET (0x1 << 7) /* Update due to FLR */ #define NFP_NET_CFG_UPDATE_IRQMOD (0x1 << 8) /* IRQ mod change */ #define NFP_NET_CFG_UPDATE_VXLAN (0x1 << 9) /* VXLAN port change */ +#define NFP_NET_CFG_UPDATE_MACADDR (0x1 << 11) /* MAC address change */ #define NFP_NET_CFG_UPDATE_ERR (0x1 << 31) /* A error occurred */ #define NFP_NET_CFG_TXRS_ENABLE 0x0008 #define NFP_NET_CFG_RXRS_ENABLE 0x0010 @@ -140,6 +144,8 @@ #define NFP_NET_CFG_LSC 0x0020 #define NFP_NET_CFG_MACADDR 0x0024 +#define NFP_NET_CFG_CTRL_LSO_ANY (NFP_NET_CFG_CTRL_LSO | NFP_NET_CFG_CTRL_LSO2) + /* * Read-only words (0x0030 - 0x0050): * @NFP_NET_CFG_VERSION: Firmware version number diff --git 
a/drivers/net/nfp/nfp_net_eth.h b/drivers/net/nfp/nfp_net_eth.h deleted file mode 100644 index af57f03c..00000000 --- a/drivers/net/nfp/nfp_net_eth.h +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Copyright (c) 2017 Netronome Systems, Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution - * - * 3. Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived from this - * software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -/* - * vim:shiftwidth=8:noexpandtab - * - * @file dpdk/pmd/nfp_net_eth.h - * - * Netronome NFP_NET PDM driver - */ - -union eth_table_entry { - struct { - uint64_t port; - uint64_t state; - uint8_t mac_addr[6]; - uint8_t resv[2]; - uint64_t control; - }; - uint64_t raw[4]; -}; - -#ifndef BIT_ULL -#define BIT_ULL(a) (1ULL << (a)) -#endif - -#define NSP_ETH_NBI_PORT_COUNT 24 -#define NSP_ETH_MAX_COUNT (2 * NSP_ETH_NBI_PORT_COUNT) -#define NSP_ETH_TABLE_SIZE (NSP_ETH_MAX_COUNT * sizeof(union eth_table_entry)) - -#define NSP_ETH_PORT_LANES 0xf -#define NSP_ETH_PORT_INDEX 0xff00 -#define NSP_ETH_PORT_LABEL 0x3f000000000000 -#define NSP_ETH_PORT_PHYLABEL 0xfc0000000000000 - -#define NSP_ETH_PORT_LANES_MASK rte_cpu_to_le_64(NSP_ETH_PORT_LANES) - -#define NSP_ETH_STATE_CONFIGURED BIT_ULL(0) -#define NSP_ETH_STATE_ENABLED BIT_ULL(1) -#define NSP_ETH_STATE_TX_ENABLED BIT_ULL(2) -#define NSP_ETH_STATE_RX_ENABLED BIT_ULL(3) -#define NSP_ETH_STATE_RATE 0xf00 -#define NSP_ETH_STATE_INTERFACE 0xff000 -#define NSP_ETH_STATE_MEDIA 0x300000 -#define NSP_ETH_STATE_OVRD_CHNG BIT_ULL(22) -#define NSP_ETH_STATE_ANEG 0x3800000 - -#define NSP_ETH_CTRL_CONFIGURED BIT_ULL(0) -#define NSP_ETH_CTRL_ENABLED BIT_ULL(1) -#define NSP_ETH_CTRL_TX_ENABLED BIT_ULL(2) -#define NSP_ETH_CTRL_RX_ENABLED BIT_ULL(3) -#define NSP_ETH_CTRL_SET_RATE BIT_ULL(4) -#define NSP_ETH_CTRL_SET_LANES BIT_ULL(5) -#define NSP_ETH_CTRL_SET_ANEG BIT_ULL(6) diff --git a/drivers/net/nfp/nfp_net_logs.h b/drivers/net/nfp/nfp_net_logs.h index 3fe24e96..9952881c 100644 --- a/drivers/net/nfp/nfp_net_logs.h +++ b/drivers/net/nfp/nfp_net_logs.h @@ -36,19 +36,20 @@ extern int nfp_logtype_init; #define PMD_INIT_LOG(level, fmt, args...) 
\ - RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) + rte_log(RTE_LOG_ ## level, nfp_logtype_init, \ + "%s(): " fmt "\n", __func__, ## args) #define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>") #ifdef RTE_LIBRTE_NFP_NET_DEBUG_RX #define PMD_RX_LOG(level, fmt, args...) \ - RTE_LOG(level, PMD, "%s() rx: " fmt, __func__, ## args) + RTE_LOG(level, PMD, "%s() rx: " fmt "\n", __func__, ## args) #else #define PMD_RX_LOG(level, fmt, args...) do { } while (0) #endif #ifdef RTE_LIBRTE_NFP_NET_DEBUG_TX #define PMD_TX_LOG(level, fmt, args...) \ - RTE_LOG(level, PMD, "%s() tx: " fmt, __func__, ## args) + RTE_LOG(level, PMD, "%s() tx: " fmt "\n", __func__, ## args) #define ASSERT(x) if (!(x)) rte_panic("NFP_NET: x") #else #define PMD_TX_LOG(level, fmt, args...) do { } while (0) @@ -58,6 +59,6 @@ extern int nfp_logtype_init; extern int nfp_logtype_driver; #define PMD_DRV_LOG(level, fmt, args...) \ rte_log(RTE_LOG_ ## level, nfp_logtype_driver, \ - "%s(): " fmt, __func__, ## args) + "%s(): " fmt "\n", __func__, ## args) #endif /* _NFP_NET_LOGS_H_ */ diff --git a/drivers/net/nfp/nfp_net_pmd.h b/drivers/net/nfp/nfp_net_pmd.h index 1ae0ea62..c1b044ee 100644 --- a/drivers/net/nfp/nfp_net_pmd.h +++ b/drivers/net/nfp/nfp_net_pmd.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2015 Netronome Systems, Inc. + * Copyright (c) 2014-2018 Netronome Systems, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -63,6 +63,7 @@ struct nfp_net_adapter; #define NFP_NET_CRTL_BAR 0 #define NFP_NET_TX_BAR 2 #define NFP_NET_RX_BAR 2 +#define NFP_QCP_QUEUE_AREA_SZ 0x80000 /* Macros for accessing the Queue Controller Peripheral 'CSRs' */ #define NFP_QCP_QUEUE_OFF(_x) ((_x) * 0x800) @@ -184,18 +185,28 @@ static inline void nn_writeq(uint64_t val, volatile void *addr) struct nfp_net_tx_desc { union { struct { - uint8_t dma_addr_hi; /* High bits of host buf address */ + uint8_t dma_addr_hi; /* High bits of host buf address */ __le16 dma_len; /* Length to DMA for this desc */ - uint8_t offset_eop; /* Offset in buf where pkt starts + + uint8_t offset_eop; /* Offset in buf where pkt starts + * highest bit is eop flag. */ __le32 dma_addr_lo; /* Low 32bit of host buf addr */ - __le16 lso; /* MSS to be used for LSO */ - uint8_t l4_offset; /* LSO, where the L4 data starts */ - uint8_t flags; /* TX Flags, see @PCIE_DESC_TX_* */ - - __le16 vlan; /* VLAN tag to add if indicated */ + __le16 mss; /* MSS to be used for LSO */ + uint8_t lso_hdrlen; /* LSO, where the data starts */ + uint8_t flags; /* TX Flags, see @PCIE_DESC_TX_* */ + + union { + struct { + /* + * L3 and L4 header offsets required + * for TSOv2 + */ + uint8_t l3_offset; + uint8_t l4_offset; + }; + __le16 vlan; /* VLAN tag to add if indicated */ + }; __le16 data_len; /* Length of frame + meta data */ } __attribute__((__packed__)); __le32 vals[4]; @@ -247,15 +258,13 @@ struct nfp_net_txq { /* * At this point 48 bytes have been used for all the fields in the * TX critical path. We have room for 8 bytes and still all placed - * in a cache line. We are not using the threshold values below nor - * the txq_flags but if we need to, we can add the most used in the - * remaining bytes. + * in a cache line. We are not using the threshold values below but + * if we need to, we can add the most used in the remaining bytes. */ uint32_t tx_rs_thresh; /* not used by now. Future? */ uint32_t tx_pthresh; /* not used by now. Future? */ uint32_t tx_hthresh; /* not used by now. Future? */ uint32_t tx_wthresh; /* not used by now. Future? 
*/ - uint32_t txq_flags; /* not used by now. Future? */ uint16_t port_id; int qidx; int tx_qcidx; @@ -430,20 +439,21 @@ struct nfp_net_hw { /* Records starting point for counters */ struct rte_eth_stats eth_stats_base; -#ifdef NFP_NET_LIBNFP struct nfp_cpp *cpp; struct nfp_cpp_area *ctrl_area; - struct nfp_cpp_area *tx_area; - struct nfp_cpp_area *rx_area; + struct nfp_cpp_area *hwqueues_area; struct nfp_cpp_area *msix_area; -#endif + uint8_t *hw_queues; uint8_t is_pf; uint8_t pf_port_idx; uint8_t pf_multiport_enabled; + uint8_t total_ports; + union eth_table_entry *eth_table; - nspu_desc_t *nspu_desc; - nfpu_desc_t *nfpu_desc; + + struct nfp_hwinfo *hwinfo; + struct nfp_rtsym_table *sym_tbl; }; struct nfp_net_adapter { diff --git a/drivers/net/nfp/nfp_nfpu.c b/drivers/net/nfp/nfp_nfpu.c deleted file mode 100644 index f11afef3..00000000 --- a/drivers/net/nfp/nfp_nfpu.c +++ /dev/null @@ -1,108 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include "nfp_nfpu.h" - -/* PF BAR and expansion BAR for the NSP interface */ -#define NFP_CFG_PCIE_BAR 0 -#define NFP_CFG_EXP_BAR 7 - -#define NFP_CFG_EXP_BAR_CFG_BASE 0x30000 - -/* There could be other NFP userspace tools using the NSP interface. - * Make sure there is no other process using it and locking the access for - * avoiding problems. - */ -static int -nspv_aquire_process_lock(nfpu_desc_t *desc) -{ - int rc; - struct flock lock; - char lockname[30]; - - memset(&lock, 0, sizeof(lock)); - - snprintf(lockname, sizeof(lockname), "/var/lock/nfp%d", desc->nfp); - - /* Using S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH */ - desc->lock = open(lockname, O_RDWR | O_CREAT, 0666); - - if (desc->lock < 0) - return desc->lock; - - lock.l_type = F_WRLCK; - lock.l_whence = SEEK_SET; - rc = -1; - while (rc != 0) { - rc = fcntl(desc->lock, F_SETLK, &lock); - if (rc < 0) { - if ((errno != EAGAIN) && (errno != EACCES)) { - close(desc->lock); - return rc; - } - } - } - - return 0; -} - -int -nfpu_open(struct rte_pci_device *pci_dev, nfpu_desc_t *desc, int nfp) -{ - void *cfg_base, *mem_base; - size_t barsz; - int ret = 0; - int i = 0; - - desc->nfp = nfp; - - ret = nspv_aquire_process_lock(desc); - if (ret) - return -1; - - barsz = pci_dev->mem_resource[0].len; - - /* barsz in log2 */ - while (barsz >>= 1) - i++; - - barsz = i; - - /* Sanity check: we can assume any bar size less than 1MB an error */ - if (barsz < 20) - return -1; - - /* Getting address for NFP expansion BAR registers */ - cfg_base = pci_dev->mem_resource[0].addr; - cfg_base = (uint8_t *)cfg_base + NFP_CFG_EXP_BAR_CFG_BASE; - - /* Getting address for NFP NSP interface registers */ - mem_base = pci_dev->mem_resource[0].addr; - mem_base = (uint8_t *)mem_base + (NFP_CFG_EXP_BAR << (barsz - 3)); - - - desc->nspu = rte_malloc("nfp nspu", sizeof(nspu_desc_t), 0); - nfp_nspu_init(desc->nspu, desc->nfp, NFP_CFG_PCIE_BAR, barsz, - NFP_CFG_EXP_BAR, cfg_base, mem_base); - - return ret; -} - -int -nfpu_close(nfpu_desc_t *desc) -{ - rte_free(desc->nspu); - close(desc->lock); - unlink("/var/lock/nfp0"); - return 0; -} diff --git a/drivers/net/nfp/nfp_nfpu.h b/drivers/net/nfp/nfp_nfpu.h deleted file mode 100644 index e56fa099..00000000 --- a/drivers/net/nfp/nfp_nfpu.h +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Copyright (c) 2017 Netronome Systems, Inc. - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution - * - * 3. Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived from this - * software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -/* - * vim:shiftwidth=8:noexpandtab - * - * @file dpdk/pmd/nfp_nfpu.h - * - * Netronome NFP_NET PDM driver - */ - -/* - * NFP User interface creates a window for talking with NFP NSP processor - */ - - -#include -#include "nfp_nspu.h" - -typedef struct { - int nfp; - int lock; - nspu_desc_t *nspu; -} nfpu_desc_t; - -int nfpu_open(struct rte_pci_device *pci_dev, nfpu_desc_t *desc, int nfp); -int nfpu_close(nfpu_desc_t *desc); diff --git a/drivers/net/nfp/nfp_nspu.c b/drivers/net/nfp/nfp_nspu.c deleted file mode 100644 index f9089832..00000000 --- a/drivers/net/nfp/nfp_nspu.c +++ /dev/null @@ -1,642 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include "nfp_nfpu.h" - -#define CFG_EXP_BAR_ADDR_SZ 1 -#define CFG_EXP_BAR_MAP_TYPE 1 - -#define EXP_BAR_TARGET_SHIFT 23 -#define EXP_BAR_LENGTH_SHIFT 27 /* 0=32, 1=64 bit increment */ -#define EXP_BAR_MAP_TYPE_SHIFT 29 /* Bulk BAR map */ - -/* NFP target for NSP access */ -#define NFP_NSP_TARGET 7 - -/* Expansion BARs for mapping PF vnic BARs */ -#define NFP_NET_PF_CFG_EXP_BAR 6 -#define NFP_NET_PF_HW_QUEUES_EXP_BAR 5 - -/* - * This is an NFP internal address used for configuring properly an NFP - * expansion BAR. 
- */ -#define MEM_CMD_BASE_ADDR 0x8100000000 - -/* NSP interface registers */ -#define NSP_BASE (MEM_CMD_BASE_ADDR + 0x22100) -#define NSP_STATUS 0x00 -#define NSP_COMMAND 0x08 -#define NSP_BUFFER 0x10 -#define NSP_DEFAULT_BUF 0x18 -#define NSP_DEFAULT_BUF_CFG 0x20 - -#define NSP_MAGIC 0xab10 -#define NSP_STATUS_MAGIC(x) (((x) >> 48) & 0xffff) -#define NSP_STATUS_MAJOR(x) (int)(((x) >> 44) & 0xf) -#define NSP_STATUS_MINOR(x) (int)(((x) >> 32) & 0xfff) - -/* NSP commands */ -#define NSP_CMD_RESET 1 -#define NSP_CMD_FW_LOAD 6 -#define NSP_CMD_READ_ETH_TABLE 7 -#define NSP_CMD_WRITE_ETH_TABLE 8 -#define NSP_CMD_GET_SYMBOL 14 - -#define NSP_BUFFER_CFG_SIZE_MASK (0xff) - -#define NSP_REG_ADDR(d, off, reg) ((uint8_t *)(d)->mem_base + (off) + (reg)) -#define NSP_REG_VAL(p) (*(uint64_t *)(p)) - -/* - * An NFP expansion BAR is configured for allowing access to a specific NFP - * target: - * - * IN: - * desc: struct with basic NSP addresses to work with - * expbar: NFP PF expansion BAR index to configure - * tgt: NFP target to configure access - * addr: NFP target address - * - * OUT: - * pcie_offset: NFP PCI BAR offset to work with - */ -static void -nfp_nspu_mem_bar_cfg(nspu_desc_t *desc, int expbar, int tgt, - uint64_t addr, uint64_t *pcie_offset) -{ - uint64_t x, y, barsz; - uint32_t *expbar_ptr; - - barsz = desc->barsz; - - /* - * NFP CPP address to configure. This comes from NFP 6000 - * datasheet document based on Bulk mapping. - */ - x = (addr >> (barsz - 3)) << (21 - (40 - (barsz - 3))); - x |= CFG_EXP_BAR_MAP_TYPE << EXP_BAR_MAP_TYPE_SHIFT; - x |= CFG_EXP_BAR_ADDR_SZ << EXP_BAR_LENGTH_SHIFT; - x |= tgt << EXP_BAR_TARGET_SHIFT; - - /* Getting expansion bar configuration register address */ - expbar_ptr = (uint32_t *)desc->cfg_base; - /* Each physical PCI BAR has 8 NFP expansion BARs */ - expbar_ptr += (desc->pcie_bar * 8) + expbar; - - /* Writing to the expansion BAR register */ - *expbar_ptr = (uint32_t)x; - - /* Getting the pcie offset to work with from userspace */ - y = addr & ((uint64_t)(1 << (barsz - 3)) - 1); - *pcie_offset = y; -} - -/* - * Configuring an expansion bar for accessing NSP userspace interface. This - * function configures always the same expansion bar, which implies access to - * previously configured NFP target is lost. - */ -static void -nspu_xlate(nspu_desc_t *desc, uint64_t addr, uint64_t *pcie_offset) -{ - nfp_nspu_mem_bar_cfg(desc, desc->exp_bar, NFP_NSP_TARGET, addr, - pcie_offset); -} - -int -nfp_nsp_get_abi_version(nspu_desc_t *desc, int *major, int *minor) -{ - uint64_t pcie_offset; - uint64_t nsp_reg; - - nspu_xlate(desc, NSP_BASE, &pcie_offset); - nsp_reg = NSP_REG_VAL(NSP_REG_ADDR(desc, pcie_offset, NSP_STATUS)); - - if (NSP_STATUS_MAGIC(nsp_reg) != NSP_MAGIC) - return -1; - - *major = NSP_STATUS_MAJOR(nsp_reg); - *minor = NSP_STATUS_MINOR(nsp_reg); - - return 0; -} - -int -nfp_nspu_init(nspu_desc_t *desc, int nfp, int pcie_bar, size_t pcie_barsz, - int exp_bar, void *exp_bar_cfg_base, void *exp_bar_mmap) -{ - uint64_t offset, buffaddr; - uint64_t nsp_reg; - - desc->nfp = nfp; - desc->pcie_bar = pcie_bar; - desc->exp_bar = exp_bar; - desc->barsz = pcie_barsz; - desc->windowsz = 1 << (desc->barsz - 3); - desc->cfg_base = exp_bar_cfg_base; - desc->mem_base = exp_bar_mmap; - - nspu_xlate(desc, NSP_BASE, &offset); - - /* - * Other NSPU clients can use other buffers. Let's tell NSPU we use the - * default buffer. 
- */ - buffaddr = NSP_REG_VAL(NSP_REG_ADDR(desc, offset, NSP_DEFAULT_BUF)); - NSP_REG_VAL(NSP_REG_ADDR(desc, offset, NSP_BUFFER)) = buffaddr; - - /* NFP internal addresses are 40 bits. Clean all other bits here */ - buffaddr = buffaddr & (((uint64_t)1 << 40) - 1); - desc->bufaddr = buffaddr; - - /* Lets get information about the buffer */ - nsp_reg = NSP_REG_VAL(NSP_REG_ADDR(desc, offset, NSP_DEFAULT_BUF_CFG)); - - /* Buffer size comes in MBs. Coversion to bytes */ - desc->buf_size = ((size_t)nsp_reg & NSP_BUFFER_CFG_SIZE_MASK) << 20; - - return 0; -} - -#define NSPU_NFP_BUF(addr, base, off) \ - (*(uint64_t *)((uint8_t *)(addr)->mem_base + ((base) | (off)))) - -#define NSPU_HOST_BUF(base, off) (*(uint64_t *)((uint8_t *)(base) + (off))) - -static int -nspu_buff_write(nspu_desc_t *desc, void *buffer, size_t size) -{ - uint64_t pcie_offset, pcie_window_base, pcie_window_offset; - uint64_t windowsz = desc->windowsz; - uint64_t buffaddr, j, i = 0; - int ret = 0; - - if (size > desc->buf_size) - return -1; - - buffaddr = desc->bufaddr; - windowsz = desc->windowsz; - - while (i < size) { - /* Expansion bar reconfiguration per window size */ - nspu_xlate(desc, buffaddr + i, &pcie_offset); - pcie_window_base = pcie_offset & (~(windowsz - 1)); - pcie_window_offset = pcie_offset & (windowsz - 1); - for (j = pcie_window_offset; ((j < windowsz) && (i < size)); - j += 8) { - NSPU_NFP_BUF(desc, pcie_window_base, j) = - NSPU_HOST_BUF(buffer, i); - i += 8; - } - } - - return ret; -} - -static int -nspu_buff_read(nspu_desc_t *desc, void *buffer, size_t size) -{ - uint64_t pcie_offset, pcie_window_base, pcie_window_offset; - uint64_t windowsz, i = 0, j; - uint64_t buffaddr; - int ret = 0; - - if (size > desc->buf_size) - return -1; - - buffaddr = desc->bufaddr; - windowsz = desc->windowsz; - - while (i < size) { - /* Expansion bar reconfiguration per window size */ - nspu_xlate(desc, buffaddr + i, &pcie_offset); - pcie_window_base = pcie_offset & (~(windowsz - 1)); - pcie_window_offset = pcie_offset & (windowsz - 1); - for (j = pcie_window_offset; ((j < windowsz) && (i < size)); - j += 8) { - NSPU_HOST_BUF(buffer, i) = - NSPU_NFP_BUF(desc, pcie_window_base, j); - i += 8; - } - } - - return ret; -} - -static int -nspu_command(nspu_desc_t *desc, uint16_t cmd, int read, int write, - void *buffer, size_t rsize, size_t wsize) -{ - uint64_t status, cmd_reg; - uint64_t offset; - int retry = 0; - int retries = 120; - int ret = 0; - - /* Same expansion BAR is used for different things */ - nspu_xlate(desc, NSP_BASE, &offset); - - status = NSP_REG_VAL(NSP_REG_ADDR(desc, offset, NSP_STATUS)); - - while ((status & 0x1) && (retry < retries)) { - status = NSP_REG_VAL(NSP_REG_ADDR(desc, offset, NSP_STATUS)); - retry++; - sleep(1); - } - - if (retry == retries) - return -1; - - if (write) { - ret = nspu_buff_write(desc, buffer, wsize); - if (ret) - return ret; - - /* Expansion BAR changes when writing the buffer */ - nspu_xlate(desc, NSP_BASE, &offset); - } - - NSP_REG_VAL(NSP_REG_ADDR(desc, offset, NSP_COMMAND)) = - (uint64_t)wsize << 32 | (uint64_t)cmd << 16 | 1; - - retry = 0; - - cmd_reg = NSP_REG_VAL(NSP_REG_ADDR(desc, offset, NSP_COMMAND)); - while ((cmd_reg & 0x1) && (retry < retries)) { - cmd_reg = NSP_REG_VAL(NSP_REG_ADDR(desc, offset, NSP_COMMAND)); - retry++; - sleep(1); - } - if (retry == retries) - return -1; - - retry = 0; - status = NSP_REG_VAL(NSP_REG_ADDR(desc, offset, NSP_STATUS)); - while ((status & 0x1) && (retry < retries)) { - status = NSP_REG_VAL(NSP_REG_ADDR(desc, offset, NSP_STATUS)); - retry++; - 
sleep(1); - } - - if (retry == retries) - return -1; - - ret = status & (0xff << 8); - if (ret) - return ret; - - if (read) { - ret = nspu_buff_read(desc, buffer, rsize); - if (ret) - return ret; - } - - return ret; -} - -static int -nfp_fw_reset(nspu_desc_t *nspu_desc) -{ - int res; - - res = nspu_command(nspu_desc, NSP_CMD_RESET, 0, 0, 0, 0, 0); - - if (res < 0) - RTE_LOG(INFO, PMD, "fw reset failed: error %d", res); - - return res; -} - -#define DEFAULT_FW_PATH "/lib/firmware/netronome" -#define DEFAULT_FW_FILENAME "nic_dpdk_default.nffw" - -static int -nfp_fw_upload(nspu_desc_t *nspu_desc) -{ - int fw_f; - char *fw_buf; - char filename[100]; - struct stat file_stat; - off_t fsize, bytes; - ssize_t size; - int ret; - - size = nspu_desc->buf_size; - - sprintf(filename, "%s/%s", DEFAULT_FW_PATH, DEFAULT_FW_FILENAME); - fw_f = open(filename, O_RDONLY); - if (fw_f < 0) { - RTE_LOG(INFO, PMD, "Firmware file %s/%s not found.", - DEFAULT_FW_PATH, DEFAULT_FW_FILENAME); - return -ENOENT; - } - - if (fstat(fw_f, &file_stat) < 0) { - RTE_LOG(INFO, PMD, "Firmware file %s/%s size is unknown", - DEFAULT_FW_PATH, DEFAULT_FW_FILENAME); - close(fw_f); - return -ENOENT; - } - - fsize = file_stat.st_size; - RTE_LOG(DEBUG, PMD, "Firmware file with size: %" PRIu64 "\n", - (uint64_t)fsize); - - if (fsize > (off_t)size) { - RTE_LOG(INFO, PMD, "fw file too big: %" PRIu64 - " bytes (%" PRIu64 " max)", - (uint64_t)fsize, (uint64_t)size); - close(fw_f); - return -EINVAL; - } - - fw_buf = malloc((size_t)size); - if (!fw_buf) { - RTE_LOG(INFO, PMD, "malloc failed for fw buffer"); - close(fw_f); - return -ENOMEM; - } - memset(fw_buf, 0, size); - - bytes = read(fw_f, fw_buf, fsize); - if (bytes != fsize) { - RTE_LOG(INFO, PMD, "Reading fw to buffer failed.\n" - "Just %" PRIu64 " of %" PRIu64 " bytes read.", - (uint64_t)bytes, (uint64_t)fsize); - free(fw_buf); - close(fw_f); - return -EIO; - } - - ret = nspu_command(nspu_desc, NSP_CMD_FW_LOAD, 0, 1, fw_buf, 0, bytes); - - free(fw_buf); - close(fw_f); - - return ret; -} - -/* Firmware symbol descriptor size */ -#define NFP_SYM_DESC_LEN 40 - -#define SYMBOL_DATA(b, off) (*(int64_t *)((b) + (off))) -#define SYMBOL_UDATA(b, off) (*(uint64_t *)((b) + (off))) - -/* Firmware symbols contain information about how to access what they - * represent. It can be as simple as an numeric variable declared at a - * specific NFP memory, but it can also be more complex structures and - * related to specific hardware functionalities or components. Target, - * domain and address allow to create the BAR window for accessing such - * hw object and size defines the length to map. - * - * A vNIC is a network interface implemented inside the NFP and using a - * subset of device PCI BARs. Specific firmware symbols allow to map those - * vNIC bars by host drivers like the NFP PMD. - * - * Accessing what the symbol represents implies to map the access through - * a PCI BAR window. NFP expansion BARs are used in this regard through - * the NSPU interface. 
- */ -static int -nfp_nspu_set_bar_from_symbl(nspu_desc_t *desc, const char *symbl, - uint32_t expbar, uint64_t *pcie_offset, - ssize_t *size) -{ - int64_t type; - int64_t target; - int64_t domain; - uint64_t addr; - char *sym_buf; - int ret = 0; - - sym_buf = malloc(desc->buf_size); - if (!sym_buf) - return -ENOMEM; - - strncpy(sym_buf, symbl, strlen(symbl)); - ret = nspu_command(desc, NSP_CMD_GET_SYMBOL, 1, 1, sym_buf, - NFP_SYM_DESC_LEN, strlen(symbl)); - if (ret) { - RTE_LOG(DEBUG, PMD, "symbol resolution (%s) failed\n", symbl); - goto clean; - } - - /* Reading symbol information */ - type = SYMBOL_DATA(sym_buf, 0); - target = SYMBOL_DATA(sym_buf, 8); - domain = SYMBOL_DATA(sym_buf, 16); - addr = SYMBOL_UDATA(sym_buf, 24); - *size = (ssize_t)SYMBOL_UDATA(sym_buf, 32); - - if (type != 1) { - RTE_LOG(INFO, PMD, "wrong symbol type\n"); - ret = -EINVAL; - goto clean; - } - if (!(target == 7 || target == -7)) { - RTE_LOG(INFO, PMD, "wrong symbol target\n"); - ret = -EINVAL; - goto clean; - } - if (domain == 8 || domain == 9) { - RTE_LOG(INFO, PMD, "wrong symbol domain\n"); - ret = -EINVAL; - goto clean; - } - - /* Adjusting address based on symbol location */ - if ((domain >= 24) && (domain < 28) && (target == 7)) { - addr = 1ULL << 37 | addr | ((uint64_t)domain & 0x3) << 35; - } else { - addr = 1ULL << 39 | addr | ((uint64_t)domain & 0x3f) << 32; - if (target == -7) - target = 7; - } - - /* Configuring NFP expansion bar for mapping specific PCI BAR window */ - nfp_nspu_mem_bar_cfg(desc, expbar, target, addr, pcie_offset); - - /* This is the PCI BAR offset to use by the host */ - *pcie_offset |= ((expbar & 0x7) << (desc->barsz - 3)); - -clean: - free(sym_buf); - return ret; -} - -int -nfp_nsp_fw_setup(nspu_desc_t *desc, const char *sym, uint64_t *pcie_offset) -{ - ssize_t bar0_sym_size; - - /* If the symbol resolution works, it implies a firmware app - * is already there. - */ - if (!nfp_nspu_set_bar_from_symbl(desc, sym, NFP_NET_PF_CFG_EXP_BAR, - pcie_offset, &bar0_sym_size)) - return 0; - - /* No firmware app detected or not the right one */ - RTE_LOG(INFO, PMD, "No firmware detected. Resetting NFP...\n"); - if (nfp_fw_reset(desc) < 0) { - RTE_LOG(ERR, PMD, "nfp fw reset failed\n"); - return -ENODEV; - } - - RTE_LOG(INFO, PMD, "Reset done.\n"); - RTE_LOG(INFO, PMD, "Uploading firmware...\n"); - - if (nfp_fw_upload(desc) < 0) { - RTE_LOG(ERR, PMD, "nfp fw upload failed\n"); - return -ENODEV; - } - - RTE_LOG(INFO, PMD, "Done.\n"); - - /* Now the symbol should be there */ - if (nfp_nspu_set_bar_from_symbl(desc, sym, NFP_NET_PF_CFG_EXP_BAR, - pcie_offset, &bar0_sym_size)) { - RTE_LOG(ERR, PMD, "nfp PF BAR symbol resolution failed\n"); - return -ENODEV; - } - - return 0; -} - -int -nfp_nsp_map_ctrl_bar(nspu_desc_t *desc, uint64_t *pcie_offset) -{ - ssize_t bar0_sym_size; - - if (nfp_nspu_set_bar_from_symbl(desc, "_pf0_net_bar0", - NFP_NET_PF_CFG_EXP_BAR, - pcie_offset, &bar0_sym_size)) - return -ENODEV; - - return 0; -} - -/* - * This is a hardcoded fixed NFP internal CPP bus address for the hw queues unit - * inside the PCIE island. 
- */ -#define NFP_CPP_PCIE_QUEUES ((uint64_t)(1ULL << 39) | 0x80000 | \ - ((uint64_t)0x4 & 0x3f) << 32) - -/* Configure a specific NFP expansion bar for accessing the vNIC rx/tx BARs */ -void -nfp_nsp_map_queues_bar(nspu_desc_t *desc, uint64_t *pcie_offset) -{ - nfp_nspu_mem_bar_cfg(desc, NFP_NET_PF_HW_QUEUES_EXP_BAR, 0, - NFP_CPP_PCIE_QUEUES, pcie_offset); - - /* This is the pcie offset to use by the host */ - *pcie_offset |= ((NFP_NET_PF_HW_QUEUES_EXP_BAR & 0x7) << (27 - 3)); -} - -int -nfp_nsp_eth_config(nspu_desc_t *desc, int port, int up) -{ - union eth_table_entry *entries, *entry; - int modified; - int ret, idx; - int i; - - idx = port; - - RTE_LOG(INFO, PMD, "Hw ethernet port %d configure...\n", port); - rte_spinlock_lock(&desc->nsp_lock); - entries = malloc(NSP_ETH_TABLE_SIZE); - if (!entries) { - rte_spinlock_unlock(&desc->nsp_lock); - return -ENOMEM; - } - - ret = nspu_command(desc, NSP_CMD_READ_ETH_TABLE, 1, 0, entries, - NSP_ETH_TABLE_SIZE, 0); - if (ret) { - rte_spinlock_unlock(&desc->nsp_lock); - free(entries); - return ret; - } - - entry = entries; - - for (i = 0; i < NSP_ETH_MAX_COUNT; i++) { - /* ports in use do not appear sequentially in the table */ - if (!(entry->port & NSP_ETH_PORT_LANES_MASK)) { - /* entry not in use */ - entry++; - continue; - } - if (idx == 0) - break; - idx--; - entry++; - } - - if (i == NSP_ETH_MAX_COUNT) { - rte_spinlock_unlock(&desc->nsp_lock); - free(entries); - return -EINVAL; - } - - if (up && !(entry->state & NSP_ETH_STATE_CONFIGURED)) { - entry->control |= NSP_ETH_STATE_CONFIGURED; - modified = 1; - } - - if (!up && (entry->state & NSP_ETH_STATE_CONFIGURED)) { - entry->control &= ~NSP_ETH_STATE_CONFIGURED; - modified = 1; - } - - if (modified) { - ret = nspu_command(desc, NSP_CMD_WRITE_ETH_TABLE, 0, 1, entries, - 0, NSP_ETH_TABLE_SIZE); - if (!ret) - RTE_LOG(INFO, PMD, - "Hw ethernet port %d configure done\n", port); - else - RTE_LOG(INFO, PMD, - "Hw ethernet port %d configure failed\n", port); - } - rte_spinlock_unlock(&desc->nsp_lock); - free(entries); - return ret; -} - -int -nfp_nsp_eth_read_table(nspu_desc_t *desc, union eth_table_entry **table) -{ - int ret; - - if (!table) - return -EINVAL; - - RTE_LOG(INFO, PMD, "Reading hw ethernet table...\n"); - - /* port 0 allocates the eth table and read it using NSPU */ - *table = malloc(NSP_ETH_TABLE_SIZE); - if (!*table) - return -ENOMEM; - - ret = nspu_command(desc, NSP_CMD_READ_ETH_TABLE, 1, 0, *table, - NSP_ETH_TABLE_SIZE, 0); - if (ret) - return ret; - - RTE_LOG(INFO, PMD, "Done\n"); - - return 0; -} diff --git a/drivers/net/nfp/nfp_nspu.h b/drivers/net/nfp/nfp_nspu.h deleted file mode 100644 index 8c33835e..00000000 --- a/drivers/net/nfp/nfp_nspu.h +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Copyright (c) 2017 Netronome Systems, Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution - * - * 3. Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived from this - * software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -/* - * vim:shiftwidth=8:noexpandtab - * - * @file dpdk/pmd/nfp_nspu.h - * - * Netronome NFP_NET PDM driver - */ - -/* - * NSP is the NFP Service Processor. NSPU is NSP Userspace interface. - * - * NFP NSP helps with firmware/hardware configuration. NSP is another component - * in NFP programmable processor and accessing it from host requires to firstly - * configure a specific NFP PCI expansion BAR. - * - * Once access is ready, configuration can be done reading and writing - * from/to a specific PF PCI BAR window. This same interface will allow to - * create other PCI BAR windows for accessing other NFP components. - * - * This file includes low-level functions, using the NSPU interface, and high - * level functions, invoked by the PMD for using NSP services. This allows - * firmware upload, vNIC PCI BARs mapping and other low-level configurations - * like link setup. - * - * NSP access is done during initialization and it is not involved at all with - * the fast path. - */ - -#include -#include "nfp_net_eth.h" - -typedef struct { - int nfp; /* NFP device */ - int pcie_bar; /* PF PCI BAR to work with */ - int exp_bar; /* Expansion BAR number used by NSPU */ - int barsz; /* PCIE BAR log2 size */ - uint64_t bufaddr; /* commands buffer address */ - size_t buf_size; /* commands buffer size */ - uint64_t windowsz; /* NSPU BAR window size */ - void *cfg_base; /* Expansion BARs address */ - void *mem_base; /* NSP interface */ - rte_spinlock_t nsp_lock; -} nspu_desc_t; - -int nfp_nspu_init(nspu_desc_t *desc, int nfp, int pcie_bar, size_t pcie_barsz, - int exp_bar, void *exp_bar_cfg_base, void *exp_bar_mmap); -int nfp_nsp_get_abi_version(nspu_desc_t *desc, int *major, int *minor); -int nfp_nsp_fw_setup(nspu_desc_t *desc, const char *sym, uint64_t *pcie_offset); -int nfp_nsp_map_ctrl_bar(nspu_desc_t *desc, uint64_t *pcie_offset); -void nfp_nsp_map_queues_bar(nspu_desc_t *desc, uint64_t *pcie_offset); -int nfp_nsp_eth_config(nspu_desc_t *desc, int port, int up); -int nfp_nsp_eth_read_table(nspu_desc_t *desc, union eth_table_entry **table); diff --git a/drivers/net/nfp/nfpcore/nfp-common/nfp_cppat.h b/drivers/net/nfp/nfpcore/nfp-common/nfp_cppat.h new file mode 100644 index 00000000..6e380cca --- /dev/null +++ b/drivers/net/nfp/nfpcore/nfp-common/nfp_cppat.h @@ -0,0 +1,722 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Netronome Systems, Inc. + * All rights reserved. 
+ */ + +#ifndef __NFP_CPPAT_H__ +#define __NFP_CPPAT_H__ + +#include "nfp_platform.h" +#include "nfp_resid.h" + +/* This file contains helpers for creating CPP commands + * + * All magic NFP-6xxx IMB 'mode' numbers here are from: + * Databook (1 August 2013) + * - System Overview and Connectivity + * -- Internal Connectivity + * --- Distributed Switch Fabric - Command Push/Pull (DSF-CPP) Bus + * ---- CPP addressing + * ----- Table 3.6. CPP Address Translation Mode Commands + */ + +#define _NIC_NFP6000_MU_LOCALITY_DIRECT 2 + +static inline int +_nfp6000_decode_basic(uint64_t addr, int *dest_island, int cpp_tgt, int mode, + int addr40, int isld1, int isld0); + +static uint64_t +_nic_mask64(int msb, int lsb, int at0) +{ + uint64_t v; + int w = msb - lsb + 1; + + if (w == 64) + return ~(uint64_t)0; + + if ((lsb + w) > 64) + return 0; + + v = (UINT64_C(1) << w) - 1; + + if (at0) + return v; + + return v << lsb; +} + +/* For VQDR, we may not modify the Channel bits, which might overlap + * with the Index bit. When it does, we need to ensure that isld0 == isld1. + */ +static inline int +_nfp6000_encode_basic(uint64_t *addr, int dest_island, int cpp_tgt, int mode, + int addr40, int isld1, int isld0) +{ + uint64_t _u64; + int iid_lsb, idx_lsb; + int i, v = 0; + int isld[2]; + + isld[0] = isld0; + isld[1] = isld1; + + switch (cpp_tgt) { + case NFP6000_CPPTGT_MU: + /* This function doesn't handle MU */ + return NFP_ERRNO(EINVAL); + case NFP6000_CPPTGT_CTXPB: + /* This function doesn't handle CTXPB */ + return NFP_ERRNO(EINVAL); + default: + break; + } + + switch (mode) { + case 0: + if (cpp_tgt == NFP6000_CPPTGT_VQDR && !addr40) { + /* + * In this specific mode we'd rather not modify the + * address but we can verify if the existing contents + * will point to a valid island. + */ + i = _nfp6000_decode_basic(*addr, &v, cpp_tgt, mode, + addr40, isld1, + isld0); + if (i != 0) + /* Full Island ID and channel bits overlap */ + return i; + + /* + * If dest_island is invalid, the current address won't + * go where expected. + */ + if (dest_island != -1 && dest_island != v) + return NFP_ERRNO(EINVAL); + + /* If dest_island was -1, we don't care */ + return 0; + } + + iid_lsb = (addr40) ? 34 : 26; + + /* <39:34> or <31:26> */ + _u64 = _nic_mask64((iid_lsb + 5), iid_lsb, 0); + *addr &= ~_u64; + *addr |= (((uint64_t)dest_island) << iid_lsb) & _u64; + return 0; + case 1: + if (cpp_tgt == NFP6000_CPPTGT_VQDR && !addr40) { + i = _nfp6000_decode_basic(*addr, &v, cpp_tgt, mode, + addr40, isld1, isld0); + if (i != 0) + /* Full Island ID and channel bits overlap */ + return i; + + /* + * If dest_island is invalid, the current address won't + * go where expected. + */ + if (dest_island != -1 && dest_island != v) + return NFP_ERRNO(EINVAL); + + /* If dest_island was -1, we don't care */ + return 0; + } + + idx_lsb = (addr40) ? 39 : 31; + if (dest_island == isld0) { + /* Only need to clear the Index bit */ + *addr &= ~_nic_mask64(idx_lsb, idx_lsb, 0); + return 0; + } + + if (dest_island == isld1) { + /* Only need to set the Index bit */ + *addr |= (UINT64_C(1) << idx_lsb); + return 0; + } + + return NFP_ERRNO(ENODEV); + case 2: + if (cpp_tgt == NFP6000_CPPTGT_VQDR && !addr40) { + /* iid<0> = addr<30> = channel<0> */ + /* channel<1> = addr<31> = Index */ + + /* + * Special case where we allow channel bits to be set + * before hand and with them select an island. + * So we need to confirm that it's at least plausible. 
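+ * E.g. (an illustrative reading of the decode below): with 32-bit + * addressing, channel 3 means addr<31> == 1 and addr<30> == 1, which + * decodes to island ((isld1 & ~1) | 1); any other dest_island is then + * rejected.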
+ */ + i = _nfp6000_decode_basic(*addr, &v, cpp_tgt, mode, + addr40, isld1, isld0); + if (i != 0) + /* Full Island ID and channel bits overlap */ + return i; + + /* + * If dest_island is invalid, the current address won't + * go where expected. + */ + if (dest_island != -1 && dest_island != v) + return NFP_ERRNO(EINVAL); + + /* If dest_island was -1, we don't care */ + return 0; + } + + /* + * Make sure we compare against isldN values by clearing the + * LSB. This is what the silicon does. + **/ + isld[0] &= ~1; + isld[1] &= ~1; + + idx_lsb = (addr40) ? 39 : 31; + iid_lsb = idx_lsb - 1; + + /* + * Try each option, take first one that fits. Not sure if we + * would want to do some smarter searching and prefer 0 or non-0 + * island IDs. + */ + + for (i = 0; i < 2; i++) { + for (v = 0; v < 2; v++) { + if (dest_island != (isld[i] | v)) + continue; + *addr &= ~_nic_mask64(idx_lsb, iid_lsb, 0); + *addr |= (((uint64_t)i) << idx_lsb); + *addr |= (((uint64_t)v) << iid_lsb); + return 0; + } + } + + return NFP_ERRNO(ENODEV); + case 3: + if (cpp_tgt == NFP6000_CPPTGT_VQDR && !addr40) { + /* + * iid<0> = addr<29> = data + * iid<1> = addr<30> = channel<0> + * channel<1> = addr<31> = Index + */ + i = _nfp6000_decode_basic(*addr, &v, cpp_tgt, mode, + addr40, isld1, isld0); + if (i != 0) + /* Full Island ID and channel bits overlap */ + return i; + + if (dest_island != -1 && dest_island != v) + return NFP_ERRNO(EINVAL); + + /* If dest_island was -1, we don't care */ + return 0; + } + + isld[0] &= ~3; + isld[1] &= ~3; + + idx_lsb = (addr40) ? 39 : 31; + iid_lsb = idx_lsb - 2; + + for (i = 0; i < 2; i++) { + for (v = 0; v < 4; v++) { + if (dest_island != (isld[i] | v)) + continue; + *addr &= ~_nic_mask64(idx_lsb, iid_lsb, 0); + *addr |= (((uint64_t)i) << idx_lsb); + *addr |= (((uint64_t)v) << iid_lsb); + return 0; + } + } + return NFP_ERRNO(ENODEV); + default: + break; + } + + return NFP_ERRNO(EINVAL); +} + +static inline int +_nfp6000_decode_basic(uint64_t addr, int *dest_island, int cpp_tgt, int mode, + int addr40, int isld1, int isld0) +{ + int iid_lsb, idx_lsb; + + switch (cpp_tgt) { + case NFP6000_CPPTGT_MU: + /* This function doesn't handle MU */ + return NFP_ERRNO(EINVAL); + case NFP6000_CPPTGT_CTXPB: + /* This function doesn't handle CTXPB */ + return NFP_ERRNO(EINVAL); + default: + break; + } + + switch (mode) { + case 0: + /* + * For VQDR, in this mode for 32-bit addressing it would be + * islands 0, 16, 32 and 48 depending on channel and upper + * address bits. Since those are not all valid islands, most + * decode cases would result in bad island IDs, but we do them + * anyway since this is decoding an address that is already + * assumed to be used as-is to get to sram. + */ + iid_lsb = (addr40) ? 34 : 26; + *dest_island = (int)(addr >> iid_lsb) & 0x3F; + return 0; + case 1: + /* + * For VQDR 32-bit, this would decode as: + * Channel 0: island#0 + * Channel 1: island#0 + * Channel 2: island#1 + * Channel 3: island#1 + * + * That would be valid as long as both islands have VQDR. + * Let's allow this. + */ + + idx_lsb = (addr40) ? 39 : 31; + if (addr & _nic_mask64(idx_lsb, idx_lsb, 0)) + *dest_island = isld1; + else + *dest_island = isld0; + + return 0; + case 2: + /* + * For VQDR 32-bit: + * Channel 0: (island#0 | 0) + * Channel 1: (island#0 | 1) + * Channel 2: (island#1 | 0) + * Channel 3: (island#1 | 1) + * + * Make sure we compare against isldN values by clearing the + * LSB. This is what the silicon does. + */ + isld0 &= ~1; + isld1 &= ~1; + + idx_lsb = (addr40) ? 
39 : 31; + iid_lsb = idx_lsb - 1; + + if (addr & _nic_mask64(idx_lsb, idx_lsb, 0)) + *dest_island = isld1 | (int)((addr >> iid_lsb) & 1); + else + *dest_island = isld0 | (int)((addr >> iid_lsb) & 1); + + return 0; + case 3: + /* + * In this mode the data address starts to affect the island ID + * so rather not allow it. In some really specific case one + * could use this to send the upper half of the VQDR channel to + * another MU, but this is getting very specific. However, as + * above for mode 0, this is the decoder and the caller should + * validate the resulting IID. This blindly does what the + * silicon would do. + */ + + isld0 &= ~3; + isld1 &= ~3; + + idx_lsb = (addr40) ? 39 : 31; + iid_lsb = idx_lsb - 2; + + if (addr & _nic_mask64(idx_lsb, idx_lsb, 0)) + *dest_island = isld1 | (int)((addr >> iid_lsb) & 3); + else + *dest_island = isld0 | (int)((addr >> iid_lsb) & 3); + + return 0; + default: + break; + } + + return NFP_ERRNO(EINVAL); +} + +static inline int +_nfp6000_cppat_mu_locality_lsb(int mode, int addr40) +{ + switch (mode) { + case 0: + case 1: + case 2: + case 3: + return (addr40) ? 38 : 30; + default: + break; + } + return NFP_ERRNO(EINVAL); +} + +static inline int +_nfp6000_encode_mu(uint64_t *addr, int dest_island, int mode, int addr40, + int isld1, int isld0) +{ + uint64_t _u64; + int iid_lsb, idx_lsb, locality_lsb; + int i, v; + int isld[2]; + int da; + + isld[0] = isld0; + isld[1] = isld1; + locality_lsb = _nfp6000_cppat_mu_locality_lsb(mode, addr40); + + if (((*addr >> locality_lsb) & 3) == _NIC_NFP6000_MU_LOCALITY_DIRECT) + da = 1; + else + da = 0; + + switch (mode) { + case 0: + iid_lsb = (addr40) ? 32 : 24; + _u64 = _nic_mask64((iid_lsb + 5), iid_lsb, 0); + *addr &= ~_u64; + *addr |= (((uint64_t)dest_island) << iid_lsb) & _u64; + return 0; + case 1: + if (da) { + iid_lsb = (addr40) ? 32 : 24; + _u64 = _nic_mask64((iid_lsb + 5), iid_lsb, 0); + *addr &= ~_u64; + *addr |= (((uint64_t)dest_island) << iid_lsb) & _u64; + return 0; + } + + idx_lsb = (addr40) ? 37 : 29; + if (dest_island == isld0) { + *addr &= ~_nic_mask64(idx_lsb, idx_lsb, 0); + return 0; + } + + if (dest_island == isld1) { + *addr |= (UINT64_C(1) << idx_lsb); + return 0; + } + + return NFP_ERRNO(ENODEV); + case 2: + if (da) { + iid_lsb = (addr40) ? 32 : 24; + _u64 = _nic_mask64((iid_lsb + 5), iid_lsb, 0); + *addr &= ~_u64; + *addr |= (((uint64_t)dest_island) << iid_lsb) & _u64; + return 0; + } + + /* + * Make sure we compare against isldN values by clearing the + * LSB. This is what the silicon does. + */ + isld[0] &= ~1; + isld[1] &= ~1; + + idx_lsb = (addr40) ? 37 : 29; + iid_lsb = idx_lsb - 1; + + /* + * Try each option, take first one that fits. Not sure if we + * would want to do some smarter searching and prefer 0 or + * non-0 island IDs. + */ + + for (i = 0; i < 2; i++) { + for (v = 0; v < 2; v++) { + if (dest_island != (isld[i] | v)) + continue; + *addr &= ~_nic_mask64(idx_lsb, iid_lsb, 0); + *addr |= (((uint64_t)i) << idx_lsb); + *addr |= (((uint64_t)v) << iid_lsb); + return 0; + } + } + return NFP_ERRNO(ENODEV); + case 3: + /* + * Only the EMU will use 40 bit addressing. Silently set the + * direct locality bit for everyone else. The SDK toolchain + * uses dest_island <= 0 to test for atypical address encodings + * to support access to local-island CTM with a 32-bit address + * (high-locality is effectively ignored and just used for + * routing to island #0).
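+ * E.g. (a sketch of the branch below): dest_island == 12 gets the + * direct-locality bits ORed in before its island ID is encoded, while + * the EMU islands 24..26 are normally left to the isld0/isld1 search.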
+ */ + if (dest_island > 0 && + (dest_island < 24 || dest_island > 26)) { + *addr |= ((uint64_t)_NIC_NFP6000_MU_LOCALITY_DIRECT) + << locality_lsb; + da = 1; + } + + if (da) { + iid_lsb = (addr40) ? 32 : 24; + _u64 = _nic_mask64((iid_lsb + 5), iid_lsb, 0); + *addr &= ~_u64; + *addr |= (((uint64_t)dest_island) << iid_lsb) & _u64; + return 0; + } + + isld[0] &= ~3; + isld[1] &= ~3; + + idx_lsb = (addr40) ? 37 : 29; + iid_lsb = idx_lsb - 2; + + for (i = 0; i < 2; i++) { + for (v = 0; v < 4; v++) { + if (dest_island != (isld[i] | v)) + continue; + *addr &= ~_nic_mask64(idx_lsb, iid_lsb, 0); + *addr |= (((uint64_t)i) << idx_lsb); + *addr |= (((uint64_t)v) << iid_lsb); + return 0; + } + } + + return NFP_ERRNO(ENODEV); + default: + break; + } + + return NFP_ERRNO(EINVAL); +} + +static inline int +_nfp6000_decode_mu(uint64_t addr, int *dest_island, int mode, int addr40, + int isld1, int isld0) +{ + int iid_lsb, idx_lsb, locality_lsb; + int da; + + locality_lsb = _nfp6000_cppat_mu_locality_lsb(mode, addr40); + + if (((addr >> locality_lsb) & 3) == _NIC_NFP6000_MU_LOCALITY_DIRECT) + da = 1; + else + da = 0; + + switch (mode) { + case 0: + iid_lsb = (addr40) ? 32 : 24; + *dest_island = (int)(addr >> iid_lsb) & 0x3F; + return 0; + case 1: + if (da) { + iid_lsb = (addr40) ? 32 : 24; + *dest_island = (int)(addr >> iid_lsb) & 0x3F; + return 0; + } + + idx_lsb = (addr40) ? 37 : 29; + + if (addr & _nic_mask64(idx_lsb, idx_lsb, 0)) + *dest_island = isld1; + else + *dest_island = isld0; + + return 0; + case 2: + if (da) { + iid_lsb = (addr40) ? 32 : 24; + *dest_island = (int)(addr >> iid_lsb) & 0x3F; + return 0; + } + /* + * Make sure we compare against isldN values by clearing the + * LSB. This is what the silicon does. + */ + isld0 &= ~1; + isld1 &= ~1; + + idx_lsb = (addr40) ? 37 : 29; + iid_lsb = idx_lsb - 1; + + if (addr & _nic_mask64(idx_lsb, idx_lsb, 0)) + *dest_island = isld1 | (int)((addr >> iid_lsb) & 1); + else + *dest_island = isld0 | (int)((addr >> iid_lsb) & 1); + + return 0; + case 3: + if (da) { + iid_lsb = (addr40) ? 32 : 24; + *dest_island = (int)(addr >> iid_lsb) & 0x3F; + return 0; + } + + isld0 &= ~3; + isld1 &= ~3; + + idx_lsb = (addr40) ? 
37 : 29; + iid_lsb = idx_lsb - 2; + + if (addr & _nic_mask64(idx_lsb, idx_lsb, 0)) + *dest_island = isld1 | (int)((addr >> iid_lsb) & 3); + else + *dest_island = isld0 | (int)((addr >> iid_lsb) & 3); + + return 0; + default: + break; + } + + return NFP_ERRNO(EINVAL); +} + +static inline int +_nfp6000_cppat_addr_encode(uint64_t *addr, int dest_island, int cpp_tgt, + int mode, int addr40, int isld1, int isld0) +{ + switch (cpp_tgt) { + case NFP6000_CPPTGT_NBI: + case NFP6000_CPPTGT_VQDR: + case NFP6000_CPPTGT_ILA: + case NFP6000_CPPTGT_PCIE: + case NFP6000_CPPTGT_ARM: + case NFP6000_CPPTGT_CRYPTO: + case NFP6000_CPPTGT_CLS: + return _nfp6000_encode_basic(addr, dest_island, cpp_tgt, mode, + addr40, isld1, isld0); + + case NFP6000_CPPTGT_MU: + return _nfp6000_encode_mu(addr, dest_island, mode, addr40, + isld1, isld0); + + case NFP6000_CPPTGT_CTXPB: + if (mode != 1 || addr40 != 0) + return NFP_ERRNO(EINVAL); + + *addr &= ~_nic_mask64(29, 24, 0); + *addr |= (((uint64_t)dest_island) << 24) & + _nic_mask64(29, 24, 0); + return 0; + default: + break; + } + + return NFP_ERRNO(EINVAL); +} + +static inline int +_nfp6000_cppat_addr_decode(uint64_t addr, int *dest_island, int cpp_tgt, + int mode, int addr40, int isld1, int isld0) +{ + switch (cpp_tgt) { + case NFP6000_CPPTGT_NBI: + case NFP6000_CPPTGT_VQDR: + case NFP6000_CPPTGT_ILA: + case NFP6000_CPPTGT_PCIE: + case NFP6000_CPPTGT_ARM: + case NFP6000_CPPTGT_CRYPTO: + case NFP6000_CPPTGT_CLS: + return _nfp6000_decode_basic(addr, dest_island, cpp_tgt, mode, + addr40, isld1, isld0); + + case NFP6000_CPPTGT_MU: + return _nfp6000_decode_mu(addr, dest_island, mode, addr40, + isld1, isld0); + + case NFP6000_CPPTGT_CTXPB: + if (mode != 1 || addr40 != 0) + return -EINVAL; + *dest_island = (int)(addr >> 24) & 0x3F; + return 0; + default: + break; + } + + return -EINVAL; +} + +static inline int +_nfp6000_cppat_addr_iid_clear(uint64_t *addr, int cpp_tgt, int mode, int addr40) +{ + int iid_lsb, locality_lsb, da; + + switch (cpp_tgt) { + case NFP6000_CPPTGT_NBI: + case NFP6000_CPPTGT_VQDR: + case NFP6000_CPPTGT_ILA: + case NFP6000_CPPTGT_PCIE: + case NFP6000_CPPTGT_ARM: + case NFP6000_CPPTGT_CRYPTO: + case NFP6000_CPPTGT_CLS: + switch (mode) { + case 0: + iid_lsb = (addr40) ? 34 : 26; + *addr &= ~(UINT64_C(0x3F) << iid_lsb); + return 0; + case 1: + iid_lsb = (addr40) ? 39 : 31; + *addr &= ~_nic_mask64(iid_lsb, iid_lsb, 0); + return 0; + case 2: + iid_lsb = (addr40) ? 38 : 30; + *addr &= ~_nic_mask64(iid_lsb + 1, iid_lsb, 0); + return 0; + case 3: + iid_lsb = (addr40) ? 37 : 29; + *addr &= ~_nic_mask64(iid_lsb + 2, iid_lsb, 0); + return 0; + default: + break; + } + case NFP6000_CPPTGT_MU: + locality_lsb = _nfp6000_cppat_mu_locality_lsb(mode, addr40); + da = (((*addr >> locality_lsb) & 3) == + _NIC_NFP6000_MU_LOCALITY_DIRECT); + switch (mode) { + case 0: + iid_lsb = (addr40) ? 32 : 24; + *addr &= ~(UINT64_C(0x3F) << iid_lsb); + return 0; + case 1: + if (da) { + iid_lsb = (addr40) ? 32 : 24; + *addr &= ~(UINT64_C(0x3F) << iid_lsb); + return 0; + } + iid_lsb = (addr40) ? 37 : 29; + *addr &= ~_nic_mask64(iid_lsb, iid_lsb, 0); + return 0; + case 2: + if (da) { + iid_lsb = (addr40) ? 32 : 24; + *addr &= ~(UINT64_C(0x3F) << iid_lsb); + return 0; + } + + iid_lsb = (addr40) ? 36 : 28; + *addr &= ~_nic_mask64(iid_lsb + 1, iid_lsb, 0); + return 0; + case 3: + if (da) { + iid_lsb = (addr40) ? 32 : 24; + *addr &= ~(UINT64_C(0x3F) << iid_lsb); + return 0; + } + + iid_lsb = (addr40) ? 
35 : 27; + *addr &= ~_nic_mask64(iid_lsb + 2, iid_lsb, 0); + return 0; + default: + break; + } + case NFP6000_CPPTGT_CTXPB: + if (mode != 1 || addr40 != 0) + return 0; + *addr &= ~(UINT64_C(0x3F) << 24); + return 0; + default: + break; + } + + return NFP_ERRNO(EINVAL); +} + +#endif /* __NFP_CPPAT_H__ */ diff --git a/drivers/net/nfp/nfpcore/nfp-common/nfp_platform.h b/drivers/net/nfp/nfpcore/nfp-common/nfp_platform.h new file mode 100644 index 00000000..b8541c59 --- /dev/null +++ b/drivers/net/nfp/nfpcore/nfp-common/nfp_platform.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Netronome Systems, Inc. + * All rights reserved. + */ + +#ifndef __NFP_PLATFORM_H__ +#define __NFP_PLATFORM_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifndef BIT_ULL +#define BIT(x) (1 << (x)) +#define BIT_ULL(x) (1ULL << (x)) +#endif + +#ifndef ARRAY_SIZE +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) +#endif + +#define NFP_ERRNO(err) (errno = (err), -1) +#define NFP_ERRNO_RET(err, ret) (errno = (err), (ret)) +#define NFP_NOERR(errv) (errno) +#define NFP_ERRPTR(err) (errno = (err), NULL) +#define NFP_PTRERR(errv) (errno) + +#endif /* __NFP_PLATFORM_H__ */ diff --git a/drivers/net/nfp/nfpcore/nfp-common/nfp_resid.h b/drivers/net/nfp/nfpcore/nfp-common/nfp_resid.h new file mode 100644 index 00000000..0e03948e --- /dev/null +++ b/drivers/net/nfp/nfpcore/nfp-common/nfp_resid.h @@ -0,0 +1,592 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Netronome Systems, Inc. + * All rights reserved. + */ + +#ifndef __NFP_RESID_H__ +#define __NFP_RESID_H__ + +#if (!defined(_NFP_RESID_NO_C_FUNC) && \ + (defined(__NFP_TOOL_NFCC) || defined(__NFP_TOOL_NFAS))) +#define _NFP_RESID_NO_C_FUNC +#endif + +#ifndef _NFP_RESID_NO_C_FUNC +#include "nfp_platform.h" +#endif + +/* + * NFP Chip Architectures + * + * These are semi-arbitrary values to indicate an NFP architecture. + * They serve as a software view of a group of chip families, not necessarily a + * direct mapping to actual hardware design. + */ +#define NFP_CHIP_ARCH_YD 1 +#define NFP_CHIP_ARCH_TH 2 + +/* + * NFP Chip Families. + * + * These are not enums, because they need to be microcode compatible. + * They are also not maskable. + * + * Note: The NFP-4xxx family is handled as NFP-6xxx in most software + * components. + * + */ +#define NFP_CHIP_FAMILY_NFP6000 0x6000 /* ARCH_TH */ + +/* NFP Microengine/Flow Processing Core Versions */ +#define NFP_CHIP_ME_VERSION_2_7 0x0207 +#define NFP_CHIP_ME_VERSION_2_8 0x0208 +#define NFP_CHIP_ME_VERSION_2_9 0x0209 + +/* NFP Chip Base Revisions. Minor stepping can just be added to these */ +#define NFP_CHIP_REVISION_A0 0x00 +#define NFP_CHIP_REVISION_B0 0x10 +#define NFP_CHIP_REVISION_C0 0x20 +#define NFP_CHIP_REVISION_PF 0xff /* Maximum possible revision */ + +/* CPP Targets for each chip architecture */ +#define NFP6000_CPPTGT_NBI 1 +#define NFP6000_CPPTGT_VQDR 2 +#define NFP6000_CPPTGT_ILA 6 +#define NFP6000_CPPTGT_MU 7 +#define NFP6000_CPPTGT_PCIE 9 +#define NFP6000_CPPTGT_ARM 10 +#define NFP6000_CPPTGT_CRYPTO 12 +#define NFP6000_CPPTGT_CTXPB 14 +#define NFP6000_CPPTGT_CLS 15 + +/* + * Wildcard indicating a CPP read or write action + * + * The action used will be either read or write depending on whether a read or + * write instruction/call is performed on the NFP_CPP_ID. It is recommended that + * the RW action is used even if all actions to be performed on a NFP_CPP_ID are + * known to be only reads or writes.
Doing so will in many cases save NFP CPP + * internal software resources. + */ +#define NFP_CPP_ACTION_RW 32 + +#define NFP_CPP_TARGET_ID_MASK 0x1f + +/* + * NFP_CPP_ID - pack target, token, and action into a CPP ID. + * + * Create a 32-bit CPP identifier representing the access to be made. + * These identifiers are used as parameters to other NFP CPP functions. Some + * CPP devices may allow wildcard identifiers to be specified. + * + * @param[in] target NFP CPP target id + * @param[in] action NFP CPP action id + * @param[in] token NFP CPP token id + * @return NFP CPP ID + */ +#define NFP_CPP_ID(target, action, token) \ + ((((target) & 0x7f) << 24) | (((token) & 0xff) << 16) | \ + (((action) & 0xff) << 8)) + +#define NFP_CPP_ISLAND_ID(target, action, token, island) \ + ((((target) & 0x7f) << 24) | (((token) & 0xff) << 16) | \ + (((action) & 0xff) << 8) | (((island) & 0xff) << 0)) + +#ifndef _NFP_RESID_NO_C_FUNC + +/** + * Return the NFP CPP target of a NFP CPP ID + * @param[in] id NFP CPP ID + * @return NFP CPP target + */ +static inline uint8_t +NFP_CPP_ID_TARGET_of(uint32_t id) +{ + return (id >> 24) & NFP_CPP_TARGET_ID_MASK; +} + +/* + * Return the NFP CPP token of a NFP CPP ID + * @param[in] id NFP CPP ID + * @return NFP CPP token + */ +static inline uint8_t +NFP_CPP_ID_TOKEN_of(uint32_t id) +{ + return (id >> 16) & 0xff; +} + +/* + * Return the NFP CPP action of a NFP CPP ID + * @param[in] id NFP CPP ID + * @return NFP CPP action + */ +static inline uint8_t +NFP_CPP_ID_ACTION_of(uint32_t id) +{ + return (id >> 8) & 0xff; +} + +/* + * Return the NFP CPP island of a NFP CPP ID + * @param[in] id NFP CPP ID + * @return NFP CPP island + */ +static inline uint8_t +NFP_CPP_ID_ISLAND_of(uint32_t id) +{ + return (id) & 0xff; +} + +#endif /* _NFP_RESID_NO_C_FUNC */ + +/* + * Check if @p chip_family is an ARCH_TH chip. + * @param chip_family One of NFP_CHIP_FAMILY_* + */ +#define NFP_FAMILY_IS_ARCH_TH(chip_family) \ + ((int)(chip_family) == (int)NFP_CHIP_FAMILY_NFP6000) + +/* + * Get the NFP_CHIP_ARCH_* of @p chip_family. + * @param chip_family One of NFP_CHIP_FAMILY_* + */ +#define NFP_FAMILY_ARCH(x) \ + (__extension__ ({ \ + typeof(x) _x = (x); \ + (NFP_FAMILY_IS_ARCH_TH(_x) ? NFP_CHIP_ARCH_TH : \ + NFP_FAMILY_IS_ARCH_YD(_x) ? NFP_CHIP_ARCH_YD : -1) \ + })) + +/* + * Check if @p chip_family is an NFP-6xxx chip. + * @param chip_family One of NFP_CHIP_FAMILY_* + */ +#define NFP_FAMILY_IS_NFP6000(chip_family) \ + ((int)(chip_family) == (int)NFP_CHIP_FAMILY_NFP6000) + +/* + * Make microengine ID for NFP-6xxx. + * @param island_id Island ID. + * @param menum ME number, 0 based, within island. + * + * NOTE: menum should really be unsigned - MSC compiler throws error (not + * warning) if a clause is always true i.e. menum >= 0 if cluster_num is type + * unsigned int hence the cast of the menum to an int in that particular clause + */ +#define NFP6000_MEID(a, b) \ + (__extension__ ({ \ + typeof(a) _a = (a); \ + typeof(b) _b = (b); \ + (((((int)(_a) & 0x3F) == (int)(_a)) && \ + (((int)(_b) >= 0) && ((int)(_b) < 12))) ? \ + (int)(((_a) << 4) | ((_b) + 4)) : -1) \ + })) + +/* + * Do a general sanity check on the ME ID. + * The check is on the highest possible island ID for the chip family and the + * microengine number must be a master ID. + * @param meid ME ID as created by NFP6000_MEID + */ +#define NFP6000_MEID_IS_VALID(meid) \ + (__extension__ ({ \ + typeof(meid) _a = (meid); \ + ((((_a) >> 4) < 64) && (((_a) >> 4) >= 0) && \ + (((_a) & 0xF) >= 4)) \ + })) + +/* + * Extract island ID from ME ID.
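+ * E.g. (worked value): NFP6000_MEID(32, 5) packs to 0x209; + * NFP6000_MEID_ISLAND_of(0x209) recovers 32 and NFP6000_MEID_MENUM_of(0x209) + * below recovers 5.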
+ * @param meid ME ID as created by NFP6000_MEID + */ +#define NFP6000_MEID_ISLAND_of(meid) (((meid) >> 4) & 0x3F) + +/* + * Extract microengine number (0 based) from ME ID. + * @param meid ME ID as created by NFP6000_MEID + */ +#define NFP6000_MEID_MENUM_of(meid) (((meid) & 0xF) - 4) + +/* + * Extract microengine group number (0 based) from ME ID. + * The group is two code-sharing microengines, so group 0 refers to MEs 0,1, + * group 1 refers to MEs 2,3 etc. + * @param meid ME ID as created by NFP6000_MEID + */ +#define NFP6000_MEID_MEGRP_of(meid) (NFP6000_MEID_MENUM_of(meid) >> 1) + +#ifndef _NFP_RESID_NO_C_FUNC + +/* + * Convert a string to an ME ID. + * + * @param s A string of format iX.meY + * @param endptr If non-NULL, *endptr will point to the trailing string + * after the ME ID part of the string, which is either + * an empty string or the first character after the separating + * period. + * @return ME ID on success, -1 on error. + */ +int nfp6000_idstr2meid(const char *s, const char **endptr); + +/* + * Extract island ID from string. + * + * Example: + * char *c; + * int val = nfp6000_idstr2island("i32.me5", &c); + * // val == 32, c == "me5" + * val = nfp6000_idstr2island("i32", &c); + * // val == 32, c == "" + * + * @param s A string of format "iX.anything" or "iX" + * @param endptr If non-NULL, *endptr will point to the trailing string + * after the island part of the string, which is either + * an empty string or the first character after the separating + * period. + * @return If successful, the island ID, -1 on error. + */ +int nfp6000_idstr2island(const char *s, const char **endptr); + +/* + * Extract microengine number from string. + * + * Example: + * char *c; + * int menum = nfp6000_idstr2menum("me5.anything", &c); + * // menum == 5, c == "anything" + * menum = nfp6000_idstr2menum("me5", &c); + * // menum == 5, c == "" + * + * @param s A string of format "meX.anything" or "meX" + * @param endptr If non-NULL, *endptr will point to the trailing string + * after the ME number part of the string, which is either + * an empty string or the first character after the separating + * period. + * @return If successful, the ME number, -1 on error. + */ +int nfp6000_idstr2menum(const char *s, const char **endptr); + +/* + * Extract context number from string. + * + * Example: + * char *c; + * int val = nfp6000_idstr2ctxnum("ctx5.anything", &c); + * // val == 5, c == "anything" + * val = nfp6000_idstr2ctxnum("ctx5", &c); + * // val == 5, c == "" + * + * @param s A string of format "ctxN.anything" or "ctxN" + * @param endptr If non-NULL, *endptr will point to the trailing string + * after the context number part of the string, which is either + * an empty string or the first character after the separating + * period. + * @return If successful, the context number, -1 on error. + */ +int nfp6000_idstr2ctxnum(const char *s, const char **endptr); + +/* + * Extract microengine group number from string. + * + * Example: + * char *c; + * int val = nfp6000_idstr2megrp("tg2.anything", &c); + * // val == 2, c == "anything" + * val = nfp6000_idstr2megrp("tg5", &c); + * // val == 5, c == "" + * + * @param s A string of format "tgX.anything" or "tgX" + * @param endptr If non-NULL, *endptr will point to the trailing string + * after the ME group part of the string, which is either + * an empty string or the first character after the separating + * period. + * @return If successful, the ME group number, -1 on error.
+ */ +int nfp6000_idstr2megrp(const char *s, const char **endptr); + +/* + * Create ME ID string of format "iX[.meY]". + * + * @param s Pointer to char buffer of size NFP_MEID_STR_SZ. + * The resulting string is output here. + * @param meid Microengine ID. + * @return Pointer to "s" on success, NULL on error. + */ +const char *nfp6000_meid2str(char *s, int meid); + +/* + * Create ME ID string of format "name[.meY]" or "iX[.meY]". + * + * @param s Pointer to char buffer of size NFP_MEID_STR_SZ. + * The resulting string is output here. + * @param meid Microengine ID. + * @return Pointer to "s" on success, NULL on error. + * + * Similar to nfp6000_meid2str() except use an alias instead of "iX" + * if one exists for the island. + */ +const char *nfp6000_meid2altstr(char *s, int meid); + +/* + * Create string of format "iX". + * + * @param s Pointer to char buffer of size NFP_MEID_STR_SZ. + * The resulting string is output here. + * @param island_id Island ID. + * @return Pointer to "s" on success, NULL on error. + */ +const char *nfp6000_island2str(char *s, int island_id); + +/* + * Create string of format "name", an island alias. + * + * @param s Pointer to char buffer of size NFP_MEID_STR_SZ. + * The resulting string is output here. + * @param island_id Island ID. + * @return Pointer to "s" on success, NULL on error. + */ +const char *nfp6000_island2altstr(char *s, int island_id); + +/* + * Create string of format "meY". + * + * @param s Pointer to char buffer of size NFP_MEID_STR_SZ. + * The resulting string is output here. + * @param menum Microengine number within island. + * @return Pointer to "s" on success, NULL on error. + */ +const char *nfp6000_menum2str(char *s, int menum); + +/* + * Create string of format "ctxY". + * + * @param s Pointer to char buffer of size NFP_MEID_STR_SZ. + * The resulting string is output here. + * @param ctxnum Context number within microengine. + * @return Pointer to "s" on success, NULL on error. + */ +const char *nfp6000_ctxnum2str(char *s, int ctxnum); + +/* + * Create string of format "tgY". + * + * @param s Pointer to char buffer of size NFP_MEID_STR_SZ. + * The resulting string is output here. + * @param megrp Microengine group number within cluster. + * @return Pointer to "s" on success, NULL on error. + */ +const char *nfp6000_megrp2str(char *s, int megrp); + +/* + * Convert a string to an ME ID. + * + * @param chip_family Chip family ID + * @param s A string of format iX.meY (or clX.meY) + * @param endptr If non-NULL, *endptr will point to the trailing + * string after the ME ID part of the string, which + * is either an empty string or the first character + * after the separating period. + * @return ME ID on success, -1 on error. + */ +int nfp_idstr2meid(int chip_family, const char *s, const char **endptr); + +/* + * Extract island ID from string. + * + * Example: + * char *c; + * int val = nfp_idstr2island(chip, "i32.me5", &c); + * // val == 32, c == "me5" + * val = nfp_idstr2island(chip, "i32", &c); + * // val == 32, c == "" + * + * @param chip_family Chip family ID + * @param s A string of format "iX.anything" or "iX" + * @param endptr If non-NULL, *endptr will point to the trailing + * string after the island part of the string, which + * is either an empty string or the first character + * after the separating period. + * @return The island ID on success, -1 on error. + */ +int nfp_idstr2island(int chip_family, const char *s, const char **endptr); + +/* + * Extract microengine number from string.
+ * + * Example: + * char *c; + * int menum = nfp_idstr2menum("me5.anything", &c); + * // menum == 5, c == "anything" + * menum = nfp_idstr2menum("me5", &c); + * // menum == 5, c == "" + * + * @param chip_family Chip family ID + * @param s A string of format "meX.anything" or "meX" + * @param endptr If non-NULL, *endptr will point to the trailing + * string after the ME number part of the string, which + * is either an empty string or the first character + * after the separating period. + * @return The ME number on success, -1 on error. + */ +int nfp_idstr2menum(int chip_family, const char *s, const char **endptr); + +/* + * Extract context number from string. + * + * Example: + * char *c; + * int val = nfp_idstr2ctxnum("ctx5.anything", &c); + * // val == 5, c == "anything" + * val = nfp_idstr2ctxnum("ctx5", &c); + * // val == 5, c == "" + * + * @param s A string of format "ctxN.anything" or "ctxN" + * @param endptr If non-NULL, *endptr will point to the trailing string + * after the context number part of the string, which is either + * an empty string or the first character after the separating + * period. + * @return If successful, the context number, -1 on error. + */ +int nfp_idstr2ctxnum(int chip_family, const char *s, const char **endptr); + +/* + * Extract microengine group number from string. + * + * Example: + * char *c; + * int val = nfp_idstr2megrp("tg2.anything", &c); + * // val == 2, c == "anything" + * val = nfp_idstr2megrp("tg5", &c); + * // val == 5, c == "" + * + * @param s A string of format "tgX.anything" or "tgX" + * @param endptr If non-NULL, *endptr will point to the trailing string + * after the ME group part of the string, which is either + * an empty string or the first character after the separating + * period. + * @return If successful, the ME group number, -1 on error. + */ +int nfp_idstr2megrp(int chip_family, const char *s, const char **endptr); + +/* + * Create ME ID string of format "iX[.meY]". + * + * @param chip_family Chip family ID + * @param s Pointer to char buffer of size NFP_MEID_STR_SZ. + * The resulting string is output here. + * @param meid Microengine ID. + * @return Pointer to "s" on success, NULL on error. + */ +const char *nfp_meid2str(int chip_family, char *s, int meid); + +/* + * Create ME ID string of format "name[.meY]" or "iX[.meY]". + * + * @param chip_family Chip family ID + * @param s Pointer to char buffer of size NFP_MEID_STR_SZ. + * The resulting string is output here. + * @param meid Microengine ID. + * @return Pointer to "s" on success, NULL on error. + * + * Similar to nfp_meid2str() except use an alias instead of "iX" + * if one exists for the island. + */ +const char *nfp_meid2altstr(int chip_family, char *s, int meid); + +/* + * Create string of format "iX". + * + * @param chip_family Chip family ID + * @param s Pointer to char buffer of size NFP_MEID_STR_SZ. + * The resulting string is output here. + * @param island_id Island ID. + * @return Pointer to "s" on success, NULL on error. + */ +const char *nfp_island2str(int chip_family, char *s, int island_id); + +/* + * Create string of format "name", an island alias. + * + * @param chip_family Chip family ID + * @param s Pointer to char buffer of size NFP_MEID_STR_SZ. + * The resulting string is output here. + * @param island_id Island ID. + * @return Pointer to "s" on success, NULL on error. + */ +const char *nfp_island2altstr(int chip_family, char *s, int island_id); + +/* + * Create string of format "meY".
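+ * E.g. (a usage sketch; family and buf are placeholders): + * nfp_menum2str(family, buf, 5) would produce "me5" in buf.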
+ * + * @param chip_family Chip family ID + * @param s Pointer to char buffer of size NFP_MEID_STR_SZ. + * The resulting string is output here. + * @param menum Microengine number within island. + * @return Pointer to "s" on success, NULL on error. + */ +const char *nfp_menum2str(int chip_family, char *s, int menum); + +/* + * Create string of format "ctxY". + * + * @param s Pointer to char buffer of size NFP_MEID_STR_SZ. + * The resulting string is output here. + * @param ctxnum Context number within microengine. + * @return Pointer to "s" on success, NULL on error. + */ +const char *nfp_ctxnum2str(int chip_family, char *s, int ctxnum); + +/* + * Create string of format "tgY". + * + * @param s Pointer to char buffer of size NFP_MEID_STR_SZ. + * The resulting string is output here. + * @param megrp Microengine group number within cluster. + * @return Pointer to "s" on success, NULL on error. + */ +const char *nfp_megrp2str(int chip_family, char *s, int megrp); + +/* + * Convert a two character string to revision number. + * + * Revision integer is 0x00 for A0, 0x11 for B1 etc. + * + * @param s Two character string. + * @return Revision number, -1 on error + */ +int nfp_idstr2rev(const char *s); + +/* + * Create string from revision number. + * + * String will be upper case. + * + * @param s Pointer to char buffer with size of at least 3 + * for 2 characters and string terminator. + * @param rev Revision number. + * @return Pointer to "s" on success, NULL on error. + */ +const char *nfp_rev2str(char *s, int rev); + +/* + * Get the NFP CPP address from a string + * + * String is in the format [island@]target[:[action:[token:]]address] + * + * @param chip_family Chip family ID + * @param tid Pointer to string to parse + * @param cpp_idp Pointer to CPP ID + * @param cpp_addrp Pointer to CPP address + * @return 0 on success, or -1 and errno + */ +int nfp_str2cpp(int chip_family, + const char *tid, + uint32_t *cpp_idp, + uint64_t *cpp_addrp); + + +#endif /* _NFP_RESID_NO_C_FUNC */ + +#endif /* __NFP_RESID_H__ */ diff --git a/drivers/net/nfp/nfpcore/nfp6000/nfp6000.h b/drivers/net/nfp/nfpcore/nfp6000/nfp6000.h new file mode 100644 index 00000000..47e1ddae --- /dev/null +++ b/drivers/net/nfp/nfpcore/nfp6000/nfp6000.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Netronome Systems, Inc. + * All rights reserved. + */ + +#ifndef __NFP_NFP6000_H__ +#define __NFP_NFP6000_H__ + +/* CPP Target IDs */ +#define NFP_CPP_TARGET_INVALID 0 +#define NFP_CPP_TARGET_NBI 1 +#define NFP_CPP_TARGET_QDR 2 +#define NFP_CPP_TARGET_ILA 6 +#define NFP_CPP_TARGET_MU 7 +#define NFP_CPP_TARGET_PCIE 9 +#define NFP_CPP_TARGET_ARM 10 +#define NFP_CPP_TARGET_CRYPTO 12 +#define NFP_CPP_TARGET_ISLAND_XPB 14 /* Shared with CAP */ +#define NFP_CPP_TARGET_ISLAND_CAP 14 /* Shared with XPB */ +#define NFP_CPP_TARGET_CT_XPB 14 +#define NFP_CPP_TARGET_LOCAL_SCRATCH 15 +#define NFP_CPP_TARGET_CLS NFP_CPP_TARGET_LOCAL_SCRATCH + +#define NFP_ISL_EMEM0 24 + +#define NFP_MU_ADDR_ACCESS_TYPE_MASK 3ULL +#define NFP_MU_ADDR_ACCESS_TYPE_DIRECT 2ULL + +static inline int +nfp_cppat_mu_locality_lsb(int mode, int addr40) +{ + switch (mode) { + case 0 ... 3: + return addr40 ? 
38 : 30; + default: + return -EINVAL; + } +} + +#endif /* NFP_NFP6000_H */ diff --git a/drivers/net/nfp/nfpcore/nfp6000/nfp_xpb.h b/drivers/net/nfp/nfpcore/nfp6000/nfp_xpb.h new file mode 100644 index 00000000..7ada1bb2 --- /dev/null +++ b/drivers/net/nfp/nfpcore/nfp6000/nfp_xpb.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Netronome Systems, Inc. + * All rights reserved. + */ + +#ifndef __NFP_XPB_H__ +#define __NFP_XPB_H__ + +/* + * For use with NFP6000 Databook "XPB Addressing" section + */ +#define NFP_XPB_OVERLAY(island) (((island) & 0x3f) << 24) + +#define NFP_XPB_ISLAND(island) (NFP_XPB_OVERLAY(island) + 0x60000) + +#define NFP_XPB_ISLAND_of(offset) (((offset) >> 24) & 0x3F) + +/* + * For use with NFP6000 Databook "XPB Island and Device IDs" chapter + */ +#define NFP_XPB_DEVICE(island, slave, device) \ + (NFP_XPB_OVERLAY(island) | \ + (((slave) & 3) << 22) | \ + (((device) & 0x3f) << 16)) + +#endif /* NFP_XPB_H */ diff --git a/drivers/net/nfp/nfpcore/nfp_cpp.h b/drivers/net/nfp/nfpcore/nfp_cpp.h new file mode 100644 index 00000000..de2ff84e --- /dev/null +++ b/drivers/net/nfp/nfpcore/nfp_cpp.h @@ -0,0 +1,779 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Netronome Systems, Inc. + * All rights reserved. + */ + +#ifndef __NFP_CPP_H__ +#define __NFP_CPP_H__ + +#include "nfp-common/nfp_platform.h" +#include "nfp-common/nfp_resid.h" + +struct nfp_cpp_mutex; + +/* + * NFP CPP handle + */ +struct nfp_cpp { + uint32_t model; + uint32_t interface; + uint8_t *serial; + int serial_len; + void *priv; + + /* Mutex cache */ + struct nfp_cpp_mutex *mutex_cache; + const struct nfp_cpp_operations *op; + + /* + * NFP-6xxx originating island IMB CPP Address Translation. CPP Target + * ID is index into array. Values are obtained at runtime from local + * island XPB CSRs. + */ + uint32_t imb_cat_table[16]; + + int driver_lock_needed; +}; + +/* + * NFP CPP device area handle + */ +struct nfp_cpp_area { + struct nfp_cpp *cpp; + char *name; + unsigned long long offset; + unsigned long size; + /* Here follows the 'priv' part of nfp_cpp_area. */ +}; + +/* + * NFP CPP operations structure + */ +struct nfp_cpp_operations { + /* Size of priv area in struct nfp_cpp_area */ + size_t area_priv_size; + + /* Instance an NFP CPP */ + int (*init)(struct nfp_cpp *cpp, const char *devname); + + /* + * Free the bus. 
+ * Called only once, during nfp_cpp_unregister() + */ + void (*free)(struct nfp_cpp *cpp); + + /* + * Initialize a new NFP CPP area + * NOTE: This is _not_ serialized + */ + int (*area_init)(struct nfp_cpp_area *area, + uint32_t dest, + unsigned long long address, + unsigned long size); + /* + * Clean up a NFP CPP area before it is freed + * NOTE: This is _not_ serialized + */ + void (*area_cleanup)(struct nfp_cpp_area *area); + + /* + * Acquire resources for a NFP CPP area + * Serialized + */ + int (*area_acquire)(struct nfp_cpp_area *area); + /* + * Release resources for a NFP CPP area + * Serialized + */ + void (*area_release)(struct nfp_cpp_area *area); + /* + * Return a void IO pointer to a NFP CPP area + * NOTE: This is _not_ serialized + */ + + void *(*area_iomem)(struct nfp_cpp_area *area); + + void *(*area_mapped)(struct nfp_cpp_area *area); + /* + * Perform a read from a NFP CPP area + * Serialized + */ + int (*area_read)(struct nfp_cpp_area *area, + void *kernel_vaddr, + unsigned long offset, + unsigned int length); + /* + * Perform a write to a NFP CPP area + * Serialized + */ + int (*area_write)(struct nfp_cpp_area *area, + const void *kernel_vaddr, + unsigned long offset, + unsigned int length); +}; + +/* + * This should be the only external function the transport + * module supplies + */ +const struct nfp_cpp_operations *nfp_cpp_transport_operations(void); + +/* + * Set the model id + * + * @param cpp NFP CPP operations structure + * @param model Model ID + */ +void nfp_cpp_model_set(struct nfp_cpp *cpp, uint32_t model); + +/* + * Set the interface ID of a nfp_cpp struct + * + * @param cpp NFP CPP operations structure + * @param interface Interface ID + */ +void nfp_cpp_interface_set(struct nfp_cpp *cpp, uint32_t interface); + +/* + * Set the serial number of a nfp_cpp struct + * + * @param cpp NFP CPP operations structure + * @param serial NFP serial byte array + * @param serial_len Length of the serial byte array + */ +int nfp_cpp_serial_set(struct nfp_cpp *cpp, const uint8_t *serial, + size_t serial_len); + +/* + * Set the private data of the nfp_cpp instance + * + * @param cpp NFP CPP operations structure + * @param priv Opaque device pointer + */ +void nfp_cpp_priv_set(struct nfp_cpp *cpp, void *priv); + +/* + * Return the private data of the nfp_cpp instance + * + * @param cpp NFP CPP operations structure + * @return Opaque device pointer + */ +void *nfp_cpp_priv(struct nfp_cpp *cpp); + +/* + * Get the privately allocated portion of a NFP CPP area handle + * + * @param cpp_area NFP CPP area handle + * @return Pointer to the private area, or NULL on failure + */ +void *nfp_cpp_area_priv(struct nfp_cpp_area *cpp_area); + +uint32_t __nfp_cpp_model_autodetect(struct nfp_cpp *cpp); + +/* + * NFP CPP core interface for CPP clients. + */ + +/* + * Open a NFP CPP handle to a CPP device + * + * @param[in] devname Device name of the CPP device to open + * @param[in] driver_lock_needed Whether the driver lock is required + * + * @return NFP CPP handle, or NULL on failure (and set errno accordingly). + */ +struct nfp_cpp *nfp_cpp_from_device_name(const char *devname, + int driver_lock_needed); + +/* + * Free a NFP CPP handle + * + * @param[in] cpp NFP CPP handle + */ +void nfp_cpp_free(struct nfp_cpp *cpp); + +#define NFP_CPP_MODEL_INVALID 0xffffffff + +/* + * NFP_CPP_MODEL_CHIP_of - retrieve the chip ID from the model ID + * + * The chip ID is a 16-bit BCD+A-F encoding for the chip type.
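+ * E.g. (illustrative value): a model ID of 0x62000010 yields + * NFP_CPP_MODEL_CHIP_of(model) == 0x6200, which NFP_CPP_MODEL_IS_6000() + * below accepts (0x4000 <= chip < 0x7000).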
+ * + * @param[in] model NFP CPP model id + * @return NFP CPP chip id + */ +#define NFP_CPP_MODEL_CHIP_of(model) (((model) >> 16) & 0xffff) + +/* + * NFP_CPP_MODEL_IS_6000 - Check for the NFP6000 family of devices + * + * NOTE: The NFP4000 series is considered as a NFP6000 series variant. + * + * @param[in] model NFP CPP model id + * @return true if model is in the NFP6000 family, false otherwise. + */ +#define NFP_CPP_MODEL_IS_6000(model) \ + ((NFP_CPP_MODEL_CHIP_of(model) >= 0x4000) && \ + (NFP_CPP_MODEL_CHIP_of(model) < 0x7000)) + +/* + * nfp_cpp_model - Retrieve the Model ID of the NFP + * + * @param[in] cpp NFP CPP handle + * @return NFP CPP Model ID + */ +uint32_t nfp_cpp_model(struct nfp_cpp *cpp); + +/* + * NFP Interface types - logical interface for this CPP connection; 4 bits are + * reserved for the interface type. + */ +#define NFP_CPP_INTERFACE_TYPE_INVALID 0x0 +#define NFP_CPP_INTERFACE_TYPE_PCI 0x1 +#define NFP_CPP_INTERFACE_TYPE_ARM 0x2 +#define NFP_CPP_INTERFACE_TYPE_RPC 0x3 +#define NFP_CPP_INTERFACE_TYPE_ILA 0x4 + +/* + * Construct a 16-bit NFP Interface ID + * + * Interface IDs consist of 4 bits of interface type, 4 bits of unit + * identifier, and 8 bits of channel identifier. + * + * The NFP Interface ID is used in the implementation of NFP CPP API mutexes, + * which use the MU Atomic CompareAndWrite operation - hence the limit to 16 + * bits to be able to use the NFP Interface ID as a lock owner. + * + * @param[in] type NFP Interface Type + * @param[in] unit Unit identifier for the interface type + * @param[in] channel Channel identifier for the interface unit + * @return Interface ID + */ +#define NFP_CPP_INTERFACE(type, unit, channel) \ + ((((type) & 0xf) << 12) | \ + (((unit) & 0xf) << 8) | \ + (((channel) & 0xff) << 0)) + +/* + * Get the interface type of a NFP Interface ID + * @param[in] interface NFP Interface ID + * @return NFP Interface ID's type + */ +#define NFP_CPP_INTERFACE_TYPE_of(interface) (((interface) >> 12) & 0xf) + +/* + * Get the interface unit of a NFP Interface ID + * @param[in] interface NFP Interface ID + * @return NFP Interface ID's unit + */ +#define NFP_CPP_INTERFACE_UNIT_of(interface) (((interface) >> 8) & 0xf) + +/* + * Get the interface channel of a NFP Interface ID + * @param[in] interface NFP Interface ID + * @return NFP Interface ID's channel + */ +#define NFP_CPP_INTERFACE_CHANNEL_of(interface) (((interface) >> 0) & 0xff) + +/* + * Retrieve the Interface ID of the NFP + * @param[in] cpp NFP CPP handle + * @return NFP CPP Interface ID + */ +uint16_t nfp_cpp_interface(struct nfp_cpp *cpp); + +/* + * Retrieve the NFP Serial Number (unique per NFP) + * @param[in] cpp NFP CPP handle + * @param[out] serial Pointer to reference the serial number array + * + * @return size of the NFP6000 serial number, in bytes + */ +int nfp_cpp_serial(struct nfp_cpp *cpp, const uint8_t **serial); + +/* + * Allocate a NFP CPP area handle, as an offset into a CPP ID + * @param[in] cpp NFP CPP handle + * @param[in] cpp_id NFP CPP ID + * @param[in] address Offset into the NFP CPP ID address space + * @param[in] size Size of the area to reserve + * + * @return NFP CPP area handle, or NULL on failure (and set errno accordingly).
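+ * E.g. (a usage sketch; addr is a placeholder): + * area = nfp_cpp_area_alloc(cpp, NFP_CPP_ID(NFP_CPP_TARGET_MU, + * NFP_CPP_ACTION_RW, 0), addr, 4096);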
+ */ +struct nfp_cpp_area *nfp_cpp_area_alloc(struct nfp_cpp *cpp, uint32_t cpp_id, + unsigned long long address, + unsigned long size); + +/* + * Allocate a NFP CPP area handle, as an offset into a CPP ID, by a named owner + * @param[in] cpp NFP CPP handle + * @param[in] cpp_id NFP CPP ID + * @param[in] name Name of owner of the area + * @param[in] address Offset into the NFP CPP ID address space + * @param[in] size Size of the area to reserve + * + * @return NFP CPP area handle, or NULL on failure (and set errno accordingly). + */ +struct nfp_cpp_area *nfp_cpp_area_alloc_with_name(struct nfp_cpp *cpp, + uint32_t cpp_id, + const char *name, + unsigned long long address, + unsigned long size); + +/* + * Free an allocated NFP CPP area handle + * @param[in] area NFP CPP area handle + */ +void nfp_cpp_area_free(struct nfp_cpp_area *area); + +/* + * Acquire the resources needed to access the NFP CPP area handle + * + * @param[in] area NFP CPP area handle + * + * @return 0 on success, -1 on failure (and set errno accordingly). + */ +int nfp_cpp_area_acquire(struct nfp_cpp_area *area); + +/* + * Release the resources needed to access the NFP CPP area handle + * + * @param[in] area NFP CPP area handle + */ +void nfp_cpp_area_release(struct nfp_cpp_area *area); + +/* + * Allocate, then acquire the resources needed to access the NFP CPP area handle + * @param[in] cpp NFP CPP handle + * @param[in] cpp_id NFP CPP ID + * @param[in] address Offset into the NFP CPP ID address space + * @param[in] size Size of the area to reserve + * + * @return NFP CPP area handle, or NULL on failure (and set errno accordingly). + */ +struct nfp_cpp_area *nfp_cpp_area_alloc_acquire(struct nfp_cpp *cpp, + uint32_t cpp_id, + unsigned long long address, + unsigned long size); + +/* + * Release the resources, then free the NFP CPP area handle + * @param[in] area NFP CPP area handle + */ +void nfp_cpp_area_release_free(struct nfp_cpp_area *area); + +uint8_t *nfp_cpp_map_area(struct nfp_cpp *cpp, int domain, int target, + uint64_t addr, unsigned long size, + struct nfp_cpp_area **area); +/* + * Return an IO pointer to the beginning of the NFP CPP area handle. The area + * must be acquired with 'nfp_cpp_area_acquire()' before calling this operation. + * + * @param[in] area NFP CPP area handle + * + * @return Pointer to IO memory, or NULL on failure (and set errno accordingly). + */ +void *nfp_cpp_area_mapped(struct nfp_cpp_area *area); + +/* + * Read from a NFP CPP area handle into a buffer. The area must be acquired with + * 'nfp_cpp_area_acquire()' before calling this operation. + * + * @param[in] area NFP CPP area handle + * @param[in] offset Offset into the area + * @param[in] buffer Location of buffer to receive the data + * @param[in] length Length of the data to read + * + * @return bytes read on success, -1 on failure (and set errno accordingly). + * + */ +int nfp_cpp_area_read(struct nfp_cpp_area *area, unsigned long offset, + void *buffer, size_t length); + +/* + * Write to a NFP CPP area handle from a buffer. The area must be acquired with + * 'nfp_cpp_area_acquire()' before calling this operation. + * + * @param[in] area NFP CPP area handle + * @param[in] offset Offset into the area + * @param[in] buffer Location of buffer that holds the data + * @param[in] length Length of the data to write + * + * @return bytes written on success, -1 on failure (and set errno accordingly).
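+ * A minimal acquire/write/release sketch (buf is a placeholder): + * if (nfp_cpp_area_acquire(area) == 0) { + * nfp_cpp_area_write(area, 0, buf, sizeof(buf)); + * nfp_cpp_area_release(area); + * }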
+ */ +int nfp_cpp_area_write(struct nfp_cpp_area *area, unsigned long offset, + const void *buffer, size_t length); + +/* + * nfp_cpp_area_iomem() - get IOMEM region for CPP area + * @area: CPP area handle + * + * Returns an iomem pointer for use with readl()/writel() style operations. + * + * NOTE: Area must have been locked down with an 'acquire'. + * + * Return: pointer to the area, or NULL + */ +void *nfp_cpp_area_iomem(struct nfp_cpp_area *area); + +/* + * Verify that IO can be performed on an offset in an area + * + * @param[in] area NFP CPP area handle + * @param[in] offset Offset into the area + * @param[in] size Size of region to validate + * + * @return 0 on success, -1 on failure (and set errno accordingly). + */ +int nfp_cpp_area_check_range(struct nfp_cpp_area *area, + unsigned long long offset, unsigned long size); + +/* + * Get the NFP CPP handle that is the parent of a NFP CPP area handle + * + * @param cpp_area NFP CPP area handle + * @return NFP CPP handle + */ +struct nfp_cpp *nfp_cpp_area_cpp(struct nfp_cpp_area *cpp_area); + +/* + * Get the name passed during allocation of the NFP CPP area handle + * + * @param cpp_area NFP CPP area handle + * @return Pointer to the area's name + */ +const char *nfp_cpp_area_name(struct nfp_cpp_area *cpp_area); + +/* + * Read a block of data from a NFP CPP ID + * + * @param[in] cpp NFP CPP handle + * @param[in] cpp_id NFP CPP ID + * @param[in] address Offset into the NFP CPP ID address space + * @param[in] kernel_vaddr Buffer to copy read data to + * @param[in] length Length of the data to read + * + * @return bytes read on success, -1 on failure (and set errno accordingly). + */ +int nfp_cpp_read(struct nfp_cpp *cpp, uint32_t cpp_id, + unsigned long long address, void *kernel_vaddr, size_t length); + +/* + * Write a block of data to a NFP CPP ID + * + * @param[in] cpp NFP CPP handle + * @param[in] cpp_id NFP CPP ID + * @param[in] address Offset into the NFP CPP ID address space + * @param[in] kernel_vaddr Buffer to copy write data from + * @param[in] length Length of the data to write + * + * @return bytes written on success, -1 on failure (and set errno accordingly). + */ +int nfp_cpp_write(struct nfp_cpp *cpp, uint32_t cpp_id, + unsigned long long address, const void *kernel_vaddr, + size_t length); + + + +/* + * Fill a NFP CPP area handle and offset with a value + * + * @param[in] area NFP CPP area handle + * @param[in] offset Offset into the NFP CPP ID address space + * @param[in] value 32-bit value to fill area with + * @param[in] length Length of the area to fill + * + * @return bytes written on success, -1 on failure (and set errno accordingly). + */ +int nfp_cpp_area_fill(struct nfp_cpp_area *area, unsigned long offset, + uint32_t value, size_t length); + +/* + * Read a single 32-bit value from a NFP CPP area handle + * + * @param area NFP CPP area handle + * @param offset offset into NFP CPP area handle + * @param value output value + * + * The area must be acquired with 'nfp_cpp_area_acquire()' before calling this + * operation. + * + * NOTE: offset must be 32-bit aligned. + * + * @return 0 on success, or -1 on error (and set errno accordingly). + */ +int nfp_cpp_area_readl(struct nfp_cpp_area *area, unsigned long offset, + uint32_t *value); + +/* + * Write a single 32-bit value to a NFP CPP area handle + * + * @param area NFP CPP area handle + * @param offset offset into NFP CPP area handle + * @param value value to write + * + * The area must be acquired with 'nfp_cpp_area_acquire()' before calling this + * operation.
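+ * E.g. (sketch): nfp_cpp_area_writel(area, 0x10, 0x1) writes one 32-bit + * word at byte offset 0x10 of the area.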
+ * + * NOTE: offset must be 32-bit aligned. + * + * @return 0 on success, or -1 on error (and set errno accordingly). + */ +int nfp_cpp_area_writel(struct nfp_cpp_area *area, unsigned long offset, + uint32_t value); + +/* + * Read a single 64-bit value from a NFP CPP area handle + * + * @param area NFP CPP area handle + * @param offset offset into NFP CPP area handle + * @param value output value + * + * The area must be acquired with 'nfp_cpp_area_acquire()' before calling this + * operation. + * + * NOTE: offset must be 64-bit aligned. + * + * @return 0 on success, or -1 on error (and set errno accordingly). + */ +int nfp_cpp_area_readq(struct nfp_cpp_area *area, unsigned long offset, + uint64_t *value); + +/* + * Write a single 64-bit value to a NFP CPP area handle + * + * @param area NFP CPP area handle + * @param offset offset into NFP CPP area handle + * @param value value to write + * + * The area must be acquired with 'nfp_cpp_area_acquire()' before calling this + * operation. + * + * NOTE: offset must be 64-bit aligned. + * + * @return 0 on success, or -1 on error (and set errno accordingly). + */ +int nfp_cpp_area_writeq(struct nfp_cpp_area *area, unsigned long offset, + uint64_t value); + +/* + * Write a single 32-bit value on the XPB bus + * + * @param cpp NFP CPP device handle + * @param xpb_tgt XPB target and address + * @param value value to write + * + * @return 0 on success, or -1 on failure (and set errno accordingly). + */ +int nfp_xpb_writel(struct nfp_cpp *cpp, uint32_t xpb_tgt, uint32_t value); + +/* + * Read a single 32-bit value from the XPB bus + * + * @param cpp NFP CPP device handle + * @param xpb_tgt XPB target and address + * @param value output value + * + * @return 0 on success, or -1 on failure (and set errno accordingly). + */ +int nfp_xpb_readl(struct nfp_cpp *cpp, uint32_t xpb_tgt, uint32_t *value); + +/* + * Modify bits of a 32-bit value from the XPB bus + * + * @param cpp NFP CPP device handle + * @param xpb_tgt XPB target and address + * @param mask mask of bits to alter + * @param value value to modify + * + * @return 0 on success, or -1 on failure (and set errno accordingly). + */ +int nfp_xpb_writelm(struct nfp_cpp *cpp, uint32_t xpb_tgt, uint32_t mask, + uint32_t value); + +/* + * Wait for bits of a 32-bit value on the XPB bus to match + * + * @param cpp NFP CPP device handle + * @param xpb_tgt XPB target and address + * @param mask mask of bits to alter + * @param value value to monitor for + * @param timeout_us maximum number of us to wait (-1 for forever) + * + * @return >= 0 on success, or -1 on failure (and set errno accordingly). + */ +int nfp_xpb_waitlm(struct nfp_cpp *cpp, uint32_t xpb_tgt, uint32_t mask, + uint32_t value, int timeout_us); + +/* + * Read a 32-bit word from a NFP CPP ID + * + * @param cpp NFP CPP handle + * @param cpp_id NFP CPP ID + * @param address offset into the NFP CPP ID address space + * @param value output value + * + * @return 0 on success, or -1 on failure (and set errno accordingly). + */ +int nfp_cpp_readl(struct nfp_cpp *cpp, uint32_t cpp_id, + unsigned long long address, uint32_t *value); + +/* + * Write a 32-bit value to a NFP CPP ID + * + * @param cpp NFP CPP handle + * @param cpp_id NFP CPP ID + * @param address offset into the NFP CPP ID address space + * @param value value to write + * + * @return 0 on success, or -1 on failure (and set errno accordingly).
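+ * Together with nfp_cpp_readl() above this allows a simple + * read-modify-write sketch (id and addr are placeholders): + * uint32_t v; + * if (nfp_cpp_readl(cpp, id, addr, &v) == 0) + * nfp_cpp_writel(cpp, id, addr, v | 0x1);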
+ * + */ +int nfp_cpp_writel(struct nfp_cpp *cpp, uint32_t cpp_id, + unsigned long long address, uint32_t value); + +/* + * Read a 64-bit word from a NFP CPP ID + * + * @param cpp NFP CPP handle + * @param cpp_id NFP CPP ID + * @param address offset into the NFP CPP ID address space + * @param value output value + * + * @return 0 on success, or -1 on failure (and set errno accordingly). + */ +int nfp_cpp_readq(struct nfp_cpp *cpp, uint32_t cpp_id, + unsigned long long address, uint64_t *value); + +/* + * Write a 64-bit value to a NFP CPP ID + * + * @param cpp NFP CPP handle + * @param cpp_id NFP CPP ID + * @param address offset into the NFP CPP ID address space + * @param value value to write + * + * @return 0 on success, or -1 on failure (and set errno accordingly). + */ +int nfp_cpp_writeq(struct nfp_cpp *cpp, uint32_t cpp_id, + unsigned long long address, uint64_t value); + +/* + * Initialize a mutex location + * + * The CPP target:address must point to a 64-bit aligned location, and will + * initialize 64 bits of data at the location. + * + * This creates the initial mutex state, as locked by this nfp_cpp_interface(). + * + * This function should only be called when setting up the initial lock state + * upon boot-up of the system. + * + * @param cpp NFP CPP handle + * @param target NFP CPP target ID + * @param address Offset into the address space of the NFP CPP target ID + * @param key_id Unique 32-bit value for this mutex + * + * @return 0 on success, or -1 on failure (and set errno accordingly). + */ +int nfp_cpp_mutex_init(struct nfp_cpp *cpp, int target, + unsigned long long address, uint32_t key_id); + +/* + * Create a mutex handle from an address controlled by a MU Atomic engine + * + * The CPP target:address must point to a 64-bit aligned location, and reserve + * 64 bits of data at the location for use by the handle. + * + * Only target/address pairs that point to entities that support the MU Atomic + * Engine's CmpAndSwap32 command are supported. + * + * @param cpp NFP CPP handle + * @param target NFP CPP target ID + * @param address Offset into the address space of the NFP CPP target ID + * @param key_id 32-bit unique key (must match the key at this location) + * + * @return A non-NULL struct nfp_cpp_mutex * on success, NULL on + * failure. + */ +struct nfp_cpp_mutex *nfp_cpp_mutex_alloc(struct nfp_cpp *cpp, int target, + unsigned long long address, + uint32_t key_id); + +/* + * Get the NFP CPP handle the mutex was created with + * + * @param mutex NFP mutex handle + * @return NFP CPP handle + */ +struct nfp_cpp *nfp_cpp_mutex_cpp(struct nfp_cpp_mutex *mutex); + +/* + * Get the mutex key + * + * @param mutex NFP mutex handle + * @return Mutex key + */ +uint32_t nfp_cpp_mutex_key(struct nfp_cpp_mutex *mutex); + +/* + * Get the mutex owner + * + * @param mutex NFP mutex handle + * @return Interface ID of the mutex owner + * + * NOTE: This is for debug purposes ONLY - the owner may change at any time, + * unless it has been locked by this NFP CPP handle.
+ */ +uint16_t nfp_cpp_mutex_owner(struct nfp_cpp_mutex *mutex); + +/* + * Get the mutex target + * + * @param mutex NFP mutex handle + * @return Mutex CPP target (ie NFP_CPP_TARGET_MU) + */ +int nfp_cpp_mutex_target(struct nfp_cpp_mutex *mutex); + +/* + * Get the mutex address + * + * @param mutex NFP mutex handle + * @return Mutex CPP address + */ +uint64_t nfp_cpp_mutex_address(struct nfp_cpp_mutex *mutex); + +/* + * Free a mutex handle - does not alter the lock state + * + * @param mutex NFP CPP Mutex handle + */ +void nfp_cpp_mutex_free(struct nfp_cpp_mutex *mutex); + +/* + * Lock a mutex handle, using the NFP MU Atomic Engine + * + * @param mutex NFP CPP Mutex handle + * + * @return 0 on success, or -1 on failure (and set errno accordingly). + */ +int nfp_cpp_mutex_lock(struct nfp_cpp_mutex *mutex); + +/* + * Unlock a mutex handle, using the NFP MU Atomic Engine + * + * @param mutex NFP CPP Mutex handle + * + * @return 0 on success, or -1 on failure (and set errno accordingly). + */ +int nfp_cpp_mutex_unlock(struct nfp_cpp_mutex *mutex); + +/* + * Attempt to lock a mutex handle, using the NFP MU Atomic Engine + * + * @param mutex NFP CPP Mutex handle + * @return 0 if the lock succeeded, -1 on failure (and errno set + * appropriately). + */ +int nfp_cpp_mutex_trylock(struct nfp_cpp_mutex *mutex); + +#endif /* !__NFP_CPP_H__ */ diff --git a/drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c b/drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c new file mode 100644 index 00000000..2f5e7f6d --- /dev/null +++ b/drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c @@ -0,0 +1,941 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Netronome Systems, Inc. + * All rights reserved. + */ + +/* + * nfp_cpp_pcie_ops.c + * Authors: Vinayak Tammineedi + * + * Multiplexes the NFP BARs between NFP internal resources and + * implements the PCIe specific interface for generic CPP bus access. + * + * The BARs are managed and allocated if they are available. + * The generic CPP bus abstraction builds upon this BAR interface. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include + +#include "nfp_cpp.h" +#include "nfp_target.h" +#include "nfp6000/nfp6000.h" + +#define NFP_PCIE_BAR(_pf) (0x30000 + ((_pf) & 7) * 0xc0) + +#define NFP_PCIE_BAR_PCIE2CPP_ACTION_BASEADDRESS(_x) (((_x) & 0x1f) << 16) +#define NFP_PCIE_BAR_PCIE2CPP_BASEADDRESS(_x) (((_x) & 0xffff) << 0) +#define NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT(_x) (((_x) & 0x3) << 27) +#define NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT_32BIT 0 +#define NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT_64BIT 1 +#define NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT_0BYTE 3 +#define NFP_PCIE_BAR_PCIE2CPP_MAPTYPE(_x) (((_x) & 0x7) << 29) +#define NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_OF(_x) (((_x) >> 29) & 0x7) +#define NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_FIXED 0 +#define NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_BULK 1 +#define NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_TARGET 2 +#define NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_GENERAL 3 +#define NFP_PCIE_BAR_PCIE2CPP_TARGET_BASEADDRESS(_x) (((_x) & 0xf) << 23) +#define NFP_PCIE_BAR_PCIE2CPP_TOKEN_BASEADDRESS(_x) (((_x) & 0x3) << 21) + +/* + * Minimal size of the PCIe cfg memory we depend on being mapped, + * queue controller and DMA controller don't have to be covered. 
+ */
+#define NFP_PCI_MIN_MAP_SIZE		0x080000
+
+#define NFP_PCIE_P2C_FIXED_SIZE(bar)	(1 << (bar)->bitsize)
+#define NFP_PCIE_P2C_BULK_SIZE(bar)	(1 << (bar)->bitsize)
+#define NFP_PCIE_P2C_GENERAL_TARGET_OFFSET(bar, x) ((x) << ((bar)->bitsize - 2))
+#define NFP_PCIE_P2C_GENERAL_TOKEN_OFFSET(bar, x) ((x) << ((bar)->bitsize - 4))
+#define NFP_PCIE_P2C_GENERAL_SIZE(bar)	(1 << ((bar)->bitsize - 4))
+
+#define NFP_PCIE_CFG_BAR_PCIETOCPPEXPBAR(bar, slot) \
+	(NFP_PCIE_BAR(0) + ((bar) * 8 + (slot)) * 4)
+
+#define NFP_PCIE_CPP_BAR_PCIETOCPPEXPBAR(bar, slot) \
+	(((bar) * 8 + (slot)) * 4)
+
+/*
+ * Define DEBUG to enable a bit more verbose debug output.
+ */
+struct nfp_pcie_user;
+struct nfp6000_area_priv;
+
+/*
+ * struct nfp_bar - describes BAR configuration and usage
+ * @nfp:	backlink to owner
+ * @barcfg:	cached contents of BAR config CSR
+ * @base:	the BAR's base CPP offset
+ * @mask:	mask for the BAR aperture (read only)
+ * @bitsize:	bitsize of BAR aperture (read only)
+ * @index:	index of the BAR
+ * @lock:	flag set while the BAR is in use
+ * @csr:	address of the BAR's config register within the mapped
+ *		PCIe config space
+ * @iomem:	mapped IO memory
+ */
+#define NFP_BAR_MAX 7
+struct nfp_bar {
+	struct nfp_pcie_user *nfp;
+	uint32_t barcfg;
+	uint64_t base;		/* CPP address base */
+	uint64_t mask;		/* Bit mask of the bar */
+	uint32_t bitsize;	/* Bit size of the bar */
+	int index;
+	int lock;
+
+	char *csr;
+	char *iomem;
+};
+
+#define BUSDEV_SZ	13
+struct nfp_pcie_user {
+	struct nfp_bar bar[NFP_BAR_MAX];
+
+	int device;
+	int lock;
+	char busdev[BUSDEV_SZ];
+	int barsz;
+	char *cfg;
+};
+
+static uint32_t
+nfp_bar_maptype(struct nfp_bar *bar)
+{
+	return NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_OF(bar->barcfg);
+}
+
+#define TARGET_WIDTH_32	4
+#define TARGET_WIDTH_64	8
+
+static int
+nfp_compute_bar(const struct nfp_bar *bar, uint32_t *bar_config,
+		uint64_t *bar_base, int tgt, int act, int tok,
+		uint64_t offset, size_t size, int width)
+{
+	uint32_t bitsize;
+	uint32_t newcfg;
+	uint64_t mask;
+
+	if (tgt >= 16)
+		return -EINVAL;
+
+	switch (width) {
+	case 8:
+		newcfg =
+		    NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT
+		    (NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT_64BIT);
+		break;
+	case 4:
+		newcfg =
+		    NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT
+		    (NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT_32BIT);
+		break;
+	case 0:
+		newcfg =
+		    NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT
+		    (NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT_0BYTE);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (act != NFP_CPP_ACTION_RW && act != 0) {
+		/* Fixed CPP mapping with specific action */
+		mask = ~(NFP_PCIE_P2C_FIXED_SIZE(bar) - 1);
+
+		newcfg |=
+		    NFP_PCIE_BAR_PCIE2CPP_MAPTYPE
+		    (NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_FIXED);
+		newcfg |= NFP_PCIE_BAR_PCIE2CPP_TARGET_BASEADDRESS(tgt);
+		newcfg |= NFP_PCIE_BAR_PCIE2CPP_ACTION_BASEADDRESS(act);
+		newcfg |= NFP_PCIE_BAR_PCIE2CPP_TOKEN_BASEADDRESS(tok);
+
+		if ((offset & mask) != ((offset + size - 1) & mask)) {
+			printf("BAR%d: Won't use for Fixed mapping\n",
+			       bar->index);
+			printf("\t<%#llx,%#llx>, action=%d\n",
+			       (unsigned long long)offset,
+			       (unsigned long long)(offset + size), act);
+			printf("\tBAR too small (0x%llx).\n",
+			       (unsigned long long)mask);
+			return -EINVAL;
+		}
+		offset &= mask;
+
+#ifdef DEBUG
+		printf("BAR%d: Created Fixed mapping\n", bar->index);
+		printf("\t<%d:%d:%d:%#llx-%#llx>\n", tgt, act, tok,
+		       (unsigned long long)offset,
+		       (unsigned long long)(offset + ~mask));
+#endif
+
+		bitsize = 40 - 16;
+	} else {
+		mask = ~(NFP_PCIE_P2C_BULK_SIZE(bar) - 1);
+
+		/* Bulk mapping */
+		newcfg |=
+
NFP_PCIE_BAR_PCIE2CPP_MAPTYPE + (NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_BULK); + + newcfg |= NFP_PCIE_BAR_PCIE2CPP_TARGET_BASEADDRESS(tgt); + newcfg |= NFP_PCIE_BAR_PCIE2CPP_TOKEN_BASEADDRESS(tok); + + if ((offset & mask) != ((offset + size - 1) & mask)) { + printf("BAR%d: Won't use for bulk mapping\n", + bar->index); + printf("\t<%#llx,%#llx>\n", (unsigned long long)offset, + (unsigned long long)(offset + size)); + printf("\ttarget=%d, token=%d\n", tgt, tok); + printf("\tBAR too small (%#llx) - (%#llx != %#llx).\n", + (unsigned long long)mask, + (unsigned long long)(offset & mask), + (unsigned long long)(offset + size - 1) & mask); + + return -EINVAL; + } + + offset &= mask; + +#ifdef DEBUG + printf("BAR%d: Created bulk mapping %d:x:%d:%#llx-%#llx\n", + bar->index, tgt, tok, (unsigned long long)offset, + (unsigned long long)(offset + ~mask)); +#endif + + bitsize = 40 - 21; + } + + if (bar->bitsize < bitsize) { + printf("BAR%d: Too small for %d:%d:%d\n", bar->index, tgt, tok, + act); + return -EINVAL; + } + + newcfg |= offset >> bitsize; + + if (bar_base) + *bar_base = offset; + + if (bar_config) + *bar_config = newcfg; + + return 0; +} + +static int +nfp_bar_write(struct nfp_pcie_user *nfp, struct nfp_bar *bar, + uint32_t newcfg) +{ + int base, slot; + + base = bar->index >> 3; + slot = bar->index & 7; + + if (!nfp->cfg) + return (-ENOMEM); + + bar->csr = nfp->cfg + + NFP_PCIE_CFG_BAR_PCIETOCPPEXPBAR(base, slot); + + *(uint32_t *)(bar->csr) = newcfg; + + bar->barcfg = newcfg; +#ifdef DEBUG + printf("BAR%d: updated to 0x%08x\n", bar->index, newcfg); +#endif + + return 0; +} + +static int +nfp_reconfigure_bar(struct nfp_pcie_user *nfp, struct nfp_bar *bar, int tgt, + int act, int tok, uint64_t offset, size_t size, int width) +{ + uint64_t newbase; + uint32_t newcfg; + int err; + + err = nfp_compute_bar(bar, &newcfg, &newbase, tgt, act, tok, offset, + size, width); + if (err) + return err; + + bar->base = newbase; + + return nfp_bar_write(nfp, bar, newcfg); +} + +/* + * Map all PCI bars. We assume that the BAR with the PCIe config block is + * already mapped. + * + * BAR0.0: Reserved for General Mapping (for MSI-X access to PCIe SRAM) + */ +static int +nfp_enable_bars(struct nfp_pcie_user *nfp) +{ + struct nfp_bar *bar; + int x; + + for (x = ARRAY_SIZE(nfp->bar); x > 0; x--) { + bar = &nfp->bar[x - 1]; + bar->barcfg = 0; + bar->nfp = nfp; + bar->index = x; + bar->mask = (1 << (nfp->barsz - 3)) - 1; + bar->bitsize = nfp->barsz - 3; + bar->base = 0; + bar->iomem = NULL; + bar->lock = 0; + bar->csr = nfp->cfg + + NFP_PCIE_CFG_BAR_PCIETOCPPEXPBAR(bar->index >> 3, + bar->index & 7); + bar->iomem = + (char *)mmap(0, 1 << bar->bitsize, PROT_READ | PROT_WRITE, + MAP_SHARED, nfp->device, + bar->index << bar->bitsize); + + if (bar->iomem == MAP_FAILED) + return (-ENOMEM); + } + return 0; +} + +static struct nfp_bar * +nfp_alloc_bar(struct nfp_pcie_user *nfp) +{ + struct nfp_bar *bar; + int x; + + for (x = ARRAY_SIZE(nfp->bar); x > 0; x--) { + bar = &nfp->bar[x - 1]; + if (!bar->lock) { + bar->lock = 1; + return bar; + } + } + return NULL; +} + +static void +nfp_disable_bars(struct nfp_pcie_user *nfp) +{ + struct nfp_bar *bar; + int x; + + for (x = ARRAY_SIZE(nfp->bar); x > 0; x--) { + bar = &nfp->bar[x - 1]; + if (bar->iomem) { + munmap(bar->iomem, 1 << (nfp->barsz - 3)); + bar->iomem = NULL; + bar->lock = 0; + } + } +} + +/* + * Generic CPP bus access interface. 
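+ *
+ * As a worked example of the BAR arithmetic above (the numbers are
+ * illustrative, not a fixed configuration): with a 16 MB PCI BAR,
+ * barsz is 24 and each of the eight sub-BARs spans
+ * 1 << (24 - 3) = 2 MB, i.e. bitsize is 21.  A bulk window then keeps
+ * the low 21 bits of the CPP address as the in-window offset and folds
+ * the remaining high bits (offset >> bitsize) into the BAR config
+ * word, which is why nfp_compute_bar() rejects any range whose first
+ * and last byte fall into different 2 MB-aligned windows.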
+ */ + +struct nfp6000_area_priv { + struct nfp_bar *bar; + uint32_t bar_offset; + + uint32_t target; + uint32_t action; + uint32_t token; + uint64_t offset; + struct { + int read; + int write; + int bar; + } width; + size_t size; + char *iomem; +}; + +static int +nfp6000_area_init(struct nfp_cpp_area *area, uint32_t dest, + unsigned long long address, unsigned long size) +{ + struct nfp_pcie_user *nfp = nfp_cpp_priv(nfp_cpp_area_cpp(area)); + struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area); + uint32_t target = NFP_CPP_ID_TARGET_of(dest); + uint32_t action = NFP_CPP_ID_ACTION_of(dest); + uint32_t token = NFP_CPP_ID_TOKEN_of(dest); + int pp, ret = 0; + + pp = nfp6000_target_pushpull(NFP_CPP_ID(target, action, token), + address); + if (pp < 0) + return pp; + + priv->width.read = PUSH_WIDTH(pp); + priv->width.write = PULL_WIDTH(pp); + + if (priv->width.read > 0 && + priv->width.write > 0 && priv->width.read != priv->width.write) + return -EINVAL; + + if (priv->width.read > 0) + priv->width.bar = priv->width.read; + else + priv->width.bar = priv->width.write; + + priv->bar = nfp_alloc_bar(nfp); + if (priv->bar == NULL) + return -ENOMEM; + + priv->target = target; + priv->action = action; + priv->token = token; + priv->offset = address; + priv->size = size; + + ret = nfp_reconfigure_bar(nfp, priv->bar, priv->target, priv->action, + priv->token, priv->offset, priv->size, + priv->width.bar); + + return ret; +} + +static int +nfp6000_area_acquire(struct nfp_cpp_area *area) +{ + struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area); + + /* Calculate offset into BAR. */ + if (nfp_bar_maptype(priv->bar) == + NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_GENERAL) { + priv->bar_offset = priv->offset & + (NFP_PCIE_P2C_GENERAL_SIZE(priv->bar) - 1); + priv->bar_offset += + NFP_PCIE_P2C_GENERAL_TARGET_OFFSET(priv->bar, + priv->target); + priv->bar_offset += + NFP_PCIE_P2C_GENERAL_TOKEN_OFFSET(priv->bar, priv->token); + } else { + priv->bar_offset = priv->offset & priv->bar->mask; + } + + /* Must have been too big. Sub-allocate. */ + if (!priv->bar->iomem) + return (-ENOMEM); + + priv->iomem = priv->bar->iomem + priv->bar_offset; + + return 0; +} + +static void * +nfp6000_area_mapped(struct nfp_cpp_area *area) +{ + struct nfp6000_area_priv *area_priv = nfp_cpp_area_priv(area); + + if (!area_priv->iomem) + return NULL; + + return area_priv->iomem; +} + +static void +nfp6000_area_release(struct nfp_cpp_area *area) +{ + struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area); + priv->bar->lock = 0; + priv->bar = NULL; + priv->iomem = NULL; +} + +static void * +nfp6000_area_iomem(struct nfp_cpp_area *area) +{ + struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area); + return priv->iomem; +} + +static int +nfp6000_area_read(struct nfp_cpp_area *area, void *kernel_vaddr, + unsigned long offset, unsigned int length) +{ + uint64_t *wrptr64 = kernel_vaddr; + const volatile uint64_t *rdptr64; + struct nfp6000_area_priv *priv; + uint32_t *wrptr32 = kernel_vaddr; + const volatile uint32_t *rdptr32; + int width; + unsigned int n; + bool is_64; + + priv = nfp_cpp_area_priv(area); + rdptr64 = (uint64_t *)(priv->iomem + offset); + rdptr32 = (uint32_t *)(priv->iomem + offset); + + if (offset + length > priv->size) + return -EFAULT; + + width = priv->width.read; + + if (width <= 0) + return -EINVAL; + + /* Unaligned? 
Translate to an explicit access */
+	if ((priv->offset + offset) & (width - 1)) {
+		printf("area_read unaligned\n");
+		return -EINVAL;
+	}
+
+	is_64 = width == TARGET_WIDTH_64;
+
+	/* MU reads via a PCIe2CPP BAR support 32-bit (and other) lengths */
+	if (priv->target == (NFP_CPP_TARGET_ID_MASK & NFP_CPP_TARGET_MU) &&
+	    priv->action == NFP_CPP_ACTION_RW) {
+		is_64 = false;
+	}
+
+	if (is_64) {
+		if (offset % sizeof(uint64_t) != 0 ||
+		    length % sizeof(uint64_t) != 0)
+			return -EINVAL;
+	} else {
+		if (offset % sizeof(uint32_t) != 0 ||
+		    length % sizeof(uint32_t) != 0)
+			return -EINVAL;
+	}
+
+	if (!priv->bar)
+		return -EFAULT;
+
+	if (is_64)
+		for (n = 0; n < length; n += sizeof(uint64_t)) {
+			*wrptr64 = *rdptr64;
+			wrptr64++;
+			rdptr64++;
+		}
+	else
+		for (n = 0; n < length; n += sizeof(uint32_t)) {
+			*wrptr32 = *rdptr32;
+			wrptr32++;
+			rdptr32++;
+		}
+
+	return n;
+}
+
+static int
+nfp6000_area_write(struct nfp_cpp_area *area, const void *kernel_vaddr,
+		   unsigned long offset, unsigned int length)
+{
+	const uint64_t *rdptr64 = kernel_vaddr;
+	uint64_t *wrptr64;
+	const uint32_t *rdptr32 = kernel_vaddr;
+	struct nfp6000_area_priv *priv;
+	uint32_t *wrptr32;
+	int width;
+	unsigned int n;
+	bool is_64;
+
+	priv = nfp_cpp_area_priv(area);
+	wrptr64 = (uint64_t *)(priv->iomem + offset);
+	wrptr32 = (uint32_t *)(priv->iomem + offset);
+
+	if (offset + length > priv->size)
+		return -EFAULT;
+
+	width = priv->width.write;
+
+	if (width <= 0)
+		return -EINVAL;
+
+	/* Unaligned? Translate to an explicit access */
+	if ((priv->offset + offset) & (width - 1))
+		return -EINVAL;
+
+	is_64 = width == TARGET_WIDTH_64;
+
+	/* MU writes via a PCIe2CPP BAR support 32-bit (and other) lengths */
+	if (priv->target == (NFP_CPP_TARGET_ID_MASK & NFP_CPP_TARGET_MU) &&
+	    priv->action == NFP_CPP_ACTION_RW)
+		is_64 = false;
+
+	if (is_64) {
+		if (offset % sizeof(uint64_t) != 0 ||
+		    length % sizeof(uint64_t) != 0)
+			return -EINVAL;
+	} else {
+		if (offset % sizeof(uint32_t) != 0 ||
+		    length % sizeof(uint32_t) != 0)
+			return -EINVAL;
+	}
+
+	if (!priv->bar)
+		return -EFAULT;
+
+	if (is_64)
+		for (n = 0; n < length; n += sizeof(uint64_t)) {
+			*wrptr64 = *rdptr64;
+			wrptr64++;
+			rdptr64++;
+		}
+	else
+		for (n = 0; n < length; n += sizeof(uint32_t)) {
+			*wrptr32 = *rdptr32;
+			wrptr32++;
+			rdptr32++;
+		}
+
+	return n;
+}
+
+#define PCI_DEVICES "/sys/bus/pci/devices"
+
+static int
+nfp_acquire_process_lock(struct nfp_pcie_user *desc)
+{
+	int rc;
+	struct flock lock;
+	char lockname[30];
+
+	memset(&lock, 0, sizeof(lock));
+
+	snprintf(lockname, sizeof(lockname), "/var/lock/nfp_%s", desc->busdev);
+	desc->lock = open(lockname, O_RDWR | O_CREAT, 0666);
+	if (desc->lock < 0)
+		return desc->lock;
+
+	lock.l_type = F_WRLCK;
+	lock.l_whence = SEEK_SET;
+	rc = -1;
+	while (rc != 0) {
+		rc = fcntl(desc->lock, F_SETLKW, &lock);
+		if (rc < 0) {
+			if (errno != EAGAIN && errno != EACCES) {
+				close(desc->lock);
+				return rc;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int
+nfp6000_set_model(struct nfp_pcie_user *desc, struct nfp_cpp *cpp)
+{
+	char tmp_str[80];
+	uint32_t tmp;
+	int fp;
+
+	snprintf(tmp_str, sizeof(tmp_str), "%s/%s/config", PCI_DEVICES,
+		 desc->busdev);
+
+	fp = open(tmp_str, O_RDONLY);
+	if (fp < 0)
+		return -1;
+
+	lseek(fp, 0x2e, SEEK_SET);
+
+	if (read(fp, &tmp, sizeof(tmp)) != sizeof(tmp)) {
+		printf("Error reading config file for model\n");
+		close(fp);
+		return -1;
+	}
+
+	tmp = tmp << 16;
+
+	if (close(fp) == -1)
+		return -1;
+
+	nfp_cpp_model_set(cpp, tmp);
+
+	return 0;
+}
+
+static int
+nfp6000_set_interface(struct
nfp_pcie_user *desc, struct nfp_cpp *cpp)
+{
+	char tmp_str[80];
+	uint16_t tmp;
+	int fp;
+
+	snprintf(tmp_str, sizeof(tmp_str), "%s/%s/config", PCI_DEVICES,
+		 desc->busdev);
+
+	fp = open(tmp_str, O_RDONLY);
+	if (fp < 0)
+		return -1;
+
+	lseek(fp, 0x154, SEEK_SET);
+
+	if (read(fp, &tmp, sizeof(tmp)) != sizeof(tmp)) {
+		printf("error reading config file for interface\n");
+		close(fp);
+		return -1;
+	}
+
+	if (close(fp) == -1)
+		return -1;
+
+	nfp_cpp_interface_set(cpp, tmp);
+
+	return 0;
+}
+
+#define PCI_CFG_SPACE_SIZE	256
+#define PCI_CFG_SPACE_EXP_SIZE	4096
+#define PCI_EXT_CAP_ID(header)	(int)(header & 0x0000ffff)
+#define PCI_EXT_CAP_NEXT(header) ((header >> 20) & 0xffc)
+#define PCI_EXT_CAP_ID_DSN	0x03
+static int
+nfp_pci_find_next_ext_capability(int fp, int cap)
+{
+	uint32_t header;
+	int ttl;
+	int pos = PCI_CFG_SPACE_SIZE;
+
+	/* minimum 8 bytes per capability */
+	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
+
+	lseek(fp, pos, SEEK_SET);
+	if (read(fp, &header, sizeof(header)) != sizeof(header)) {
+		printf("error reading config file for serial\n");
+		return -1;
+	}
+
+	/*
+	 * If we have no capabilities, this is indicated by cap ID,
+	 * cap version and next pointer all being 0.
+	 */
+	if (header == 0)
+		return 0;
+
+	while (ttl-- > 0) {
+		if (PCI_EXT_CAP_ID(header) == cap)
+			return pos;
+
+		pos = PCI_EXT_CAP_NEXT(header);
+		if (pos < PCI_CFG_SPACE_SIZE)
+			break;
+
+		lseek(fp, pos, SEEK_SET);
+		if (read(fp, &header, sizeof(header)) != sizeof(header)) {
+			printf("error reading config file for serial\n");
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+static int
+nfp6000_set_serial(struct nfp_pcie_user *desc, struct nfp_cpp *cpp)
+{
+	char tmp_str[80];
+	uint16_t tmp;
+	uint8_t serial[6];
+	int serial_len = 6;
+	int fp, pos;
+
+	snprintf(tmp_str, sizeof(tmp_str), "%s/%s/config", PCI_DEVICES,
+		 desc->busdev);
+
+	fp = open(tmp_str, O_RDONLY);
+	if (fp < 0)
+		return -1;
+
+	pos = nfp_pci_find_next_ext_capability(fp, PCI_EXT_CAP_ID_DSN);
+	if (pos <= 0) {
+		printf("PCI_EXT_CAP_ID_DSN not found.
Using default offset\n");
+		lseek(fp, 0x156, SEEK_SET);
+	} else {
+		lseek(fp, pos + 6, SEEK_SET);
+	}
+
+	if (read(fp, &tmp, sizeof(tmp)) != sizeof(tmp)) {
+		printf("error reading config file for serial\n");
+		close(fp);
+		return -1;
+	}
+
+	serial[4] = (uint8_t)((tmp >> 8) & 0xff);
+	serial[5] = (uint8_t)(tmp & 0xff);
+
+	if (read(fp, &tmp, sizeof(tmp)) != sizeof(tmp)) {
+		printf("error reading config file for serial\n");
+		close(fp);
+		return -1;
+	}
+
+	serial[2] = (uint8_t)((tmp >> 8) & 0xff);
+	serial[3] = (uint8_t)(tmp & 0xff);
+
+	if (read(fp, &tmp, sizeof(tmp)) != sizeof(tmp)) {
+		printf("error reading config file for serial\n");
+		close(fp);
+		return -1;
+	}
+
+	serial[0] = (uint8_t)((tmp >> 8) & 0xff);
+	serial[1] = (uint8_t)(tmp & 0xff);
+
+	if (close(fp) == -1)
+		return -1;
+
+	nfp_cpp_serial_set(cpp, serial, serial_len);
+
+	return 0;
+}
+
+static int
+nfp6000_set_barsz(struct nfp_pcie_user *desc)
+{
+	char tmp_str[80];
+	unsigned long start, end, flags, tmp;
+	int i;
+	FILE *fp;
+
+	snprintf(tmp_str, sizeof(tmp_str), "%s/%s/resource", PCI_DEVICES,
+		 desc->busdev);
+
+	fp = fopen(tmp_str, "r");
+	if (!fp)
+		return -1;
+
+	if (fscanf(fp, "0x%lx 0x%lx 0x%lx", &start, &end, &flags) != 3) {
+		printf("error reading resource file for bar size\n");
+		fclose(fp);
+		return -1;
+	}
+
+	if (fclose(fp) == -1)
+		return -1;
+
+	tmp = (end - start) + 1;
+	i = 0;
+	while (tmp >>= 1)
+		i++;
+	desc->barsz = i;
+	return 0;
+}
+
+static int
+nfp6000_init(struct nfp_cpp *cpp, const char *devname)
+{
+	char link[120];
+	char tmp_str[80];
+	ssize_t size;
+	int ret = 0;
+	uint32_t model;
+	struct nfp_pcie_user *desc;
+
+	desc = malloc(sizeof(*desc));
+	if (!desc)
+		return -1;
+
+	memset(desc->busdev, 0, BUSDEV_SZ);
+	strlcpy(desc->busdev, devname, sizeof(desc->busdev));
+
+	if (cpp->driver_lock_needed) {
+		ret = nfp_acquire_process_lock(desc);
+		if (ret)
+			return -1;
+	}
+
+	snprintf(tmp_str, sizeof(tmp_str), "%s/%s/driver", PCI_DEVICES,
+		 desc->busdev);
+
+	size = readlink(tmp_str, link, sizeof(link));
+
+	if (size == -1)
+		tmp_str[0] = '\0';
+
+	if (size == sizeof(link))
+		tmp_str[0] = '\0';
+
+	snprintf(tmp_str, sizeof(tmp_str), "%s/%s/resource0", PCI_DEVICES,
+		 desc->busdev);
+
+	desc->device = open(tmp_str, O_RDWR);
+	if (desc->device == -1)
+		return -1;
+
+	if (nfp6000_set_model(desc, cpp) < 0)
+		return -1;
+	if (nfp6000_set_interface(desc, cpp) < 0)
+		return -1;
+	if (nfp6000_set_serial(desc, cpp) < 0)
+		return -1;
+	if (nfp6000_set_barsz(desc) < 0)
+		return -1;
+
+	desc->cfg = (char *)mmap(0, 1 << (desc->barsz - 3),
+				 PROT_READ | PROT_WRITE,
+				 MAP_SHARED, desc->device, 0);
+
+	if (desc->cfg == MAP_FAILED)
+		return -1;
+
+	nfp_enable_bars(desc);
+
+	nfp_cpp_priv_set(cpp, desc);
+
+	model = __nfp_cpp_model_autodetect(cpp);
+	nfp_cpp_model_set(cpp, model);
+
+	return ret;
+}
+
+static void
+nfp6000_free(struct nfp_cpp *cpp)
+{
+	struct nfp_pcie_user *desc = nfp_cpp_priv(cpp);
+	int x;
+
+	/* Unmapping may cause problems if there are any pending transactions */
+	nfp_disable_bars(desc);
+	munmap(desc->cfg, 1 << (desc->barsz - 3));
+
+	for (x = ARRAY_SIZE(desc->bar); x > 0; x--) {
+		if (desc->bar[x - 1].iomem)
+			munmap(desc->bar[x - 1].iomem, 1 << (desc->barsz - 3));
+	}
+	if (cpp->driver_lock_needed)
+		close(desc->lock);
+	close(desc->device);
+	free(desc);
+}
+
+static const struct nfp_cpp_operations nfp6000_pcie_ops = {
+	.init = nfp6000_init,
+	.free = nfp6000_free,
+
+	.area_priv_size = sizeof(struct nfp6000_area_priv),
+	.area_init = nfp6000_area_init,
+	.area_acquire = nfp6000_area_acquire,
+	.area_release = nfp6000_area_release,
+
.area_mapped = nfp6000_area_mapped, + .area_read = nfp6000_area_read, + .area_write = nfp6000_area_write, + .area_iomem = nfp6000_area_iomem, +}; + +const struct +nfp_cpp_operations *nfp_cpp_transport_operations(void) +{ + return &nfp6000_pcie_ops; +} diff --git a/drivers/net/nfp/nfpcore/nfp_cppcore.c b/drivers/net/nfp/nfpcore/nfp_cppcore.c new file mode 100644 index 00000000..f61143f7 --- /dev/null +++ b/drivers/net/nfp/nfpcore/nfp_cppcore.c @@ -0,0 +1,857 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Netronome Systems, Inc. + * All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "nfp_cpp.h" +#include "nfp_target.h" +#include "nfp6000/nfp6000.h" +#include "nfp6000/nfp_xpb.h" +#include "nfp_nffw.h" + +#define NFP_PL_DEVICE_ID 0x00000004 +#define NFP_PL_DEVICE_ID_MASK 0xff + +#define NFP6000_ARM_GCSR_SOFTMODEL0 0x00400144 + +void +nfp_cpp_priv_set(struct nfp_cpp *cpp, void *priv) +{ + cpp->priv = priv; +} + +void * +nfp_cpp_priv(struct nfp_cpp *cpp) +{ + return cpp->priv; +} + +void +nfp_cpp_model_set(struct nfp_cpp *cpp, uint32_t model) +{ + cpp->model = model; +} + +uint32_t +nfp_cpp_model(struct nfp_cpp *cpp) +{ + if (!cpp) + return NFP_CPP_MODEL_INVALID; + + if (cpp->model == 0) + cpp->model = __nfp_cpp_model_autodetect(cpp); + + return cpp->model; +} + +void +nfp_cpp_interface_set(struct nfp_cpp *cpp, uint32_t interface) +{ + cpp->interface = interface; +} + +int +nfp_cpp_serial(struct nfp_cpp *cpp, const uint8_t **serial) +{ + *serial = cpp->serial; + return cpp->serial_len; +} + +int +nfp_cpp_serial_set(struct nfp_cpp *cpp, const uint8_t *serial, + size_t serial_len) +{ + if (cpp->serial_len) + free(cpp->serial); + + cpp->serial = malloc(serial_len); + if (!cpp->serial) + return -1; + + memcpy(cpp->serial, serial, serial_len); + cpp->serial_len = serial_len; + + return 0; +} + +uint16_t +nfp_cpp_interface(struct nfp_cpp *cpp) +{ + if (!cpp) + return NFP_CPP_INTERFACE(NFP_CPP_INTERFACE_TYPE_INVALID, 0, 0); + + return cpp->interface; +} + +void * +nfp_cpp_area_priv(struct nfp_cpp_area *cpp_area) +{ + return &cpp_area[1]; +} + +struct nfp_cpp * +nfp_cpp_area_cpp(struct nfp_cpp_area *cpp_area) +{ + return cpp_area->cpp; +} + +const char * +nfp_cpp_area_name(struct nfp_cpp_area *cpp_area) +{ + return cpp_area->name; +} + +/* + * nfp_cpp_area_alloc - allocate a new CPP area + * @cpp: CPP handle + * @dest: CPP id + * @address: start address on CPP target + * @size: size of area in bytes + * + * Allocate and initialize a CPP area structure. The area must later + * be locked down with an 'acquire' before it can be safely accessed. + * + * NOTE: @address and @size must be 32-bit aligned values. 
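+ *
+ * For illustration, the usual pairing of these calls (error handling
+ * abbreviated; the destination, address and size are hypothetical):
+ *
+ *	uint32_t id = NFP_CPP_ID(NFP_CPP_TARGET_MU, NFP_CPP_ACTION_RW, 0);
+ *	struct nfp_cpp_area *area;
+ *	uint32_t val;
+ *
+ *	area = nfp_cpp_area_alloc(cpp, id, 0x1000, 0x100);
+ *	if (area) {
+ *		if (nfp_cpp_area_acquire(area) == 0) {
+ *			nfp_cpp_area_readl(area, 0, &val);
+ *			nfp_cpp_area_release(area);
+ *		}
+ *		nfp_cpp_area_free(area);
+ *	}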
+ */
+struct nfp_cpp_area *
+nfp_cpp_area_alloc_with_name(struct nfp_cpp *cpp, uint32_t dest,
+			     const char *name, unsigned long long address,
+			     unsigned long size)
+{
+	struct nfp_cpp_area *area;
+	uint64_t tmp64 = (uint64_t)address;
+	int tmp, err;
+
+	if (!cpp)
+		return NULL;
+
+	/* CPP bus uses only a 40-bit address */
+	if ((address + size) > (1ULL << 40))
+		return NFP_ERRPTR(EFAULT);
+
+	/* Remap from cpp_island to cpp_target */
+	err = nfp_target_cpp(dest, tmp64, &dest, &tmp64, cpp->imb_cat_table);
+	if (err < 0)
+		return NULL;
+
+	address = (unsigned long long)tmp64;
+
+	if (!name)
+		name = "";
+
+	area = calloc(1, sizeof(*area) + cpp->op->area_priv_size +
+		      strlen(name) + 1);
+	if (!area)
+		return NULL;
+
+	area->cpp = cpp;
+	area->name = ((char *)area) + sizeof(*area) + cpp->op->area_priv_size;
+	memcpy(area->name, name, strlen(name) + 1);
+
+	/*
+	 * Preserve errno around the call to area_init, since most
+	 * implementations will blindly call nfp_target_action_width() for
+	 * both read and write modes, and that will set errno to EINVAL.
+	 */
+	tmp = errno;
+
+	err = cpp->op->area_init(area, dest, address, size);
+	if (err < 0) {
+		free(area);
+		return NULL;
+	}
+
+	/* Restore errno */
+	errno = tmp;
+
+	area->offset = address;
+	area->size = size;
+
+	return area;
+}
+
+struct nfp_cpp_area *
+nfp_cpp_area_alloc(struct nfp_cpp *cpp, uint32_t dest,
+		   unsigned long long address, unsigned long size)
+{
+	return nfp_cpp_area_alloc_with_name(cpp, dest, NULL, address, size);
+}
+
+/*
+ * nfp_cpp_area_alloc_acquire - allocate a new CPP area and lock it down
+ *
+ * @cpp:	CPP handle
+ * @dest:	CPP id
+ * @address:	start address on CPP target
+ * @size:	size of area
+ *
+ * Allocate and initialize a CPP area structure, and lock it down so
+ * that it can be accessed directly.
+ *
+ * NOTE: @address and @size must be 32-bit aligned values.
+ *
+ * NOTE: The area must also be 'released' when the structure is freed.
+ */
+struct nfp_cpp_area *
+nfp_cpp_area_alloc_acquire(struct nfp_cpp *cpp, uint32_t destination,
+			   unsigned long long address, unsigned long size)
+{
+	struct nfp_cpp_area *area;
+
+	area = nfp_cpp_area_alloc(cpp, destination, address, size);
+	if (!area)
+		return NULL;
+
+	if (nfp_cpp_area_acquire(area)) {
+		nfp_cpp_area_free(area);
+		return NULL;
+	}
+
+	return area;
+}
+
+/*
+ * nfp_cpp_area_free - free up the CPP area
+ * area:	CPP area handle
+ *
+ * Frees up memory resources held by the CPP area.
+ */
+void
+nfp_cpp_area_free(struct nfp_cpp_area *area)
+{
+	if (area->cpp->op->area_cleanup)
+		area->cpp->op->area_cleanup(area);
+	free(area);
+}
+
+/*
+ * nfp_cpp_area_release_free - release CPP area and free it
+ * area:	CPP area handle
+ *
+ * Releases CPP area and frees up memory resources held by it.
+ */
+void
+nfp_cpp_area_release_free(struct nfp_cpp_area *area)
+{
+	nfp_cpp_area_release(area);
+	nfp_cpp_area_free(area);
+}
+
+/*
+ * nfp_cpp_area_acquire - lock down a CPP area for access
+ * @area:	CPP area handle
+ *
+ * Locks down the CPP area for a potential long term activity. Area
+ * must always be locked down before being accessed.
+ */
+int
+nfp_cpp_area_acquire(struct nfp_cpp_area *area)
+{
+	if (area->cpp->op->area_acquire) {
+		int err = area->cpp->op->area_acquire(area);
+
+		if (err < 0)
+			return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * nfp_cpp_area_release - release a locked down CPP area
+ * @area:	CPP area handle
+ *
+ * Releases a previously locked down CPP area.
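+ *
+ * For one-shot transfers the two pairs are usually taken together via
+ * nfp_cpp_area_alloc_acquire() and nfp_cpp_area_release_free(); as a
+ * sketch, nfp_cpp_read() later in this file is essentially:
+ *
+ *	area = nfp_cpp_area_alloc_acquire(cpp, destination, address, length);
+ *	err = nfp_cpp_area_read(area, 0, kernel_vaddr, length);
+ *	nfp_cpp_area_release_free(area);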
+ */ +void +nfp_cpp_area_release(struct nfp_cpp_area *area) +{ + if (area->cpp->op->area_release) + area->cpp->op->area_release(area); +} + +/* + * nfp_cpp_area_iomem() - get IOMEM region for CPP area + * + * @area: CPP area handle + * + * Returns an iomem pointer for use with readl()/writel() style operations. + * + * NOTE: Area must have been locked down with an 'acquire'. + * + * Return: pointer to the area, or NULL + */ +void * +nfp_cpp_area_iomem(struct nfp_cpp_area *area) +{ + void *iomem = NULL; + + if (area->cpp->op->area_iomem) + iomem = area->cpp->op->area_iomem(area); + + return iomem; +} + +/* + * nfp_cpp_area_read - read data from CPP area + * + * @area: CPP area handle + * @offset: offset into CPP area + * @kernel_vaddr: kernel address to put data into + * @length: number of bytes to read + * + * Read data from indicated CPP region. + * + * NOTE: @offset and @length must be 32-bit aligned values. + * + * NOTE: Area must have been locked down with an 'acquire'. + */ +int +nfp_cpp_area_read(struct nfp_cpp_area *area, unsigned long offset, + void *kernel_vaddr, size_t length) +{ + if ((offset + length) > area->size) + return NFP_ERRNO(EFAULT); + + return area->cpp->op->area_read(area, kernel_vaddr, offset, length); +} + +/* + * nfp_cpp_area_write - write data to CPP area + * + * @area: CPP area handle + * @offset: offset into CPP area + * @kernel_vaddr: kernel address to read data from + * @length: number of bytes to write + * + * Write data to indicated CPP region. + * + * NOTE: @offset and @length must be 32-bit aligned values. + * + * NOTE: Area must have been locked down with an 'acquire'. + */ +int +nfp_cpp_area_write(struct nfp_cpp_area *area, unsigned long offset, + const void *kernel_vaddr, size_t length) +{ + if ((offset + length) > area->size) + return NFP_ERRNO(EFAULT); + + return area->cpp->op->area_write(area, kernel_vaddr, offset, length); +} + +void * +nfp_cpp_area_mapped(struct nfp_cpp_area *area) +{ + if (area->cpp->op->area_mapped) + return area->cpp->op->area_mapped(area); + return NULL; +} + +/* + * nfp_cpp_area_check_range - check if address range fits in CPP area + * + * @area: CPP area handle + * @offset: offset into CPP area + * @length: size of address range in bytes + * + * Check if address range fits within CPP area. Return 0 if area fits + * or -1 on error. + */ +int +nfp_cpp_area_check_range(struct nfp_cpp_area *area, unsigned long long offset, + unsigned long length) +{ + if (((offset + length) > area->size)) + return NFP_ERRNO(EFAULT); + + return 0; +} + +/* + * Return the correct CPP address, and fixup xpb_addr as needed, + * based upon NFP model. + */ +static uint32_t +nfp_xpb_to_cpp(struct nfp_cpp *cpp, uint32_t *xpb_addr) +{ + uint32_t xpb; + int island; + + if (!NFP_CPP_MODEL_IS_6000(cpp->model)) + return 0; + + xpb = NFP_CPP_ID(14, NFP_CPP_ACTION_RW, 0); + + /* + * Ensure that non-local XPB accesses go out through the + * global XPBM bus. 
+ */ + island = ((*xpb_addr) >> 24) & 0x3f; + + if (!island) + return xpb; + + if (island == 1) { + /* + * Accesses to the ARM Island overlay uses Island 0 + * Global Bit + */ + (*xpb_addr) &= ~0x7f000000; + if (*xpb_addr < 0x60000) + *xpb_addr |= (1 << 30); + else + /* And only non-ARM interfaces use island id = 1 */ + if (NFP_CPP_INTERFACE_TYPE_of(nfp_cpp_interface(cpp)) != + NFP_CPP_INTERFACE_TYPE_ARM) + *xpb_addr |= (1 << 24); + } else { + (*xpb_addr) |= (1 << 30); + } + + return xpb; +} + +int +nfp_cpp_area_readl(struct nfp_cpp_area *area, unsigned long offset, + uint32_t *value) +{ + int sz; + uint32_t tmp = 0; + + sz = nfp_cpp_area_read(area, offset, &tmp, sizeof(tmp)); + *value = rte_le_to_cpu_32(tmp); + + return (sz == sizeof(*value)) ? 0 : -1; +} + +int +nfp_cpp_area_writel(struct nfp_cpp_area *area, unsigned long offset, + uint32_t value) +{ + int sz; + + value = rte_cpu_to_le_32(value); + sz = nfp_cpp_area_write(area, offset, &value, sizeof(value)); + return (sz == sizeof(value)) ? 0 : -1; +} + +int +nfp_cpp_area_readq(struct nfp_cpp_area *area, unsigned long offset, + uint64_t *value) +{ + int sz; + uint64_t tmp = 0; + + sz = nfp_cpp_area_read(area, offset, &tmp, sizeof(tmp)); + *value = rte_le_to_cpu_64(tmp); + + return (sz == sizeof(*value)) ? 0 : -1; +} + +int +nfp_cpp_area_writeq(struct nfp_cpp_area *area, unsigned long offset, + uint64_t value) +{ + int sz; + + value = rte_cpu_to_le_64(value); + sz = nfp_cpp_area_write(area, offset, &value, sizeof(value)); + + return (sz == sizeof(value)) ? 0 : -1; +} + +int +nfp_cpp_readl(struct nfp_cpp *cpp, uint32_t cpp_id, unsigned long long address, + uint32_t *value) +{ + int sz; + uint32_t tmp; + + sz = nfp_cpp_read(cpp, cpp_id, address, &tmp, sizeof(tmp)); + *value = rte_le_to_cpu_32(tmp); + + return (sz == sizeof(*value)) ? 0 : -1; +} + +int +nfp_cpp_writel(struct nfp_cpp *cpp, uint32_t cpp_id, unsigned long long address, + uint32_t value) +{ + int sz; + + value = rte_cpu_to_le_32(value); + sz = nfp_cpp_write(cpp, cpp_id, address, &value, sizeof(value)); + + return (sz == sizeof(value)) ? 0 : -1; +} + +int +nfp_cpp_readq(struct nfp_cpp *cpp, uint32_t cpp_id, unsigned long long address, + uint64_t *value) +{ + int sz; + uint64_t tmp; + + sz = nfp_cpp_read(cpp, cpp_id, address, &tmp, sizeof(tmp)); + *value = rte_le_to_cpu_64(tmp); + + return (sz == sizeof(*value)) ? 0 : -1; +} + +int +nfp_cpp_writeq(struct nfp_cpp *cpp, uint32_t cpp_id, unsigned long long address, + uint64_t value) +{ + int sz; + + value = rte_cpu_to_le_64(value); + sz = nfp_cpp_write(cpp, cpp_id, address, &value, sizeof(value)); + + return (sz == sizeof(value)) ? 
0 : -1;
+}
+
+int
+nfp_xpb_writel(struct nfp_cpp *cpp, uint32_t xpb_addr, uint32_t value)
+{
+	uint32_t cpp_dest;
+
+	cpp_dest = nfp_xpb_to_cpp(cpp, &xpb_addr);
+
+	return nfp_cpp_writel(cpp, cpp_dest, xpb_addr, value);
+}
+
+int
+nfp_xpb_readl(struct nfp_cpp *cpp, uint32_t xpb_addr, uint32_t *value)
+{
+	uint32_t cpp_dest;
+
+	cpp_dest = nfp_xpb_to_cpp(cpp, &xpb_addr);
+
+	return nfp_cpp_readl(cpp, cpp_dest, xpb_addr, value);
+}
+
+static struct nfp_cpp *
+nfp_cpp_alloc(const char *devname, int driver_lock_needed)
+{
+	const struct nfp_cpp_operations *ops;
+	struct nfp_cpp *cpp;
+	int err;
+
+	ops = nfp_cpp_transport_operations();
+
+	if (!ops || !ops->init)
+		return NFP_ERRPTR(EINVAL);
+
+	cpp = calloc(1, sizeof(*cpp));
+	if (!cpp)
+		return NULL;
+
+	cpp->op = ops;
+	cpp->driver_lock_needed = driver_lock_needed;
+
+	if (cpp->op->init) {
+		err = cpp->op->init(cpp, devname);
+		if (err < 0) {
+			free(cpp);
+			return NULL;
+		}
+	}
+
+	if (NFP_CPP_MODEL_IS_6000(nfp_cpp_model(cpp))) {
+		uint32_t xpbaddr;
+		size_t tgt;
+
+		for (tgt = 0; tgt < ARRAY_SIZE(cpp->imb_cat_table); tgt++) {
+			/* Hardcoded XPB IMB Base, island 0 */
+			xpbaddr = 0x000a0000 + (tgt * 4);
+			err = nfp_xpb_readl(cpp, xpbaddr,
+					    (uint32_t *)&cpp->imb_cat_table[tgt]);
+			if (err < 0) {
+				free(cpp);
+				return NULL;
+			}
+		}
+	}
+
+	return cpp;
+}
+
+/*
+ * nfp_cpp_free - free the CPP handle
+ * @cpp:	CPP handle
+ */
+void
+nfp_cpp_free(struct nfp_cpp *cpp)
+{
+	if (cpp->op && cpp->op->free)
+		cpp->op->free(cpp);
+
+	if (cpp->serial_len)
+		free(cpp->serial);
+
+	free(cpp);
+}
+
+struct nfp_cpp *
+nfp_cpp_from_device_name(const char *devname, int driver_lock_needed)
+{
+	return nfp_cpp_alloc(devname, driver_lock_needed);
+}
+
+/*
+ * Modify bits of a 32-bit value from the XPB bus
+ *
+ * @param cpp	NFP CPP device handle
+ * @param xpb_tgt	XPB target and address
+ * @param mask	mask of bits to alter
+ * @param value	value to modify
+ *
+ * @return 0 on success, or -1 on failure (and set errno accordingly).
+ */
+int
+nfp_xpb_writelm(struct nfp_cpp *cpp, uint32_t xpb_tgt, uint32_t mask,
+		uint32_t value)
+{
+	int err;
+	uint32_t tmp;
+
+	err = nfp_xpb_readl(cpp, xpb_tgt, &tmp);
+	if (err < 0)
+		return err;
+
+	tmp &= ~mask;
+	tmp |= (mask & value);
+	return nfp_xpb_writel(cpp, xpb_tgt, tmp);
+}
+
+/*
+ * Wait for bits of a 32-bit value from the XPB bus to match a masked value
+ *
+ * @param cpp	NFP CPP device handle
+ * @param xpb_tgt	XPB target and address
+ * @param mask	mask of bits to monitor
+ * @param value	value to monitor for
+ * @param timeout_us	maximum number of us to wait (-1 for forever)
+ *
+ * @return >= 0 on success, or -1 on failure (and set errno accordingly).
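+ *
+ * For illustration, polling until a hypothetical ready bit (bit 0 of a
+ * placeholder XPB register) is set, waiting at most ~1 ms:
+ *
+ *	err = nfp_xpb_waitlm(cpp, xpb_tgt, 0x1, 0x1, 1000);
+ *	if (err >= 0)
+ *		... matched; err is roughly the unused budget in us ...
+ *	else if (errno == ETIMEDOUT)
+ *		... the bit did not match within the timeout ...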
+ */ +int +nfp_xpb_waitlm(struct nfp_cpp *cpp, uint32_t xpb_tgt, uint32_t mask, + uint32_t value, int timeout_us) +{ + uint32_t tmp; + int err; + + do { + err = nfp_xpb_readl(cpp, xpb_tgt, &tmp); + if (err < 0) + goto exit; + + if ((tmp & mask) == (value & mask)) { + if (timeout_us < 0) + timeout_us = 0; + break; + } + + if (timeout_us < 0) + continue; + + timeout_us -= 100; + usleep(100); + } while (timeout_us >= 0); + + if (timeout_us < 0) + err = NFP_ERRNO(ETIMEDOUT); + else + err = timeout_us; + +exit: + return err; +} + +/* + * nfp_cpp_read - read from CPP target + * @cpp: CPP handle + * @destination: CPP id + * @address: offset into CPP target + * @kernel_vaddr: kernel buffer for result + * @length: number of bytes to read + */ +int +nfp_cpp_read(struct nfp_cpp *cpp, uint32_t destination, + unsigned long long address, void *kernel_vaddr, size_t length) +{ + struct nfp_cpp_area *area; + int err; + + area = nfp_cpp_area_alloc_acquire(cpp, destination, address, length); + if (!area) { + printf("Area allocation/acquire failed\n"); + return -1; + } + + err = nfp_cpp_area_read(area, 0, kernel_vaddr, length); + + nfp_cpp_area_release_free(area); + return err; +} + +/* + * nfp_cpp_write - write to CPP target + * @cpp: CPP handle + * @destination: CPP id + * @address: offset into CPP target + * @kernel_vaddr: kernel buffer to read from + * @length: number of bytes to write + */ +int +nfp_cpp_write(struct nfp_cpp *cpp, uint32_t destination, + unsigned long long address, const void *kernel_vaddr, + size_t length) +{ + struct nfp_cpp_area *area; + int err; + + area = nfp_cpp_area_alloc_acquire(cpp, destination, address, length); + if (!area) + return -1; + + err = nfp_cpp_area_write(area, 0, kernel_vaddr, length); + + nfp_cpp_area_release_free(area); + return err; +} + +/* + * nfp_cpp_area_fill - fill a CPP area with a value + * @area: CPP area + * @offset: offset into CPP area + * @value: value to fill with + * @length: length of area to fill + */ +int +nfp_cpp_area_fill(struct nfp_cpp_area *area, unsigned long offset, + uint32_t value, size_t length) +{ + int err; + size_t i; + uint64_t value64; + + value = rte_cpu_to_le_32(value); + value64 = ((uint64_t)value << 32) | value; + + if ((offset + length) > area->size) + return NFP_ERRNO(EINVAL); + + if ((area->offset + offset) & 3) + return NFP_ERRNO(EINVAL); + + if (((area->offset + offset) & 7) == 4 && length >= 4) { + err = nfp_cpp_area_write(area, offset, &value, sizeof(value)); + if (err < 0) + return err; + if (err != sizeof(value)) + return NFP_ERRNO(ENOSPC); + offset += sizeof(value); + length -= sizeof(value); + } + + for (i = 0; (i + sizeof(value)) < length; i += sizeof(value64)) { + err = + nfp_cpp_area_write(area, offset + i, &value64, + sizeof(value64)); + if (err < 0) + return err; + if (err != sizeof(value64)) + return NFP_ERRNO(ENOSPC); + } + + if ((i + sizeof(value)) <= length) { + err = + nfp_cpp_area_write(area, offset + i, &value, sizeof(value)); + if (err < 0) + return err; + if (err != sizeof(value)) + return NFP_ERRNO(ENOSPC); + i += sizeof(value); + } + + return (int)i; +} + +/* + * NOTE: This code should not use nfp_xpb_* functions, + * as those are model-specific + */ +uint32_t +__nfp_cpp_model_autodetect(struct nfp_cpp *cpp) +{ + uint32_t arm_id = NFP_CPP_ID(NFP_CPP_TARGET_ARM, 0, 0); + uint32_t model = 0; + + nfp_cpp_readl(cpp, arm_id, NFP6000_ARM_GCSR_SOFTMODEL0, &model); + + if (NFP_CPP_MODEL_IS_6000(model)) { + uint32_t tmp; + + nfp_cpp_model_set(cpp, model); + + /* The PL's PluDeviceID revision code is 
authoritative */
+		model &= ~0xff;
+		nfp_xpb_readl(cpp, NFP_XPB_DEVICE(1, 1, 16) +
+			      NFP_PL_DEVICE_ID, &tmp);
+		model |= (NFP_PL_DEVICE_ID_MASK & tmp) - 0x10;
+	}
+
+	return model;
+}
+
+/*
+ * nfp_cpp_map_area() - Helper function to map an area
+ * @cpp:	NFP CPP handle
+ * @domain:	CPP domain
+ * @target:	CPP target
+ * @addr:	CPP address
+ * @size:	Size of the area
+ * @area:	Area handle (output)
+ *
+ * Map an area of IOMEM access. To undo the effect of this function call
+ * @nfp_cpp_area_release_free(*area).
+ *
+ * Return: Pointer to memory mapped area or NULL on failure
+ */
+uint8_t *
+nfp_cpp_map_area(struct nfp_cpp *cpp, int domain, int target, uint64_t addr,
+		 unsigned long size, struct nfp_cpp_area **area)
+{
+	uint8_t *res;
+	uint32_t dest;
+
+	dest = NFP_CPP_ISLAND_ID(target, NFP_CPP_ACTION_RW, 0, domain);
+
+	*area = nfp_cpp_area_alloc_acquire(cpp, dest, addr, size);
+	if (!*area)
+		goto err_eio;
+
+	res = nfp_cpp_area_iomem(*area);
+	if (!res)
+		goto err_release_free;
+
+	return res;
+
+err_release_free:
+	nfp_cpp_area_release_free(*area);
+err_eio:
+	return NULL;
+} diff --git a/drivers/net/nfp/nfpcore/nfp_crc.c b/drivers/net/nfp/nfpcore/nfp_crc.c new file mode 100644 index 00000000..20431bf8 --- /dev/null +++ b/drivers/net/nfp/nfpcore/nfp_crc.c @@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#include
+#include
+
+#include "nfp_crc.h"
+
+static inline uint32_t
+nfp_crc32_be_generic(uint32_t crc, unsigned char const *p, size_t len,
+		     uint32_t polynomial)
+{
+	int i;
+	while (len--) {
+		crc ^= *p++ << 24;
+		for (i = 0; i < 8; i++)
+			crc = (crc << 1) ^ ((crc & 0x80000000) ? polynomial :
+					    0);
+	}
+	return crc;
+}
+
+static inline uint32_t
+nfp_crc32_be(uint32_t crc, unsigned char const *p, size_t len)
+{
+	return nfp_crc32_be_generic(crc, p, len, CRCPOLY_BE);
+}
+
+static uint32_t
+nfp_crc32_posix_end(uint32_t crc, size_t total_len)
+{
+	/* Extend with the length of the string. */
+	while (total_len != 0) {
+		uint8_t c = total_len & 0xff;
+
+		crc = nfp_crc32_be(crc, &c, 1);
+		total_len >>= 8;
+	}
+
+	return ~crc;
+}
+
+uint32_t
+nfp_crc32_posix(const void *buff, size_t len)
+{
+	return nfp_crc32_posix_end(nfp_crc32_be(0, buff, len), len);
+} diff --git a/drivers/net/nfp/nfpcore/nfp_crc.h b/drivers/net/nfp/nfpcore/nfp_crc.h new file mode 100644 index 00000000..f99c89fc --- /dev/null +++ b/drivers/net/nfp/nfpcore/nfp_crc.h @@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#ifndef __NFP_CRC_H__
+#define __NFP_CRC_H__
+
+/*
+ * There are multiple 16-bit CRC polynomials in common use, but this is
+ * *the* standard CRC-32 polynomial, first popularized by Ethernet.
+ * x^32+x^26+x^23+x^22+x^16+x^12+x^11+x^10+x^8+x^7+x^5+x^4+x^2+x^1+x^0
+ */
+#define CRCPOLY_LE 0xedb88320
+#define CRCPOLY_BE 0x04c11db7
+
+uint32_t nfp_crc32_posix(const void *buff, size_t len);
+
+#endif diff --git a/drivers/net/nfp/nfpcore/nfp_hwinfo.c b/drivers/net/nfp/nfpcore/nfp_hwinfo.c new file mode 100644 index 00000000..c0516bf8 --- /dev/null +++ b/drivers/net/nfp/nfpcore/nfp_hwinfo.c @@ -0,0 +1,199 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+/* Parse the hwinfo table that the ARM firmware builds in the ARM scratch SRAM
+ * after chip reset.
+ * + * Examples of the fields: + * me.count = 40 + * me.mask = 0x7f_ffff_ffff + * + * me.count is the total number of MEs on the system. + * me.mask is the bitmask of MEs that are available for application usage. + * + * (ie, in this example, ME 39 has been reserved by boardconfig.) + */ + +#include +#include + +#include "nfp_cpp.h" +#include "nfp6000/nfp6000.h" +#include "nfp_resource.h" +#include "nfp_hwinfo.h" +#include "nfp_crc.h" + +static int +nfp_hwinfo_is_updating(struct nfp_hwinfo *hwinfo) +{ + return hwinfo->version & NFP_HWINFO_VERSION_UPDATING; +} + +static int +nfp_hwinfo_db_walk(struct nfp_hwinfo *hwinfo, uint32_t size) +{ + const char *key, *val, *end = hwinfo->data + size; + + for (key = hwinfo->data; *key && key < end; + key = val + strlen(val) + 1) { + val = key + strlen(key) + 1; + if (val >= end) { + printf("Bad HWINFO - overflowing key\n"); + return -EINVAL; + } + + if (val + strlen(val) + 1 > end) { + printf("Bad HWINFO - overflowing value\n"); + return -EINVAL; + } + } + return 0; +} + +static int +nfp_hwinfo_db_validate(struct nfp_hwinfo *db, uint32_t len) +{ + uint32_t size, new_crc, *crc; + + size = db->size; + if (size > len) { + printf("Unsupported hwinfo size %u > %u\n", size, len); + return -EINVAL; + } + + size -= sizeof(uint32_t); + new_crc = nfp_crc32_posix((char *)db, size); + crc = (uint32_t *)(db->start + size); + if (new_crc != *crc) { + printf("Corrupt hwinfo table (CRC mismatch)\n"); + printf("\tcalculated 0x%x, expected 0x%x\n", new_crc, *crc); + return -EINVAL; + } + + return nfp_hwinfo_db_walk(db, size); +} + +static struct nfp_hwinfo * +nfp_hwinfo_try_fetch(struct nfp_cpp *cpp, size_t *cpp_size) +{ + struct nfp_hwinfo *header; + void *res; + uint64_t cpp_addr; + uint32_t cpp_id; + int err; + uint8_t *db; + + res = nfp_resource_acquire(cpp, NFP_RESOURCE_NFP_HWINFO); + if (res) { + cpp_id = nfp_resource_cpp_id(res); + cpp_addr = nfp_resource_address(res); + *cpp_size = nfp_resource_size(res); + + nfp_resource_release(res); + + if (*cpp_size < HWINFO_SIZE_MIN) + return NULL; + } else { + return NULL; + } + + db = malloc(*cpp_size + 1); + if (!db) + return NULL; + + err = nfp_cpp_read(cpp, cpp_id, cpp_addr, db, *cpp_size); + if (err != (int)*cpp_size) + goto exit_free; + + header = (void *)db; + printf("NFP HWINFO header: %08x\n", *(uint32_t *)header); + if (nfp_hwinfo_is_updating(header)) + goto exit_free; + + if (header->version != NFP_HWINFO_VERSION_2) { + printf("Unknown HWInfo version: 0x%08x\n", + header->version); + goto exit_free; + } + + /* NULL-terminate for safety */ + db[*cpp_size] = '\0'; + + return (void *)db; +exit_free: + free(db); + return NULL; +} + +static struct nfp_hwinfo * +nfp_hwinfo_fetch(struct nfp_cpp *cpp, size_t *hwdb_size) +{ + struct timespec wait; + struct nfp_hwinfo *db; + int count; + + wait.tv_sec = 0; + wait.tv_nsec = 10000000; + count = 0; + + for (;;) { + db = nfp_hwinfo_try_fetch(cpp, hwdb_size); + if (db) + return db; + + nanosleep(&wait, NULL); + if (count++ > 200) { + printf("NFP access error\n"); + return NULL; + } + } +} + +struct nfp_hwinfo * +nfp_hwinfo_read(struct nfp_cpp *cpp) +{ + struct nfp_hwinfo *db; + size_t hwdb_size = 0; + int err; + + db = nfp_hwinfo_fetch(cpp, &hwdb_size); + if (!db) + return NULL; + + err = nfp_hwinfo_db_validate(db, hwdb_size); + if (err) { + free(db); + return NULL; + } + return db; +} + +/* + * nfp_hwinfo_lookup() - Find a value in the HWInfo table by name + * @hwinfo: NFP HWinfo table + * @lookup: HWInfo name to search for + * + * Return: Value of the HWInfo name, or NULL + 
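+ *
+ * For illustration ("assembly.partno" is one example key; the available
+ * key set depends on the firmware):
+ *
+ *	struct nfp_hwinfo *hwinfo = nfp_hwinfo_read(cpp);
+ *	const char *partno;
+ *
+ *	if (hwinfo) {
+ *		partno = nfp_hwinfo_lookup(hwinfo, "assembly.partno");
+ *		if (partno)
+ *			printf("partno: %s\n", partno);
+ *		free(hwinfo);
+ *	}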
*/ +const char * +nfp_hwinfo_lookup(struct nfp_hwinfo *hwinfo, const char *lookup) +{ + const char *key, *val, *end; + + if (!hwinfo || !lookup) + return NULL; + + end = hwinfo->data + hwinfo->size - sizeof(uint32_t); + + for (key = hwinfo->data; *key && key < end; + key = val + strlen(val) + 1) { + val = key + strlen(key) + 1; + + if (strcmp(key, lookup) == 0) + return val; + } + + return NULL; +} diff --git a/drivers/net/nfp/nfpcore/nfp_hwinfo.h b/drivers/net/nfp/nfpcore/nfp_hwinfo.h new file mode 100644 index 00000000..ccc61632 --- /dev/null +++ b/drivers/net/nfp/nfpcore/nfp_hwinfo.h @@ -0,0 +1,85 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Netronome Systems, Inc. + * All rights reserved. + */ + +#ifndef __NFP_HWINFO_H__ +#define __NFP_HWINFO_H__ + +#include + +#define HWINFO_SIZE_MIN 0x100 + +/* + * The Hardware Info Table defines the properties of the system. + * + * HWInfo v1 Table (fixed size) + * + * 0x0000: uint32_t version Hardware Info Table version (1.0) + * 0x0004: uint32_t size Total size of the table, including the + * CRC32 (IEEE 802.3) + * 0x0008: uint32_t jumptab Offset of key/value table + * 0x000c: uint32_t keys Total number of keys in the key/value + * table + * NNNNNN: Key/value jump table and string data + * (size - 4): uint32_t crc32 CRC32 (same as IEEE 802.3, POSIX csum, etc) + * CRC32("",0) = ~0, CRC32("a",1) = 0x48C279FE + * + * HWInfo v2 Table (variable size) + * + * 0x0000: uint32_t version Hardware Info Table version (2.0) + * 0x0004: uint32_t size Current size of the data area, excluding + * CRC32 + * 0x0008: uint32_t limit Maximum size of the table + * 0x000c: uint32_t reserved Unused, set to zero + * NNNNNN: Key/value data + * (size - 4): uint32_t crc32 CRC32 (same as IEEE 802.3, POSIX csum, etc) + * CRC32("",0) = ~0, CRC32("a",1) = 0x48C279FE + * + * If the HWInfo table is in the process of being updated, the low bit of + * version will be set. + * + * HWInfo v1 Key/Value Table + * ------------------------- + * + * The key/value table is a set of offsets to ASCIIZ strings which have + * been strcmp(3) sorted (yes, please use bsearch(3) on the table). + * + * All keys are guaranteed to be unique. + * + * N+0: uint32_t key_1 Offset to the first key + * N+4: uint32_t val_1 Offset to the first value + * N+8: uint32_t key_2 Offset to the second key + * N+c: uint32_t val_2 Offset to the second value + * ... + * + * HWInfo v2 Key/Value Table + * ------------------------- + * + * Packed UTF8Z strings, ie 'key1\000value1\000key2\000value2\000' + * + * Unsorted. + */ + +#define NFP_HWINFO_VERSION_1 ('H' << 24 | 'I' << 16 | 1 << 8 | 0 << 1 | 0) +#define NFP_HWINFO_VERSION_2 ('H' << 24 | 'I' << 16 | 2 << 8 | 0 << 1 | 0) +#define NFP_HWINFO_VERSION_UPDATING BIT(0) + +struct nfp_hwinfo { + uint8_t start[0]; + + uint32_t version; + uint32_t size; + + /* v2 specific fields */ + uint32_t limit; + uint32_t resv; + + char data[]; +}; + +struct nfp_hwinfo *nfp_hwinfo_read(struct nfp_cpp *cpp); + +const char *nfp_hwinfo_lookup(struct nfp_hwinfo *hwinfo, const char *lookup); + +#endif diff --git a/drivers/net/nfp/nfpcore/nfp_mip.c b/drivers/net/nfp/nfpcore/nfp_mip.c new file mode 100644 index 00000000..c86966df --- /dev/null +++ b/drivers/net/nfp/nfpcore/nfp_mip.c @@ -0,0 +1,154 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Netronome Systems, Inc. + * All rights reserved. 
+ */ + +#include +#include + +#include "nfp_cpp.h" +#include "nfp_mip.h" +#include "nfp_nffw.h" + +#define NFP_MIP_SIGNATURE rte_cpu_to_le_32(0x0050494d) /* "MIP\0" */ +#define NFP_MIP_VERSION rte_cpu_to_le_32(1) +#define NFP_MIP_MAX_OFFSET (256 * 1024) + +struct nfp_mip { + uint32_t signature; + uint32_t mip_version; + uint32_t mip_size; + uint32_t first_entry; + + uint32_t version; + uint32_t buildnum; + uint32_t buildtime; + uint32_t loadtime; + + uint32_t symtab_addr; + uint32_t symtab_size; + uint32_t strtab_addr; + uint32_t strtab_size; + + char name[16]; + char toolchain[32]; +}; + +/* Read memory and check if it could be a valid MIP */ +static int +nfp_mip_try_read(struct nfp_cpp *cpp, uint32_t cpp_id, uint64_t addr, + struct nfp_mip *mip) +{ + int ret; + + ret = nfp_cpp_read(cpp, cpp_id, addr, mip, sizeof(*mip)); + if (ret != sizeof(*mip)) { + printf("Failed to read MIP data (%d, %zu)\n", + ret, sizeof(*mip)); + return -EIO; + } + if (mip->signature != NFP_MIP_SIGNATURE) { + printf("Incorrect MIP signature (0x%08x)\n", + rte_le_to_cpu_32(mip->signature)); + return -EINVAL; + } + if (mip->mip_version != NFP_MIP_VERSION) { + printf("Unsupported MIP version (%d)\n", + rte_le_to_cpu_32(mip->mip_version)); + return -EINVAL; + } + + return 0; +} + +/* Try to locate MIP using the resource table */ +static int +nfp_mip_read_resource(struct nfp_cpp *cpp, struct nfp_mip *mip) +{ + struct nfp_nffw_info *nffw_info; + uint32_t cpp_id; + uint64_t addr; + int err; + + nffw_info = nfp_nffw_info_open(cpp); + if (!nffw_info) + return -ENODEV; + + err = nfp_nffw_info_mip_first(nffw_info, &cpp_id, &addr); + if (err) + goto exit_close_nffw; + + err = nfp_mip_try_read(cpp, cpp_id, addr, mip); +exit_close_nffw: + nfp_nffw_info_close(nffw_info); + return err; +} + +/* + * nfp_mip_open() - Get device MIP structure + * @cpp: NFP CPP Handle + * + * Copy MIP structure from NFP device and return it. The returned + * structure is handled internally by the library and should be + * freed by calling nfp_mip_close(). + * + * Return: pointer to mip, NULL on failure. 
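+ *
+ * For illustration, reading back the firmware name and symbol table
+ * location (error handling abbreviated):
+ *
+ *	struct nfp_mip *mip = nfp_mip_open(cpp);
+ *	uint32_t addr, size;
+ *
+ *	if (mip) {
+ *		nfp_mip_symtab(mip, &addr, &size);
+ *		printf("%s: symtab at 0x%x, %u bytes\n",
+ *		       nfp_mip_name(mip), addr, size);
+ *		nfp_mip_close(mip);
+ *	}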
+ */ +struct nfp_mip * +nfp_mip_open(struct nfp_cpp *cpp) +{ + struct nfp_mip *mip; + int err; + + mip = malloc(sizeof(*mip)); + if (!mip) + return NULL; + + err = nfp_mip_read_resource(cpp, mip); + if (err) { + free(mip); + return NULL; + } + + mip->name[sizeof(mip->name) - 1] = 0; + + return mip; +} + +void +nfp_mip_close(struct nfp_mip *mip) +{ + free(mip); +} + +const char * +nfp_mip_name(const struct nfp_mip *mip) +{ + return mip->name; +} + +/* + * nfp_mip_symtab() - Get the address and size of the MIP symbol table + * @mip: MIP handle + * @addr: Location for NFP DDR address of MIP symbol table + * @size: Location for size of MIP symbol table + */ +void +nfp_mip_symtab(const struct nfp_mip *mip, uint32_t *addr, uint32_t *size) +{ + *addr = rte_le_to_cpu_32(mip->symtab_addr); + *size = rte_le_to_cpu_32(mip->symtab_size); +} + +/* + * nfp_mip_strtab() - Get the address and size of the MIP symbol name table + * @mip: MIP handle + * @addr: Location for NFP DDR address of MIP symbol name table + * @size: Location for size of MIP symbol name table + */ +void +nfp_mip_strtab(const struct nfp_mip *mip, uint32_t *addr, uint32_t *size) +{ + *addr = rte_le_to_cpu_32(mip->strtab_addr); + *size = rte_le_to_cpu_32(mip->strtab_size); +} diff --git a/drivers/net/nfp/nfpcore/nfp_mip.h b/drivers/net/nfp/nfpcore/nfp_mip.h new file mode 100644 index 00000000..d0919b58 --- /dev/null +++ b/drivers/net/nfp/nfpcore/nfp_mip.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Netronome Systems, Inc. + * All rights reserved. + */ + +#ifndef __NFP_MIP_H__ +#define __NFP_MIP_H__ + +#include "nfp_nffw.h" + +struct nfp_mip; + +struct nfp_mip *nfp_mip_open(struct nfp_cpp *cpp); +void nfp_mip_close(struct nfp_mip *mip); + +const char *nfp_mip_name(const struct nfp_mip *mip); +void nfp_mip_symtab(const struct nfp_mip *mip, uint32_t *addr, uint32_t *size); +void nfp_mip_strtab(const struct nfp_mip *mip, uint32_t *addr, uint32_t *size); +int nfp_nffw_info_mip_first(struct nfp_nffw_info *state, uint32_t *cpp_id, + uint64_t *off); +#endif diff --git a/drivers/net/nfp/nfpcore/nfp_mutex.c b/drivers/net/nfp/nfpcore/nfp_mutex.c new file mode 100644 index 00000000..318c5800 --- /dev/null +++ b/drivers/net/nfp/nfpcore/nfp_mutex.c @@ -0,0 +1,424 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Netronome Systems, Inc. + * All rights reserved. + */ + +#include + +#include +#include +#include + +#include "nfp_cpp.h" +#include "nfp6000/nfp6000.h" + +#define MUTEX_LOCKED(interface) ((((uint32_t)(interface)) << 16) | 0x000f) +#define MUTEX_UNLOCK(interface) (0 | 0x0000) + +#define MUTEX_IS_LOCKED(value) (((value) & 0xffff) == 0x000f) +#define MUTEX_IS_UNLOCKED(value) (((value) & 0xffff) == 0x0000) +#define MUTEX_INTERFACE(value) (((value) >> 16) & 0xffff) + +/* + * If you need more than 65536 recursive locks, please + * rethink your code. 
+ */ +#define MUTEX_DEPTH_MAX 0xffff + +struct nfp_cpp_mutex { + struct nfp_cpp *cpp; + uint8_t target; + uint16_t depth; + unsigned long long address; + uint32_t key; + unsigned int usage; + struct nfp_cpp_mutex *prev, *next; +}; + +static int +_nfp_cpp_mutex_validate(uint32_t model, int *target, unsigned long long address) +{ + /* Address must be 64-bit aligned */ + if (address & 7) + return NFP_ERRNO(EINVAL); + + if (NFP_CPP_MODEL_IS_6000(model)) { + if (*target != NFP_CPP_TARGET_MU) + return NFP_ERRNO(EINVAL); + } else { + return NFP_ERRNO(EINVAL); + } + + return 0; +} + +/* + * Initialize a mutex location + * + * The CPP target:address must point to a 64-bit aligned location, and + * will initialize 64 bits of data at the location. + * + * This creates the initial mutex state, as locked by this + * nfp_cpp_interface(). + * + * This function should only be called when setting up + * the initial lock state upon boot-up of the system. + * + * @param mutex NFP CPP Mutex handle + * @param target NFP CPP target ID (ie NFP_CPP_TARGET_CLS or + * NFP_CPP_TARGET_MU) + * @param address Offset into the address space of the NFP CPP target ID + * @param key Unique 32-bit value for this mutex + * + * @return 0 on success, or -1 on failure (and set errno accordingly). + */ +int +nfp_cpp_mutex_init(struct nfp_cpp *cpp, int target, unsigned long long address, + uint32_t key) +{ + uint32_t model = nfp_cpp_model(cpp); + uint32_t muw = NFP_CPP_ID(target, 4, 0); /* atomic_write */ + int err; + + err = _nfp_cpp_mutex_validate(model, &target, address); + if (err < 0) + return err; + + err = nfp_cpp_writel(cpp, muw, address + 4, key); + if (err < 0) + return err; + + err = + nfp_cpp_writel(cpp, muw, address + 0, + MUTEX_LOCKED(nfp_cpp_interface(cpp))); + if (err < 0) + return err; + + return 0; +} + +/* + * Create a mutex handle from an address controlled by a MU Atomic engine + * + * The CPP target:address must point to a 64-bit aligned location, and + * reserve 64 bits of data at the location for use by the handle. + * + * Only target/address pairs that point to entities that support the + * MU Atomic Engine are supported. + * + * @param cpp NFP CPP handle + * @param target NFP CPP target ID (ie NFP_CPP_TARGET_CLS or + * NFP_CPP_TARGET_MU) + * @param address Offset into the address space of the NFP CPP target ID + * @param key 32-bit unique key (must match the key at this location) + * + * @return A non-NULL struct nfp_cpp_mutex * on success, NULL on failure. + */ +struct nfp_cpp_mutex * +nfp_cpp_mutex_alloc(struct nfp_cpp *cpp, int target, + unsigned long long address, uint32_t key) +{ + uint32_t model = nfp_cpp_model(cpp); + struct nfp_cpp_mutex *mutex; + uint32_t mur = NFP_CPP_ID(target, 3, 0); /* atomic_read */ + int err; + uint32_t tmp; + + /* Look for cached mutex */ + for (mutex = cpp->mutex_cache; mutex; mutex = mutex->next) { + if (mutex->target == target && mutex->address == address) + break; + } + + if (mutex) { + if (mutex->key == key) { + mutex->usage++; + return mutex; + } + + /* If the key doesn't match... 
+		return NFP_ERRPTR(EEXIST);
+	}
+
+	err = _nfp_cpp_mutex_validate(model, &target, address);
+	if (err < 0)
+		return NULL;
+
+	err = nfp_cpp_readl(cpp, mur, address + 4, &tmp);
+	if (err < 0)
+		return NULL;
+
+	if (tmp != key)
+		return NFP_ERRPTR(EEXIST);
+
+	mutex = calloc(sizeof(*mutex), 1);
+	if (!mutex)
+		return NFP_ERRPTR(ENOMEM);
+
+	mutex->cpp = cpp;
+	mutex->target = target;
+	mutex->address = address;
+	mutex->key = key;
+	mutex->depth = 0;
+	mutex->usage = 1;
+
+	/* Add mutex to the cache */
+	if (cpp->mutex_cache) {
+		cpp->mutex_cache->prev = mutex;
+		mutex->next = cpp->mutex_cache;
+		cpp->mutex_cache = mutex;
+	} else {
+		cpp->mutex_cache = mutex;
+	}
+
+	return mutex;
+}
+
+struct nfp_cpp *
+nfp_cpp_mutex_cpp(struct nfp_cpp_mutex *mutex)
+{
+	return mutex->cpp;
+}
+
+uint32_t
+nfp_cpp_mutex_key(struct nfp_cpp_mutex *mutex)
+{
+	return mutex->key;
+}
+
+uint16_t
+nfp_cpp_mutex_owner(struct nfp_cpp_mutex *mutex)
+{
+	uint32_t mur = NFP_CPP_ID(mutex->target, 3, 0);	/* atomic_read */
+	uint32_t value, key;
+	int err;
+
+	err = nfp_cpp_readl(mutex->cpp, mur, mutex->address, &value);
+	if (err < 0)
+		return err;
+
+	err = nfp_cpp_readl(mutex->cpp, mur, mutex->address + 4, &key);
+	if (err < 0)
+		return err;
+
+	if (key != mutex->key)
+		return NFP_ERRNO(EPERM);
+
+	if (!MUTEX_IS_LOCKED(value))
+		return 0;
+
+	return MUTEX_INTERFACE(value);
+}
+
+int
+nfp_cpp_mutex_target(struct nfp_cpp_mutex *mutex)
+{
+	return mutex->target;
+}
+
+uint64_t
+nfp_cpp_mutex_address(struct nfp_cpp_mutex *mutex)
+{
+	return mutex->address;
+}
+
+/*
+ * Free a mutex handle - does not alter the lock state
+ *
+ * @param mutex NFP CPP Mutex handle
+ */
+void
+nfp_cpp_mutex_free(struct nfp_cpp_mutex *mutex)
+{
+	mutex->usage--;
+	if (mutex->usage > 0)
+		return;
+
+	/* Remove mutex from the cache */
+	if (mutex->next)
+		mutex->next->prev = mutex->prev;
+	if (mutex->prev)
+		mutex->prev->next = mutex->next;
+
+	/* If mutex->cpp == NULL, something broke */
+	if (mutex->cpp && mutex == mutex->cpp->mutex_cache)
+		mutex->cpp->mutex_cache = mutex->next;
+
+	free(mutex);
+}
+
+/*
+ * Lock a mutex handle, using the NFP MU Atomic Engine
+ *
+ * @param mutex NFP CPP Mutex handle
+ *
+ * @return 0 on success, or -1 on failure (and set errno accordingly).
+ */
+int
+nfp_cpp_mutex_lock(struct nfp_cpp_mutex *mutex)
+{
+	int err;
+	time_t warn_at = time(NULL) + 15;
+
+	while ((err = nfp_cpp_mutex_trylock(mutex)) != 0) {
+		/* If errno != EBUSY, then the lock was damaged */
+		if (err < 0 && errno != EBUSY)
+			return err;
+		if (time(NULL) >= warn_at) {
+			printf("Warning: waiting for NFP mutex\n");
+			printf("\tusage:%u\n", mutex->usage);
+			printf("\tdepth:%hu\n", mutex->depth);
+			printf("\ttarget:%d\n", mutex->target);
+			printf("\taddr:%llx\n", mutex->address);
+			printf("\tkey:%08x\n", mutex->key);
+			warn_at = time(NULL) + 60;
+		}
+		sched_yield();
+	}
+	return 0;
+}
+
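+/*
+ * Example usage (illustrative sketch, not part of the driver): take and
+ * release a device-wide lock. "cpp", "addr" and "key" are assumed to be a
+ * valid CPP handle and a mutex location/key agreed on by all users.
+ *
+ *	struct nfp_cpp_mutex *m;
+ *
+ *	m = nfp_cpp_mutex_alloc(cpp, NFP_CPP_TARGET_MU, addr, key);
+ *	if (!m)
+ *		return -1;
+ *	if (nfp_cpp_mutex_lock(m) == 0) {
+ *		... critical section ...
+ *		nfp_cpp_mutex_unlock(m);
+ *	}
+ *	nfp_cpp_mutex_free(m);
+ */
+
+/*
+ * Unlock a mutex handle, using the NFP MU Atomic Engine
+ *
+ * @param mutex NFP CPP Mutex handle
+ *
+ * @return 0 on success, or -1 on failure (and set errno accordingly).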
+ */ +int +nfp_cpp_mutex_unlock(struct nfp_cpp_mutex *mutex) +{ + uint32_t muw = NFP_CPP_ID(mutex->target, 4, 0); /* atomic_write */ + uint32_t mur = NFP_CPP_ID(mutex->target, 3, 0); /* atomic_read */ + struct nfp_cpp *cpp = mutex->cpp; + uint32_t key, value; + uint16_t interface = nfp_cpp_interface(cpp); + int err; + + if (mutex->depth > 1) { + mutex->depth--; + return 0; + } + + err = nfp_cpp_readl(mutex->cpp, mur, mutex->address, &value); + if (err < 0) + goto exit; + + err = nfp_cpp_readl(mutex->cpp, mur, mutex->address + 4, &key); + if (err < 0) + goto exit; + + if (key != mutex->key) { + err = NFP_ERRNO(EPERM); + goto exit; + } + + if (value != MUTEX_LOCKED(interface)) { + err = NFP_ERRNO(EACCES); + goto exit; + } + + err = nfp_cpp_writel(cpp, muw, mutex->address, MUTEX_UNLOCK(interface)); + if (err < 0) + goto exit; + + mutex->depth = 0; + +exit: + return err; +} + +/* + * Attempt to lock a mutex handle, using the NFP MU Atomic Engine + * + * Valid lock states: + * + * 0x....0000 - Unlocked + * 0x....000f - Locked + * + * @param mutex NFP CPP Mutex handle + * @return 0 if the lock succeeded, -1 on failure (and errno set + * appropriately). + */ +int +nfp_cpp_mutex_trylock(struct nfp_cpp_mutex *mutex) +{ + uint32_t mur = NFP_CPP_ID(mutex->target, 3, 0); /* atomic_read */ + uint32_t muw = NFP_CPP_ID(mutex->target, 4, 0); /* atomic_write */ + uint32_t mus = NFP_CPP_ID(mutex->target, 5, 3); /* test_set_imm */ + uint32_t key, value, tmp; + struct nfp_cpp *cpp = mutex->cpp; + int err; + + if (mutex->depth > 0) { + if (mutex->depth == MUTEX_DEPTH_MAX) + return NFP_ERRNO(E2BIG); + + mutex->depth++; + return 0; + } + + /* Verify that the lock marker is not damaged */ + err = nfp_cpp_readl(cpp, mur, mutex->address + 4, &key); + if (err < 0) + goto exit; + + if (key != mutex->key) { + err = NFP_ERRNO(EPERM); + goto exit; + } + + /* + * Compare against the unlocked state, and if true, + * write the interface id into the top 16 bits, and + * mark as locked. + */ + value = MUTEX_LOCKED(nfp_cpp_interface(cpp)); + + /* + * We use test_set_imm here, as it implies a read + * of the current state, and sets the bits in the + * bytemask of the command to 1s. Since the mutex + * is guaranteed to be 64-bit aligned, the bytemask + * of this 32-bit command is ensured to be 8'b00001111, + * which implies that the lower 4 bits will be set to + * ones regardless of the initial state. + * + * Since this is a 'Readback' operation, with no Pull + * data, we can treat this as a normal Push (read) + * atomic, which returns the original value. + */ + err = nfp_cpp_readl(cpp, mus, mutex->address, &tmp); + if (err < 0) + goto exit; + + /* Was it unlocked? */ + if (MUTEX_IS_UNLOCKED(tmp)) { + /* + * The read value can only be 0x....0000 in the unlocked state. + * If there was another contending for this lock, then + * the lock state would be 0x....000f + * + * Write our owner ID into the lock + * While not strictly necessary, this helps with + * debug and bookkeeping. + */ + err = nfp_cpp_writel(cpp, muw, mutex->address, value); + if (err < 0) + goto exit; + + mutex->depth = 1; + goto exit; + } + + /* Already locked by us? Success! */ + if (tmp == value) { + mutex->depth = 1; + goto exit; + } + + err = NFP_ERRNO(MUTEX_IS_LOCKED(tmp) ? 
EBUSY : EINVAL);
+
+exit:
+	return err;
+}
diff --git a/drivers/net/nfp/nfpcore/nfp_nffw.c b/drivers/net/nfp/nfpcore/nfp_nffw.c
new file mode 100644
index 00000000..8bec0e3c
--- /dev/null
+++ b/drivers/net/nfp/nfpcore/nfp_nffw.c
@@ -0,0 +1,235 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#include "nfp_cpp.h"
+#include "nfp_nffw.h"
+#include "nfp_mip.h"
+#include "nfp6000/nfp6000.h"
+#include "nfp_resource.h"
+
+/*
+ * flg_info_version = flags[0]<27:16>
+ * This is a small version counter intended only to detect if the current
+ * implementation can read the current struct. Struct changes should be very
+ * rare and as such a 12-bit counter should cover large spans of time. By the
+ * time it wraps around, we don't expect to have 4096 versions of this struct
+ * to be in use at the same time.
+ */
+static uint32_t
+nffw_res_info_version_get(const struct nfp_nffw_info_data *res)
+{
+	return (res->flags[0] >> 16) & 0xfff;
+}
+
+/* flg_init = flags[0]<0> */
+static uint32_t
+nffw_res_flg_init_get(const struct nfp_nffw_info_data *res)
+{
+	return (res->flags[0] >> 0) & 1;
+}
+
+/* loaded = loaded__mu_da__mip_off_hi<31:31> */
+static uint32_t
+nffw_fwinfo_loaded_get(const struct nffw_fwinfo *fi)
+{
+	return (fi->loaded__mu_da__mip_off_hi >> 31) & 1;
+}
+
+/* mip_cppid = mip_cppid */
+static uint32_t
+nffw_fwinfo_mip_cppid_get(const struct nffw_fwinfo *fi)
+{
+	return fi->mip_cppid;
+}
+
+/* mip_mu_da = loaded__mu_da__mip_off_hi<8:8> */
+static uint32_t
+nffw_fwinfo_mip_mu_da_get(const struct nffw_fwinfo *fi)
+{
+	return (fi->loaded__mu_da__mip_off_hi >> 8) & 1;
+}
+
+/* mip_offset = (loaded__mu_da__mip_off_hi<7:0> << 32) | mip_offset_lo */
+static uint64_t
+nffw_fwinfo_mip_offset_get(const struct nffw_fwinfo *fi)
+{
+	uint64_t mip_off_hi = fi->loaded__mu_da__mip_off_hi;
+
+	return (mip_off_hi & 0xFF) << 32 | fi->mip_offset_lo;
+}
+
+#define NFP_IMB_TGTADDRESSMODECFG_MODE_of(_x)	(((_x) >> 13) & 0x7)
+#define NFP_IMB_TGTADDRESSMODECFG_ADDRMODE	BIT(12)
+#define NFP_IMB_TGTADDRESSMODECFG_ADDRMODE_32_BIT	0
+#define NFP_IMB_TGTADDRESSMODECFG_ADDRMODE_40_BIT	BIT(12)
+
+static int
+nfp_mip_mu_locality_lsb(struct nfp_cpp *cpp)
+{
+	unsigned int mode, addr40;
+	uint32_t xpbaddr, imbcppat;
+	int err;
+
+	/* Hardcoded XPB IMB Base, island 0 */
+	xpbaddr = 0x000a0000 + NFP_CPP_TARGET_MU * 4;
+	err = nfp_xpb_readl(cpp, xpbaddr, &imbcppat);
+	if (err < 0)
+		return err;
+
+	mode = NFP_IMB_TGTADDRESSMODECFG_MODE_of(imbcppat);
+	addr40 = !!(imbcppat & NFP_IMB_TGTADDRESSMODECFG_ADDRMODE);
+
+	return nfp_cppat_mu_locality_lsb(mode, addr40);
+}
+
+static unsigned int
+nffw_res_fwinfos(struct nfp_nffw_info_data *fwinf, struct nffw_fwinfo **arr)
+{
+	/*
+	 * For this code, version 0 is most likely to be version 1 in this
+	 * case. Since the kernel driver does not take responsibility for
+	 * initialising the nfp.nffw resource, any previous code (CA firmware
+	 * or userspace) that left the version at 0 but did set the init flag
+	 * is going to be version 1.
+	 */
+	switch (nffw_res_info_version_get(fwinf)) {
+	case 0:
+	case 1:
+		*arr = &fwinf->info.v1.fwinfo[0];
+		return NFFW_FWINFO_CNT_V1;
+	case 2:
+		*arr = &fwinf->info.v2.fwinfo[0];
+		return NFFW_FWINFO_CNT_V2;
+	default:
+		*arr = NULL;
+		return 0;
+	}
+}
+
+/*
+ * nfp_nffw_info_open() - Acquire the lock on the NFFW table
+ * @cpp:	NFP CPP handle
+ *
+ * Return: NFFW info state handle, or NULL on failure
+ */
+struct nfp_nffw_info *
+nfp_nffw_info_open(struct nfp_cpp *cpp)
+{
+	struct nfp_nffw_info_data *fwinf;
+	struct nfp_nffw_info *state;
+	uint32_t info_ver;
+	int err;
+
+	state = malloc(sizeof(*state));
+	if (!state)
+		return NULL;
+
+	memset(state, 0, sizeof(*state));
+
+	state->res = nfp_resource_acquire(cpp, NFP_RESOURCE_NFP_NFFW);
+	if (!state->res)
+		goto err_free;
+
+	fwinf = &state->fwinf;
+
+	if (sizeof(*fwinf) > nfp_resource_size(state->res))
+		goto err_release;
+
+	err = nfp_cpp_read(cpp, nfp_resource_cpp_id(state->res),
+			   nfp_resource_address(state->res),
+			   fwinf, sizeof(*fwinf));
+	if (err < (int)sizeof(*fwinf))
+		goto err_release;
+
+	if (!nffw_res_flg_init_get(fwinf))
+		goto err_release;
+
+	info_ver = nffw_res_info_version_get(fwinf);
+	if (info_ver > NFFW_INFO_VERSION_CURRENT)
+		goto err_release;
+
+	state->cpp = cpp;
+	return state;
+
+err_release:
+	nfp_resource_release(state->res);
+err_free:
+	free(state);
+	return NULL;
+}
+
+/*
+ * nfp_nffw_info_close() - Release the lock on the NFFW table
+ * @state:	NFP FW info state
+ */
+void
+nfp_nffw_info_close(struct nfp_nffw_info *state)
+{
+	nfp_resource_release(state->res);
+	free(state);
+}
+
+/*
+ * nfp_nffw_info_fwid_first() - Return the first firmware ID in the NFFW
+ * @state:	NFP FW info state
+ *
+ * Return: First NFFW firmware info, NULL on failure
+ */
+static struct nffw_fwinfo *
+nfp_nffw_info_fwid_first(struct nfp_nffw_info *state)
+{
+	struct nffw_fwinfo *fwinfo;
+	unsigned int cnt, i;
+
+	cnt = nffw_res_fwinfos(&state->fwinf, &fwinfo);
+	if (!cnt)
+		return NULL;
+
+	for (i = 0; i < cnt; i++)
+		if (nffw_fwinfo_loaded_get(&fwinfo[i]))
+			return &fwinfo[i];
+
+	return NULL;
+}
+
+/*
+ * nfp_nffw_info_mip_first() - Retrieve the location of the first FW's MIP
+ * @state:	NFP FW info state
+ * @cpp_id:	Pointer to the CPP ID of the MIP
+ * @off:	Pointer to the CPP Address of the MIP
+ *
+ * Return: 0, or -ERRNO
+ */
+int
+nfp_nffw_info_mip_first(struct nfp_nffw_info *state, uint32_t *cpp_id,
+			uint64_t *off)
+{
+	struct nffw_fwinfo *fwinfo;
+
+	fwinfo = nfp_nffw_info_fwid_first(state);
+	if (!fwinfo)
+		return -EINVAL;
+
+	*cpp_id = nffw_fwinfo_mip_cppid_get(fwinfo);
+	*off = nffw_fwinfo_mip_offset_get(fwinfo);
+
+	if (nffw_fwinfo_mip_mu_da_get(fwinfo)) {
+		int locality_off;
+
+		if (NFP_CPP_ID_TARGET_of(*cpp_id) != NFP_CPP_TARGET_MU)
+			return 0;
+
+		locality_off = nfp_mip_mu_locality_lsb(state->cpp);
+		if (locality_off < 0)
+			return locality_off;
+
+		*off &= ~(NFP_MU_ADDR_ACCESS_TYPE_MASK << locality_off);
+		*off |= NFP_MU_ADDR_ACCESS_TYPE_DIRECT << locality_off;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/nfp/nfpcore/nfp_nffw.h b/drivers/net/nfp/nfpcore/nfp_nffw.h
new file mode 100644
index 00000000..3bbdf1c1
--- /dev/null
+++ b/drivers/net/nfp/nfpcore/nfp_nffw.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#ifndef __NFP_NFFW_H__
+#define __NFP_NFFW_H__
+
+#include "nfp-common/nfp_platform.h"
+#include "nfp_cpp.h"
+
+/*
+ * Init-CSR owner IDs for firmware map to firmware IDs which start at 4.
+ * Lower IDs are reserved for target and loader IDs.
+ */
+#define NFFW_FWID_EXT	3 /* For active MEs that we didn't load. */
+#define NFFW_FWID_BASE	4
+
+#define NFFW_FWID_ALL	255
+
+/**
+ * NFFW_INFO_VERSION history:
+ * 0: This was never actually used (before versioning), but it refers to
+ *    the previous struct which had FWINFO_CNT = MEINFO_CNT = 120 that later
+ *    changed to 200.
+ * 1: First versioned struct, with
+ *	FWINFO_CNT = 120
+ *	MEINFO_CNT = 120
+ * 2: FWINFO_CNT = 200
+ *	MEINFO_CNT = 200
+ */
+#define NFFW_INFO_VERSION_CURRENT 2
+
+/* Enough for all current chip families */
+#define NFFW_MEINFO_CNT_V1 120
+#define NFFW_FWINFO_CNT_V1 120
+#define NFFW_MEINFO_CNT_V2 200
+#define NFFW_FWINFO_CNT_V2 200
+
+struct nffw_meinfo {
+	uint32_t ctxmask__fwid__meid;
+};
+
+struct nffw_fwinfo {
+	uint32_t loaded__mu_da__mip_off_hi;
+	uint32_t mip_cppid; /* 0 means no MIP */
+	uint32_t mip_offset_lo;
+};
+
+struct nfp_nffw_info_v1 {
+	struct nffw_meinfo meinfo[NFFW_MEINFO_CNT_V1];
+	struct nffw_fwinfo fwinfo[NFFW_FWINFO_CNT_V1];
+};
+
+struct nfp_nffw_info_v2 {
+	struct nffw_meinfo meinfo[NFFW_MEINFO_CNT_V2];
+	struct nffw_fwinfo fwinfo[NFFW_FWINFO_CNT_V2];
+};
+
+struct nfp_nffw_info_data {
+	uint32_t flags[2];
+	union {
+		struct nfp_nffw_info_v1 v1;
+		struct nfp_nffw_info_v2 v2;
+	} info;
+};
+
+struct nfp_nffw_info {
+	struct nfp_cpp *cpp;
+	struct nfp_resource *res;
+
+	struct nfp_nffw_info_data fwinf;
+};
+
+struct nfp_nffw_info *nfp_nffw_info_open(struct nfp_cpp *cpp);
+void nfp_nffw_info_close(struct nfp_nffw_info *state);
+
+#endif
diff --git a/drivers/net/nfp/nfpcore/nfp_nsp.c b/drivers/net/nfp/nfpcore/nfp_nsp.c
new file mode 100644
index 00000000..876a4017
--- /dev/null
+++ b/drivers/net/nfp/nfpcore/nfp_nsp.c
@@ -0,0 +1,427 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#define NFP_SUBSYS "nfp_nsp"
+
+#include <stdio.h>
+#include <time.h>
+
+#include <rte_common.h>
+
+#include "nfp_cpp.h"
+#include "nfp_nsp.h"
+#include "nfp_resource.h"
+
+int
+nfp_nsp_config_modified(struct nfp_nsp *state)
+{
+	return state->modified;
+}
+
+void
+nfp_nsp_config_set_modified(struct nfp_nsp *state, int modified)
+{
+	state->modified = modified;
+}
+
+void *
+nfp_nsp_config_entries(struct nfp_nsp *state)
+{
+	return state->entries;
+}
+
+unsigned int
+nfp_nsp_config_idx(struct nfp_nsp *state)
+{
+	return state->idx;
+}
+
+void
+nfp_nsp_config_set_state(struct nfp_nsp *state, void *entries, unsigned int idx)
+{
+	state->entries = entries;
+	state->idx = idx;
+}
+
+void
+nfp_nsp_config_clear_state(struct nfp_nsp *state)
+{
+	state->entries = NULL;
+	state->idx = 0;
+}
+
+static void
+nfp_nsp_print_extended_error(uint32_t ret_val)
+{
+	int i;
+
+	if (!ret_val)
+		return;
+
+	for (i = 0; i < (int)ARRAY_SIZE(nsp_errors); i++)
+		if (ret_val == (uint32_t)nsp_errors[i].code)
+			printf("err msg: %s\n", nsp_errors[i].msg);
+}
+
+static int
+nfp_nsp_check(struct nfp_nsp *state)
+{
+	struct nfp_cpp *cpp = state->cpp;
+	uint64_t nsp_status, reg;
+	uint32_t nsp_cpp;
+	int err;
+
+	nsp_cpp = nfp_resource_cpp_id(state->res);
+	nsp_status = nfp_resource_address(state->res) + NSP_STATUS;
+
+	err = nfp_cpp_readq(cpp, nsp_cpp, nsp_status, &reg);
+	if (err < 0)
+		return err;
+
+	if (FIELD_GET(NSP_STATUS_MAGIC, reg) != NSP_MAGIC) {
+		printf("Cannot detect NFP Service Processor\n");
+		return -ENODEV;
+	}
+
+	state->ver.major = FIELD_GET(NSP_STATUS_MAJOR, reg);
+	state->ver.minor = FIELD_GET(NSP_STATUS_MINOR, reg);
+
+	if (state->ver.major != NSP_MAJOR || state->ver.minor < NSP_MINOR) {
+		printf("Unsupported ABI %hu.%hu\n", state->ver.major,
+		       state->ver.minor);
+		return -EINVAL;
+	}
+
+	if (reg & NSP_STATUS_BUSY) {
+		printf("Service processor busy!\n");
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+/*
+ * nfp_nsp_open() - Prepare for communication and lock the NSP resource.
+ * @cpp:	NFP CPP Handle
+ */
+struct nfp_nsp *
+nfp_nsp_open(struct nfp_cpp *cpp)
+{
+	struct nfp_resource *res;
+	struct nfp_nsp *state;
+	int err;
+
+	res = nfp_resource_acquire(cpp, NFP_RESOURCE_NSP);
+	if (!res)
+		return NULL;
+
+	state = malloc(sizeof(*state));
+	if (!state) {
+		nfp_resource_release(res);
+		return NULL;
+	}
+	memset(state, 0, sizeof(*state));
+	state->cpp = cpp;
+	state->res = res;
+
+	err = nfp_nsp_check(state);
+	if (err) {
+		nfp_nsp_close(state);
+		return NULL;
+	}
+
+	return state;
+}
+
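+/*
+ * Worked example (illustrative): nfp_nsp_check() above decodes NSP_STATUS
+ * with the FIELD_GET() helpers from nfp_nsp.h. For a status word of
+ * 0xab10000800000001, bits <63:48> give NSP_STATUS_MAGIC == 0xab10
+ * (NSP_MAGIC), bits <47:44> give NSP_STATUS_MAJOR == 0, bits <43:32> give
+ * NSP_STATUS_MINOR == 8 (i.e. ABI 0.8), and bit 0 (NSP_STATUS_BUSY) is
+ * set, so the check would return -EBUSY.
+ */
+
+/*
+ * nfp_nsp_close() - Clean up and unlock the NSP resource.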
+ * @state:	NFP SP state
+ */
+void
+nfp_nsp_close(struct nfp_nsp *state)
+{
+	nfp_resource_release(state->res);
+	free(state);
+}
+
+uint16_t
+nfp_nsp_get_abi_ver_major(struct nfp_nsp *state)
+{
+	return state->ver.major;
+}
+
+uint16_t
+nfp_nsp_get_abi_ver_minor(struct nfp_nsp *state)
+{
+	return state->ver.minor;
+}
+
+static int
+nfp_nsp_wait_reg(struct nfp_cpp *cpp, uint64_t *reg, uint32_t nsp_cpp,
+		 uint64_t addr, uint64_t mask, uint64_t val)
+{
+	struct timespec wait;
+	int count;
+	int err;
+
+	wait.tv_sec = 0;
+	wait.tv_nsec = 25000000;
+	count = 0;
+
+	for (;;) {
+		err = nfp_cpp_readq(cpp, nsp_cpp, addr, reg);
+		if (err < 0)
+			return err;
+
+		if ((*reg & mask) == val)
+			return 0;
+
+		nanosleep(&wait, NULL);
+		if (count++ > 1000)
+			return -ETIMEDOUT;
+	}
+}
+
+/*
+ * nfp_nsp_command() - Execute a command on the NFP Service Processor
+ * @state:	NFP SP state
+ * @code:	NFP SP Command Code
+ * @option:	NFP SP Command Argument
+ * @buff_cpp:	NFP SP Buffer CPP Address info
+ * @buff_addr:	NFP SP Buffer Host address
+ *
+ * Return: 0 for success with no result
+ *
+ *	positive value for NSP completion with a result code
+ *
+ *	-EAGAIN if the NSP is not yet present
+ *	-ENODEV if the NSP is not a supported model
+ *	-EBUSY if the NSP is stuck
+ *	-EINTR if interrupted while waiting for completion
+ *	-ETIMEDOUT if the NSP did not complete within the polling budget
+ *	(1000 polls, 25 ms apart, i.e. roughly 25 seconds)
+ */
+static int
+nfp_nsp_command(struct nfp_nsp *state, uint16_t code, uint32_t option,
+		uint32_t buff_cpp, uint64_t buff_addr)
+{
+	uint64_t reg, ret_val, nsp_base, nsp_buffer, nsp_status, nsp_command;
+	struct nfp_cpp *cpp = state->cpp;
+	uint32_t nsp_cpp;
+	int err;
+
+	nsp_cpp = nfp_resource_cpp_id(state->res);
+	nsp_base = nfp_resource_address(state->res);
+	nsp_status = nsp_base + NSP_STATUS;
+	nsp_command = nsp_base + NSP_COMMAND;
+	nsp_buffer = nsp_base + NSP_BUFFER;
+
+	err = nfp_nsp_check(state);
+	if (err)
+		return err;
+
+	if (!FIELD_FIT(NSP_BUFFER_CPP, buff_cpp >> 8) ||
+	    !FIELD_FIT(NSP_BUFFER_ADDRESS, buff_addr)) {
+		printf("Host buffer out of reach %08x %" PRIx64 "\n",
+		       buff_cpp, buff_addr);
+		return -EINVAL;
+	}
+
+	err = nfp_cpp_writeq(cpp, nsp_cpp, nsp_buffer,
+			     FIELD_PREP(NSP_BUFFER_CPP, buff_cpp >> 8) |
+			     FIELD_PREP(NSP_BUFFER_ADDRESS, buff_addr));
+	if (err < 0)
+		return err;
+
+	err = nfp_cpp_writeq(cpp, nsp_cpp, nsp_command,
+			     FIELD_PREP(NSP_COMMAND_OPTION, option) |
+			     FIELD_PREP(NSP_COMMAND_CODE, code) |
+			     FIELD_PREP(NSP_COMMAND_START, 1));
+	if (err < 0)
+		return err;
+
+	/* Wait for NSP_COMMAND_START to go to 0 */
+	err = nfp_nsp_wait_reg(cpp, &reg, nsp_cpp, nsp_command,
+			       NSP_COMMAND_START, 0);
+	if (err) {
+		printf("Error %d waiting for code 0x%04x to start\n",
+		       err, code);
+		return err;
+	}
+
+	/* Wait for NSP_STATUS_BUSY to go to 0 */
+	err = nfp_nsp_wait_reg(cpp, &reg, nsp_cpp, nsp_status, NSP_STATUS_BUSY,
+			       0);
+	if (err) {
+		printf("Error %d waiting for code 0x%04x to complete\n",
+		       err, code);
+		return err;
+	}
+
+	err = nfp_cpp_readq(cpp, nsp_cpp, nsp_command, &ret_val);
+	if (err < 0)
+		return err;
+	ret_val = FIELD_GET(NSP_COMMAND_OPTION, ret_val);
+
+	err = FIELD_GET(NSP_STATUS_RESULT, reg);
+	if (err) {
+		printf("Result (error) code set: %d (%d) command: %d\n",
+		       -err, (int)ret_val, code);
+		nfp_nsp_print_extended_error(ret_val);
+		return -err;
+	}
+
+	return ret_val;
+}
+
+#define SZ_1M 0x00100000
+
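+/*
+ * Example usage (illustrative sketch, not part of the driver): the typical
+ * command flow, as used by the firmware-load path further below. "fw_buf"
+ * and "fw_size" are assumed to hold a firmware image.
+ *
+ *	struct nfp_nsp *nsp = nfp_nsp_open(cpp);
+ *	int err = -EAGAIN;
+ *
+ *	if (!nsp)
+ *		return -EIO;
+ *	if (nfp_nsp_wait(nsp) == 0)
+ *		err = nfp_nsp_load_fw(nsp, fw_buf, fw_size);
+ *	nfp_nsp_close(nsp);
+ *	return err;
+ */
+
+static int
+nfp_nsp_command_buf(struct nfp_nsp *nsp, uint16_t code, uint32_t option,
+		    const void *in_buf, unsigned int in_size, void *out_buf,
+		    unsigned int out_size)
+{
+	struct nfp_cpp *cpp = nsp->cpp;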
+	unsigned int max_size;
+	uint64_t reg, cpp_buf;
+	int ret, err;
+	uint32_t cpp_id;
+
+	if (nsp->ver.minor < 13) {
+		printf("NSP: Code 0x%04x with buffer not supported\n", code);
+		printf("\t(ABI %hu.%hu)\n", nsp->ver.major, nsp->ver.minor);
+		return -EOPNOTSUPP;
+	}
+
+	err = nfp_cpp_readq(cpp, nfp_resource_cpp_id(nsp->res),
+			    nfp_resource_address(nsp->res) +
+			    NSP_DFLT_BUFFER_CONFIG,
+			    &reg);
+	if (err < 0)
+		return err;
+
+	max_size = RTE_MAX(in_size, out_size);
+	if (FIELD_GET(NSP_DFLT_BUFFER_SIZE_MB, reg) * SZ_1M < max_size) {
+		printf("NSP: default buffer too small for command 0x%04x\n",
+		       code);
+		printf("\t(%llu < %u)\n",
+		       FIELD_GET(NSP_DFLT_BUFFER_SIZE_MB, reg) * SZ_1M,
+		       max_size);
+		return -EINVAL;
+	}
+
+	err = nfp_cpp_readq(cpp, nfp_resource_cpp_id(nsp->res),
+			    nfp_resource_address(nsp->res) +
+			    NSP_DFLT_BUFFER,
+			    &reg);
+	if (err < 0)
+		return err;
+
+	cpp_id = FIELD_GET(NSP_BUFFER_CPP, reg) << 8;
+	cpp_buf = FIELD_GET(NSP_BUFFER_ADDRESS, reg);
+
+	if (in_buf && in_size) {
+		err = nfp_cpp_write(cpp, cpp_id, cpp_buf, in_buf, in_size);
+		if (err < 0)
+			return err;
+	}
+	/* Zero out remaining part of the buffer */
+	if (out_buf && out_size && out_size > in_size) {
+		memset(out_buf, 0, out_size - in_size);
+		err = nfp_cpp_write(cpp, cpp_id, cpp_buf + in_size, out_buf,
+				    out_size - in_size);
+		if (err < 0)
+			return err;
+	}
+
+	ret = nfp_nsp_command(nsp, code, option, cpp_id, cpp_buf);
+	if (ret < 0)
+		return ret;
+
+	if (out_buf && out_size) {
+		err = nfp_cpp_read(cpp, cpp_id, cpp_buf, out_buf, out_size);
+		if (err < 0)
+			return err;
+	}
+
+	return ret;
+}
+
+int
+nfp_nsp_wait(struct nfp_nsp *state)
+{
+	struct timespec wait;
+	int count;
+	int err;
+
+	wait.tv_sec = 0;
+	wait.tv_nsec = 25000000;
+	count = 0;
+
+	for (;;) {
+		err = nfp_nsp_command(state, SPCODE_NOOP, 0, 0, 0);
+		if (err != -EAGAIN)
+			break;
+
+		nanosleep(&wait, NULL);
+
+		if (count++ > 1000) {
+			err = -ETIMEDOUT;
+			break;
+		}
+	}
+	if (err)
+		printf("NSP failed to respond %d\n", err);
+
+	return err;
+}
+
+int
+nfp_nsp_device_soft_reset(struct nfp_nsp *state)
+{
+	return nfp_nsp_command(state, SPCODE_SOFT_RESET, 0, 0, 0);
+}
+
+int
+nfp_nsp_mac_reinit(struct nfp_nsp *state)
+{
+	return nfp_nsp_command(state, SPCODE_MAC_INIT, 0, 0, 0);
+}
+
+int
+nfp_nsp_load_fw(struct nfp_nsp *state, void *buf, unsigned int size)
+{
+	return nfp_nsp_command_buf(state, SPCODE_FW_LOAD, size, buf, size,
+				   NULL, 0);
+}
+
+int
+nfp_nsp_read_eth_table(struct nfp_nsp *state, void *buf, unsigned int size)
+{
+	return nfp_nsp_command_buf(state, SPCODE_ETH_RESCAN, size, NULL, 0,
+				   buf, size);
+}
+
+int
+nfp_nsp_write_eth_table(struct nfp_nsp *state, const void *buf,
+			unsigned int size)
+{
+	return nfp_nsp_command_buf(state, SPCODE_ETH_CONTROL, size, buf, size,
+				   NULL, 0);
+}
+
+int
+nfp_nsp_read_identify(struct nfp_nsp *state, void *buf, unsigned int size)
+{
+	return nfp_nsp_command_buf(state, SPCODE_NSP_IDENTIFY, size, NULL, 0,
+				   buf, size);
+}
+
+int
+nfp_nsp_read_sensors(struct nfp_nsp *state, unsigned int sensor_mask, void *buf,
+		     unsigned int size)
+{
+	return nfp_nsp_command_buf(state, SPCODE_NSP_SENSORS, sensor_mask, NULL,
+				   0, buf, size);
+}
diff --git a/drivers/net/nfp/nfpcore/nfp_nsp.h b/drivers/net/nfp/nfpcore/nfp_nsp.h
new file mode 100644
index 00000000..c9c7b0d0
--- /dev/null
+++ b/drivers/net/nfp/nfpcore/nfp_nsp.h
@@ -0,0 +1,304 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#ifndef NSP_NSP_H
+#define NSP_NSP_H 1
+
+#include "nfp_cpp.h"
+
+#define GENMASK_ULL(h, l) \
+	(((~0ULL) - (1ULL << (l)) + 1) & \
+	 (~0ULL >> (64 - 1 - (h))))
+
+#define __bf_shf(x) (__builtin_ffsll(x) - 1)
+
+#define FIELD_GET(_mask, _reg) \
+	(__extension__ ({ \
+		typeof(_mask) _x = (_mask); \
+		(typeof(_x))(((_reg) & (_x)) >> __bf_shf(_x)); \
+	}))
+
+#define FIELD_FIT(_mask, _val) \
+	(__extension__ ({ \
+		typeof(_mask) _x = (_mask); \
+		!((((typeof(_x))_val) << __bf_shf(_x)) & ~(_x)); \
+	}))
+
+#define FIELD_PREP(_mask, _val) \
+	(__extension__ ({ \
+		typeof(_mask) _x = (_mask); \
+		((typeof(_x))(_val) << __bf_shf(_x)) & (_x); \
+	}))
+
+/* Offsets relative to the CSR base */
+#define NSP_STATUS		0x00
+#define NSP_STATUS_MAGIC	GENMASK_ULL(63, 48)
+#define NSP_STATUS_MAJOR	GENMASK_ULL(47, 44)
+#define NSP_STATUS_MINOR	GENMASK_ULL(43, 32)
+#define NSP_STATUS_CODE		GENMASK_ULL(31, 16)
+#define NSP_STATUS_RESULT	GENMASK_ULL(15, 8)
+#define NSP_STATUS_BUSY		BIT_ULL(0)
+
+#define NSP_COMMAND		0x08
+#define NSP_COMMAND_OPTION	GENMASK_ULL(63, 32)
+#define NSP_COMMAND_CODE	GENMASK_ULL(31, 16)
+#define NSP_COMMAND_START	BIT_ULL(0)
+
+/* CPP address to retrieve the data from */
+#define NSP_BUFFER		0x10
+#define NSP_BUFFER_CPP		GENMASK_ULL(63, 40)
+#define NSP_BUFFER_PCIE		GENMASK_ULL(39, 38)
+#define NSP_BUFFER_ADDRESS	GENMASK_ULL(37, 0)
+
+#define NSP_DFLT_BUFFER		0x18
+
+#define NSP_DFLT_BUFFER_CONFIG	0x20
+#define NSP_DFLT_BUFFER_SIZE_MB	GENMASK_ULL(7, 0)
+
+#define NSP_MAGIC		0xab10
+#define NSP_MAJOR		0
+#define NSP_MINOR		8
+
+#define NSP_CODE_MAJOR		GENMASK(15, 12)
+#define NSP_CODE_MINOR		GENMASK(11, 0)
+
+enum nfp_nsp_cmd {
+	SPCODE_NOOP		= 0, /* No operation */
+	SPCODE_SOFT_RESET	= 1, /* Soft reset the NFP */
+	SPCODE_FW_DEFAULT	= 2, /* Load default (UNDI) FW */
+	SPCODE_PHY_INIT		= 3, /* Initialize the PHY */
+	SPCODE_MAC_INIT		= 4, /* Initialize the MAC */
+	SPCODE_PHY_RXADAPT	= 5, /* Re-run PHY RX Adaptation */
+	SPCODE_FW_LOAD		= 6, /* Load fw from buffer, len in option */
+	SPCODE_ETH_RESCAN	= 7, /* Rescan ETHs, write ETH_TABLE to buf */
+	SPCODE_ETH_CONTROL	= 8, /* Update media config from buffer */
+	SPCODE_NSP_SENSORS	= 12, /* Read NSP sensor(s) */
+	SPCODE_NSP_IDENTIFY	= 13, /* Read NSP version */
+};
+
+static const struct {
+	int code;
+	const char *msg;
+} nsp_errors[] = {
+	{ 6010, "could not map to phy for port" },
+	{ 6011, "not an allowed rate/lanes for port" },
+	{ 6012, "not an allowed rate/lanes for port" },
+	{ 6013, "high/low error, change other port first" },
+	{ 6014, "config not found in flash" },
+};
+
+struct nfp_nsp {
+	struct nfp_cpp *cpp;
+	struct nfp_resource *res;
+	struct {
+		uint16_t major;
+		uint16_t minor;
+	} ver;
+
+	/* Eth table config state */
+	int modified;
+	unsigned int idx;
+	void *entries;
+};
+
+struct nfp_nsp *nfp_nsp_open(struct nfp_cpp *cpp);
+void nfp_nsp_close(struct nfp_nsp *state);
+uint16_t nfp_nsp_get_abi_ver_major(struct nfp_nsp *state);
+uint16_t nfp_nsp_get_abi_ver_minor(struct nfp_nsp *state);
+int nfp_nsp_wait(struct nfp_nsp *state);
+int nfp_nsp_device_soft_reset(struct nfp_nsp *state);
+int nfp_nsp_load_fw(struct nfp_nsp *state, void *buf, unsigned int size);
+int nfp_nsp_mac_reinit(struct nfp_nsp *state);
+int nfp_nsp_read_identify(struct nfp_nsp *state, void *buf, unsigned int size);
+int nfp_nsp_read_sensors(struct nfp_nsp *state, unsigned int sensor_mask,
+			 void *buf, unsigned int size);
+
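+/*
+ * Worked example (illustrative): the GENMASK_ULL()/FIELD_*() helpers at the
+ * top of this header. GENMASK_ULL(15, 8) evaluates to 0xff00: the first
+ * term clears bits below 8 (0xffffffffffffff00), the second keeps bits up
+ * to 15 (0xffff). With that mask, FIELD_PREP(GENMASK_ULL(15, 8), 0x0a)
+ * yields 0x0a00 and FIELD_GET(GENMASK_ULL(15, 8), 0x0a13) recovers 0x0a.
+ */
+
+static inline int nfp_nsp_has_mac_reinit(struct nfp_nsp *state)
+{
+	return nfp_nsp_get_abi_ver_minor(state) > 20;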
+} + +enum nfp_eth_interface { + NFP_INTERFACE_NONE = 0, + NFP_INTERFACE_SFP = 1, + NFP_INTERFACE_SFPP = 10, + NFP_INTERFACE_SFP28 = 28, + NFP_INTERFACE_QSFP = 40, + NFP_INTERFACE_CXP = 100, + NFP_INTERFACE_QSFP28 = 112, +}; + +enum nfp_eth_media { + NFP_MEDIA_DAC_PASSIVE = 0, + NFP_MEDIA_DAC_ACTIVE, + NFP_MEDIA_FIBRE, +}; + +enum nfp_eth_aneg { + NFP_ANEG_AUTO = 0, + NFP_ANEG_SEARCH, + NFP_ANEG_25G_CONSORTIUM, + NFP_ANEG_25G_IEEE, + NFP_ANEG_DISABLED, +}; + +enum nfp_eth_fec { + NFP_FEC_AUTO_BIT = 0, + NFP_FEC_BASER_BIT, + NFP_FEC_REED_SOLOMON_BIT, + NFP_FEC_DISABLED_BIT, +}; + +#define NFP_FEC_AUTO BIT(NFP_FEC_AUTO_BIT) +#define NFP_FEC_BASER BIT(NFP_FEC_BASER_BIT) +#define NFP_FEC_REED_SOLOMON BIT(NFP_FEC_REED_SOLOMON_BIT) +#define NFP_FEC_DISABLED BIT(NFP_FEC_DISABLED_BIT) + +#define ETH_ALEN 6 + +/** + * struct nfp_eth_table - ETH table information + * @count: number of table entries + * @max_index: max of @index fields of all @ports + * @ports: table of ports + * + * @eth_index: port index according to legacy ethX numbering + * @index: chip-wide first channel index + * @nbi: NBI index + * @base: first channel index (within NBI) + * @lanes: number of channels + * @speed: interface speed (in Mbps) + * @interface: interface (module) plugged in + * @media: media type of the @interface + * @fec: forward error correction mode + * @aneg: auto negotiation mode + * @mac_addr: interface MAC address + * @label_port: port id + * @label_subport: id of interface within port (for split ports) + * @enabled: is enabled? + * @tx_enabled: is TX enabled? + * @rx_enabled: is RX enabled? + * @override_changed: is media reconfig pending? + * + * @port_type: one of %PORT_* defines for ethtool + * @port_lanes: total number of lanes on the port (sum of lanes of all subports) + * @is_split: is interface part of a split port + * @fec_modes_supported: bitmap of FEC modes supported + */ +struct nfp_eth_table { + unsigned int count; + unsigned int max_index; + struct nfp_eth_table_port { + unsigned int eth_index; + unsigned int index; + unsigned int nbi; + unsigned int base; + unsigned int lanes; + unsigned int speed; + + unsigned int interface; + enum nfp_eth_media media; + + enum nfp_eth_fec fec; + enum nfp_eth_aneg aneg; + + uint8_t mac_addr[ETH_ALEN]; + + uint8_t label_port; + uint8_t label_subport; + + int enabled; + int tx_enabled; + int rx_enabled; + + int override_changed; + + /* Computed fields */ + uint8_t port_type; + + unsigned int port_lanes; + + int is_split; + + unsigned int fec_modes_supported; + } ports[0]; +}; + +struct nfp_eth_table *nfp_eth_read_ports(struct nfp_cpp *cpp); + +int nfp_eth_set_mod_enable(struct nfp_cpp *cpp, unsigned int idx, int enable); +int nfp_eth_set_configured(struct nfp_cpp *cpp, unsigned int idx, + int configed); +int +nfp_eth_set_fec(struct nfp_cpp *cpp, unsigned int idx, enum nfp_eth_fec mode); + +int nfp_nsp_read_eth_table(struct nfp_nsp *state, void *buf, unsigned int size); +int nfp_nsp_write_eth_table(struct nfp_nsp *state, const void *buf, + unsigned int size); +void nfp_nsp_config_set_state(struct nfp_nsp *state, void *entries, + unsigned int idx); +void nfp_nsp_config_clear_state(struct nfp_nsp *state); +void nfp_nsp_config_set_modified(struct nfp_nsp *state, int modified); +void *nfp_nsp_config_entries(struct nfp_nsp *state); +int nfp_nsp_config_modified(struct nfp_nsp *state); +unsigned int nfp_nsp_config_idx(struct nfp_nsp *state); + +static inline int nfp_eth_can_support_fec(struct nfp_eth_table_port *eth_port) +{ + return !!eth_port->fec_modes_supported; +} 
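+
+/*
+ * Example usage (illustrative sketch, not part of the driver): walk the
+ * port table and request Reed-Solomon FEC on every port that advertises
+ * support for it. "cpp" is assumed to be a valid CPP handle.
+ *
+ *	struct nfp_eth_table *tbl = nfp_eth_read_ports(cpp);
+ *	unsigned int i;
+ *
+ *	for (i = 0; tbl && i < tbl->count; i++) {
+ *		struct nfp_eth_table_port *port = &tbl->ports[i];
+ *
+ *		if (nfp_eth_can_support_fec(port) &&
+ *		    (port->fec_modes_supported & NFP_FEC_REED_SOLOMON))
+ *			nfp_eth_set_fec(cpp, port->index,
+ *					NFP_FEC_REED_SOLOMON_BIT);
+ *	}
+ *	free(tbl);
+ */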
+
+static inline unsigned int
+nfp_eth_supported_fec_modes(struct nfp_eth_table_port *eth_port)
+{
+	return eth_port->fec_modes_supported;
+}
+
+struct nfp_nsp *nfp_eth_config_start(struct nfp_cpp *cpp, unsigned int idx);
+int nfp_eth_config_commit_end(struct nfp_nsp *nsp);
+void nfp_eth_config_cleanup_end(struct nfp_nsp *nsp);
+
+int __nfp_eth_set_aneg(struct nfp_nsp *nsp, enum nfp_eth_aneg mode);
+int __nfp_eth_set_speed(struct nfp_nsp *nsp, unsigned int speed);
+int __nfp_eth_set_split(struct nfp_nsp *nsp, unsigned int lanes);
+
+/**
+ * struct nfp_nsp_identify - NSP static information
+ * @version:      opaque version string
+ * @flags:        version flags
+ * @br_primary:   branch id of primary bootloader
+ * @br_secondary: branch id of secondary bootloader
+ * @br_nsp:       branch id of NSP
+ * @primary:      version of primary bootloader
+ * @secondary:    version id of secondary bootloader
+ * @nsp:          version id of NSP
+ * @sensor_mask:  mask of present sensors available on NIC
+ */
+struct nfp_nsp_identify {
+	char version[40];
+	uint8_t flags;
+	uint8_t br_primary;
+	uint8_t br_secondary;
+	uint8_t br_nsp;
+	uint16_t primary;
+	uint16_t secondary;
+	uint16_t nsp;
+	uint64_t sensor_mask;
+};
+
+struct nfp_nsp_identify *__nfp_nsp_identify(struct nfp_nsp *nsp);
+
+enum nfp_nsp_sensor_id {
+	NFP_SENSOR_CHIP_TEMPERATURE,
+	NFP_SENSOR_ASSEMBLY_POWER,
+	NFP_SENSOR_ASSEMBLY_12V_POWER,
+	NFP_SENSOR_ASSEMBLY_3V3_POWER,
+};
+
+int nfp_hwmon_read_sensor(struct nfp_cpp *cpp, enum nfp_nsp_sensor_id id,
+			  long *val);
+
+#endif
diff --git a/drivers/net/nfp/nfpcore/nfp_nsp_cmds.c b/drivers/net/nfp/nfpcore/nfp_nsp_cmds.c
new file mode 100644
index 00000000..bfd1eddb
--- /dev/null
+++ b/drivers/net/nfp/nfpcore/nfp_nsp_cmds.c
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#include <stdio.h>
+#include <rte_byteorder.h>
+#include "nfp_cpp.h"
+#include "nfp_nsp.h"
+#include "nfp_nffw.h"
+
+struct nsp_identify {
+	uint8_t version[40];
+	uint8_t flags;
+	uint8_t br_primary;
+	uint8_t br_secondary;
+	uint8_t br_nsp;
+	uint16_t primary;
+	uint16_t secondary;
+	uint16_t nsp;
+	uint8_t reserved[6];
+	uint64_t sensor_mask;
+};
+
+struct nfp_nsp_identify *
+__nfp_nsp_identify(struct nfp_nsp *nsp)
+{
+	struct nfp_nsp_identify *nspi = NULL;
+	struct nsp_identify *ni;
+	int ret;
+
+	if (nfp_nsp_get_abi_ver_minor(nsp) < 15)
+		return NULL;
+
+	ni = malloc(sizeof(*ni));
+	if (!ni)
+		return NULL;
+
+	memset(ni, 0, sizeof(*ni));
+	ret = nfp_nsp_read_identify(nsp, ni, sizeof(*ni));
+	if (ret < 0) {
+		printf("reading BSP version failed %d\n", ret);
+		goto exit_free;
+	}
+
+	nspi = malloc(sizeof(*nspi));
+	if (!nspi)
+		goto exit_free;
+
+	memset(nspi, 0, sizeof(*nspi));
+	memcpy(nspi->version, ni->version, sizeof(nspi->version));
+	nspi->version[sizeof(nspi->version) - 1] = '\0';
+	nspi->flags = ni->flags;
+	nspi->br_primary = ni->br_primary;
+	nspi->br_secondary = ni->br_secondary;
+	nspi->br_nsp = ni->br_nsp;
+	nspi->primary = rte_le_to_cpu_16(ni->primary);
+	nspi->secondary = rte_le_to_cpu_16(ni->secondary);
+	nspi->nsp = rte_le_to_cpu_16(ni->nsp);
+	nspi->sensor_mask = rte_le_to_cpu_64(ni->sensor_mask);
+
+exit_free:
+	free(ni);
+	return nspi;
+}
+
+struct nfp_sensors {
+	uint32_t chip_temp;
+	uint32_t assembly_power;
+	uint32_t assembly_12v_power;
+	uint32_t assembly_3v3_power;
+};
+
+int
+nfp_hwmon_read_sensor(struct nfp_cpp *cpp, enum nfp_nsp_sensor_id id, long *val)
+{
+	struct nfp_sensors s;
+	struct nfp_nsp *nsp;
+	int ret;
+
+	nsp = nfp_nsp_open(cpp);
+	if (!nsp)
+		return -EIO;
+
+	ret = nfp_nsp_read_sensors(nsp, BIT(id), &s, sizeof(s));
+	nfp_nsp_close(nsp);
+
+	if (ret < 0)
+		return ret;
+
+	switch (id) {
+	case NFP_SENSOR_CHIP_TEMPERATURE:
+		*val = rte_le_to_cpu_32(s.chip_temp);
+		break;
+	case NFP_SENSOR_ASSEMBLY_POWER:
+		*val = rte_le_to_cpu_32(s.assembly_power);
+		break;
+	case NFP_SENSOR_ASSEMBLY_12V_POWER:
+		*val = rte_le_to_cpu_32(s.assembly_12v_power);
+		break;
+	case NFP_SENSOR_ASSEMBLY_3V3_POWER:
+		*val = rte_le_to_cpu_32(s.assembly_3v3_power);
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
diff --git a/drivers/net/nfp/nfpcore/nfp_nsp_eth.c b/drivers/net/nfp/nfpcore/nfp_nsp_eth.c
new file mode 100644
index 00000000..67946891
--- /dev/null
+++ b/drivers/net/nfp/nfpcore/nfp_nsp_eth.c
@@ -0,0 +1,665 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#include <stdio.h>
+#include <rte_common.h>
+#include <rte_byteorder.h>
+#include "nfp_cpp.h"
+#include "nfp_nsp.h"
+#include "nfp6000/nfp6000.h"
+
+#define GENMASK_ULL(h, l) \
+	(((~0ULL) - (1ULL << (l)) + 1) & \
+	 (~0ULL >> (64 - 1 - (h))))
+
+#define __bf_shf(x) (__builtin_ffsll(x) - 1)
+
+#define FIELD_GET(_mask, _reg) \
+	(__extension__ ({ \
+		typeof(_mask) _x = (_mask); \
+		(typeof(_x))(((_reg) & (_x)) >> __bf_shf(_x)); \
+	}))
+
+#define FIELD_FIT(_mask, _val) \
+	(__extension__ ({ \
+		typeof(_mask) _x = (_mask); \
+		!((((typeof(_x))_val) << __bf_shf(_x)) & ~(_x)); \
+	}))
+
+#define FIELD_PREP(_mask, _val) \
+	(__extension__ ({ \
+		typeof(_mask) _x = (_mask); \
+		((typeof(_x))(_val) << __bf_shf(_x)) & (_x); \
+	}))
+
+#define NSP_ETH_NBI_PORT_COUNT	24
+#define NSP_ETH_MAX_COUNT	(2 * NSP_ETH_NBI_PORT_COUNT)
+#define NSP_ETH_TABLE_SIZE	(NSP_ETH_MAX_COUNT * \
+				 sizeof(union eth_table_entry))
+
+#define NSP_ETH_PORT_LANES	GENMASK_ULL(3, 0)
+#define NSP_ETH_PORT_INDEX	GENMASK_ULL(15, 8)
+#define NSP_ETH_PORT_LABEL	GENMASK_ULL(53, 48)
+#define NSP_ETH_PORT_PHYLABEL	GENMASK_ULL(59, 54)
+#define NSP_ETH_PORT_FEC_SUPP_BASER	BIT_ULL(60)
+#define NSP_ETH_PORT_FEC_SUPP_RS	BIT_ULL(61)
+
+#define NSP_ETH_PORT_LANES_MASK	rte_cpu_to_le_64(NSP_ETH_PORT_LANES)
+
+#define NSP_ETH_STATE_CONFIGURED	BIT_ULL(0)
+#define NSP_ETH_STATE_ENABLED		BIT_ULL(1)
+#define NSP_ETH_STATE_TX_ENABLED	BIT_ULL(2)
+#define NSP_ETH_STATE_RX_ENABLED	BIT_ULL(3)
+#define NSP_ETH_STATE_RATE		GENMASK_ULL(11, 8)
+#define NSP_ETH_STATE_INTERFACE		GENMASK_ULL(19, 12)
+#define NSP_ETH_STATE_MEDIA		GENMASK_ULL(21, 20)
+#define NSP_ETH_STATE_OVRD_CHNG		BIT_ULL(22)
+#define NSP_ETH_STATE_ANEG		GENMASK_ULL(25, 23)
+#define NSP_ETH_STATE_FEC		GENMASK_ULL(27, 26)
+
+#define NSP_ETH_CTRL_CONFIGURED		BIT_ULL(0)
+#define NSP_ETH_CTRL_ENABLED		BIT_ULL(1)
+#define NSP_ETH_CTRL_TX_ENABLED		BIT_ULL(2)
+#define NSP_ETH_CTRL_RX_ENABLED		BIT_ULL(3)
+#define NSP_ETH_CTRL_SET_RATE		BIT_ULL(4)
+#define NSP_ETH_CTRL_SET_LANES		BIT_ULL(5)
+#define NSP_ETH_CTRL_SET_ANEG		BIT_ULL(6)
+#define NSP_ETH_CTRL_SET_FEC		BIT_ULL(7)
+
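+/*
+ * Worked example (illustrative): decoding a raw port word with the masks
+ * above. For port == 0x0a14, FIELD_GET(NSP_ETH_PORT_LANES, port) == 4 and
+ * FIELD_GET(NSP_ETH_PORT_INDEX, port) == 10, i.e. a 4-lane port with
+ * chip-wide index 10. nfp_eth_port_translate() below applies these same
+ * accessors to every table entry.
+ */
+
+/* Which connector port.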
*/ +#define PORT_TP 0x00 +#define PORT_AUI 0x01 +#define PORT_MII 0x02 +#define PORT_FIBRE 0x03 +#define PORT_BNC 0x04 +#define PORT_DA 0x05 +#define PORT_NONE 0xef +#define PORT_OTHER 0xff + +#define SPEED_10 10 +#define SPEED_100 100 +#define SPEED_1000 1000 +#define SPEED_2500 2500 +#define SPEED_5000 5000 +#define SPEED_10000 10000 +#define SPEED_14000 14000 +#define SPEED_20000 20000 +#define SPEED_25000 25000 +#define SPEED_40000 40000 +#define SPEED_50000 50000 +#define SPEED_56000 56000 +#define SPEED_100000 100000 + +enum nfp_eth_raw { + NSP_ETH_RAW_PORT = 0, + NSP_ETH_RAW_STATE, + NSP_ETH_RAW_MAC, + NSP_ETH_RAW_CONTROL, + + NSP_ETH_NUM_RAW +}; + +enum nfp_eth_rate { + RATE_INVALID = 0, + RATE_10M, + RATE_100M, + RATE_1G, + RATE_10G, + RATE_25G, +}; + +union eth_table_entry { + struct { + uint64_t port; + uint64_t state; + uint8_t mac_addr[6]; + uint8_t resv[2]; + uint64_t control; + }; + uint64_t raw[NSP_ETH_NUM_RAW]; +}; + +static const struct { + enum nfp_eth_rate rate; + unsigned int speed; +} nsp_eth_rate_tbl[] = { + { RATE_INVALID, 0, }, + { RATE_10M, SPEED_10, }, + { RATE_100M, SPEED_100, }, + { RATE_1G, SPEED_1000, }, + { RATE_10G, SPEED_10000, }, + { RATE_25G, SPEED_25000, }, +}; + +static unsigned int +nfp_eth_rate2speed(enum nfp_eth_rate rate) +{ + int i; + + for (i = 0; i < (int)ARRAY_SIZE(nsp_eth_rate_tbl); i++) + if (nsp_eth_rate_tbl[i].rate == rate) + return nsp_eth_rate_tbl[i].speed; + + return 0; +} + +static unsigned int +nfp_eth_speed2rate(unsigned int speed) +{ + int i; + + for (i = 0; i < (int)ARRAY_SIZE(nsp_eth_rate_tbl); i++) + if (nsp_eth_rate_tbl[i].speed == speed) + return nsp_eth_rate_tbl[i].rate; + + return RATE_INVALID; +} + +static void +nfp_eth_copy_mac_reverse(uint8_t *dst, const uint8_t *src) +{ + int i; + + for (i = 0; i < (int)ETH_ALEN; i++) + dst[ETH_ALEN - i - 1] = src[i]; +} + +static void +nfp_eth_port_translate(struct nfp_nsp *nsp, const union eth_table_entry *src, + unsigned int index, struct nfp_eth_table_port *dst) +{ + unsigned int rate; + unsigned int fec; + uint64_t port, state; + + port = rte_le_to_cpu_64(src->port); + state = rte_le_to_cpu_64(src->state); + + dst->eth_index = FIELD_GET(NSP_ETH_PORT_INDEX, port); + dst->index = index; + dst->nbi = index / NSP_ETH_NBI_PORT_COUNT; + dst->base = index % NSP_ETH_NBI_PORT_COUNT; + dst->lanes = FIELD_GET(NSP_ETH_PORT_LANES, port); + + dst->enabled = FIELD_GET(NSP_ETH_STATE_ENABLED, state); + dst->tx_enabled = FIELD_GET(NSP_ETH_STATE_TX_ENABLED, state); + dst->rx_enabled = FIELD_GET(NSP_ETH_STATE_RX_ENABLED, state); + + rate = nfp_eth_rate2speed(FIELD_GET(NSP_ETH_STATE_RATE, state)); + dst->speed = dst->lanes * rate; + + dst->interface = FIELD_GET(NSP_ETH_STATE_INTERFACE, state); + dst->media = FIELD_GET(NSP_ETH_STATE_MEDIA, state); + + nfp_eth_copy_mac_reverse(dst->mac_addr, src->mac_addr); + + dst->label_port = FIELD_GET(NSP_ETH_PORT_PHYLABEL, port); + dst->label_subport = FIELD_GET(NSP_ETH_PORT_LABEL, port); + + if (nfp_nsp_get_abi_ver_minor(nsp) < 17) + return; + + dst->override_changed = FIELD_GET(NSP_ETH_STATE_OVRD_CHNG, state); + dst->aneg = FIELD_GET(NSP_ETH_STATE_ANEG, state); + + if (nfp_nsp_get_abi_ver_minor(nsp) < 22) + return; + + fec = FIELD_GET(NSP_ETH_PORT_FEC_SUPP_BASER, port); + dst->fec_modes_supported |= fec << NFP_FEC_BASER_BIT; + fec = FIELD_GET(NSP_ETH_PORT_FEC_SUPP_RS, port); + dst->fec_modes_supported |= fec << NFP_FEC_REED_SOLOMON_BIT; + if (dst->fec_modes_supported) + dst->fec_modes_supported |= NFP_FEC_AUTO | NFP_FEC_DISABLED; + + dst->fec = 1 << 
FIELD_GET(NSP_ETH_STATE_FEC, state);
+}
+
+static void
+nfp_eth_calc_port_geometry(struct nfp_eth_table *table)
+{
+	unsigned int i, j;
+
+	for (i = 0; i < table->count; i++) {
+		table->max_index = RTE_MAX(table->max_index,
+					   table->ports[i].index);
+
+		for (j = 0; j < table->count; j++) {
+			if (table->ports[i].label_port !=
+			    table->ports[j].label_port)
+				continue;
+			table->ports[i].port_lanes += table->ports[j].lanes;
+
+			if (i == j)
+				continue;
+			if (table->ports[i].label_subport ==
+			    table->ports[j].label_subport)
+				printf("Port %d subport %d is a duplicate\n",
+				       table->ports[i].label_port,
+				       table->ports[i].label_subport);
+
+			table->ports[i].is_split = 1;
+		}
+	}
+}
+
+static void
+nfp_eth_calc_port_type(struct nfp_eth_table_port *entry)
+{
+	if (entry->interface == NFP_INTERFACE_NONE) {
+		entry->port_type = PORT_NONE;
+		return;
+	}
+
+	if (entry->media == NFP_MEDIA_FIBRE)
+		entry->port_type = PORT_FIBRE;
+	else
+		entry->port_type = PORT_DA;
+}
+
+static struct nfp_eth_table *
+__nfp_eth_read_ports(struct nfp_nsp *nsp)
+{
+	union eth_table_entry *entries;
+	struct nfp_eth_table *table;
+	uint32_t table_sz;
+	int i, j, ret, cnt = 0;
+
+	entries = malloc(NSP_ETH_TABLE_SIZE);
+	if (!entries)
+		return NULL;
+
+	memset(entries, 0, NSP_ETH_TABLE_SIZE);
+	ret = nfp_nsp_read_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE);
+	if (ret < 0) {
+		printf("reading port table failed %d\n", ret);
+		goto err;
+	}
+
+	for (i = 0; i < NSP_ETH_MAX_COUNT; i++)
+		if (entries[i].port & NSP_ETH_PORT_LANES_MASK)
+			cnt++;
+
+	/* Some versions of flash will give us 0 instead of port count. For
+	 * those that give a port count, verify it against the value calculated
+	 * above.
+	 */
+	if (ret && ret != cnt) {
+		printf("table entry count (%d) does not match entries present (%d)\n",
+		       ret, cnt);
+		goto err;
+	}
+
+	table_sz = sizeof(*table) + sizeof(struct nfp_eth_table_port) * cnt;
+	table = malloc(table_sz);
+	if (!table)
+		goto err;
+
+	memset(table, 0, table_sz);
+	table->count = cnt;
+	for (i = 0, j = 0; i < NSP_ETH_MAX_COUNT; i++)
+		if (entries[i].port & NSP_ETH_PORT_LANES_MASK)
+			nfp_eth_port_translate(nsp, &entries[i], i,
+					       &table->ports[j++]);
+
+	nfp_eth_calc_port_geometry(table);
+	for (i = 0; i < (int)table->count; i++)
+		nfp_eth_calc_port_type(&table->ports[i]);
+
+	free(entries);
+
+	return table;
+
+err:
+	free(entries);
+	return NULL;
+}
+
+/*
+ * nfp_eth_read_ports() - retrieve port information
+ * @cpp:	NFP CPP handle
+ *
+ * Read the port information from the device. Returned structure should
+ * be freed with free() once no longer needed.
+ *
+ * Return: populated ETH table or NULL on error.
+ */ +struct nfp_eth_table * +nfp_eth_read_ports(struct nfp_cpp *cpp) +{ + struct nfp_eth_table *ret; + struct nfp_nsp *nsp; + + nsp = nfp_nsp_open(cpp); + if (!nsp) + return NULL; + + ret = __nfp_eth_read_ports(nsp); + nfp_nsp_close(nsp); + + return ret; +} + +struct nfp_nsp * +nfp_eth_config_start(struct nfp_cpp *cpp, unsigned int idx) +{ + union eth_table_entry *entries; + struct nfp_nsp *nsp; + int ret; + + entries = malloc(NSP_ETH_TABLE_SIZE); + if (!entries) + return NULL; + + memset(entries, 0, NSP_ETH_TABLE_SIZE); + nsp = nfp_nsp_open(cpp); + if (!nsp) { + free(entries); + return nsp; + } + + ret = nfp_nsp_read_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE); + if (ret < 0) { + printf("reading port table failed %d\n", ret); + goto err; + } + + if (!(entries[idx].port & NSP_ETH_PORT_LANES_MASK)) { + printf("trying to set port state on disabled port %d\n", idx); + goto err; + } + + nfp_nsp_config_set_state(nsp, entries, idx); + return nsp; + +err: + nfp_nsp_close(nsp); + free(entries); + return NULL; +} + +void +nfp_eth_config_cleanup_end(struct nfp_nsp *nsp) +{ + union eth_table_entry *entries = nfp_nsp_config_entries(nsp); + + nfp_nsp_config_set_modified(nsp, 0); + nfp_nsp_config_clear_state(nsp); + nfp_nsp_close(nsp); + free(entries); +} + +/* + * nfp_eth_config_commit_end() - perform recorded configuration changes + * @nsp: NFP NSP handle returned from nfp_eth_config_start() + * + * Perform the configuration which was requested with __nfp_eth_set_*() + * helpers and recorded in @nsp state. If device was already configured + * as requested or no __nfp_eth_set_*() operations were made no NSP command + * will be performed. + * + * Return: + * 0 - configuration successful; + * 1 - no changes were needed; + * -ERRNO - configuration failed. + */ +int +nfp_eth_config_commit_end(struct nfp_nsp *nsp) +{ + union eth_table_entry *entries = nfp_nsp_config_entries(nsp); + int ret = 1; + + if (nfp_nsp_config_modified(nsp)) { + ret = nfp_nsp_write_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE); + ret = ret < 0 ? ret : 0; + } + + nfp_eth_config_cleanup_end(nsp); + + return ret; +} + +/* + * nfp_eth_set_mod_enable() - set PHY module enable control bit + * @cpp: NFP CPP handle + * @idx: NFP chip-wide port index + * @enable: Desired state + * + * Enable or disable PHY module (this usually means setting the TX lanes + * disable bits). + * + * Return: + * 0 - configuration successful; + * 1 - no changes were needed; + * -ERRNO - configuration failed. + */ +int +nfp_eth_set_mod_enable(struct nfp_cpp *cpp, unsigned int idx, int enable) +{ + union eth_table_entry *entries; + struct nfp_nsp *nsp; + uint64_t reg; + + nsp = nfp_eth_config_start(cpp, idx); + if (!nsp) + return -1; + + entries = nfp_nsp_config_entries(nsp); + + /* Check if we are already in requested state */ + reg = rte_le_to_cpu_64(entries[idx].state); + if (enable != (int)FIELD_GET(NSP_ETH_CTRL_ENABLED, reg)) { + reg = rte_le_to_cpu_64(entries[idx].control); + reg &= ~NSP_ETH_CTRL_ENABLED; + reg |= FIELD_PREP(NSP_ETH_CTRL_ENABLED, enable); + entries[idx].control = rte_cpu_to_le_64(reg); + + nfp_nsp_config_set_modified(nsp, 1); + } + + return nfp_eth_config_commit_end(nsp); +} + +/* + * nfp_eth_set_configured() - set PHY module configured control bit + * @cpp: NFP CPP handle + * @idx: NFP chip-wide port index + * @configed: Desired state + * + * Set the ifup/ifdown state on the PHY. + * + * Return: + * 0 - configuration successful; + * 1 - no changes were needed; + * -ERRNO - configuration failed. 
+ */ +int +nfp_eth_set_configured(struct nfp_cpp *cpp, unsigned int idx, int configed) +{ + union eth_table_entry *entries; + struct nfp_nsp *nsp; + uint64_t reg; + + nsp = nfp_eth_config_start(cpp, idx); + if (!nsp) + return -EIO; + + /* + * Older ABI versions did support this feature, however this has only + * been reliable since ABI 20. + */ + if (nfp_nsp_get_abi_ver_minor(nsp) < 20) { + nfp_eth_config_cleanup_end(nsp); + return -EOPNOTSUPP; + } + + entries = nfp_nsp_config_entries(nsp); + + /* Check if we are already in requested state */ + reg = rte_le_to_cpu_64(entries[idx].state); + if (configed != (int)FIELD_GET(NSP_ETH_STATE_CONFIGURED, reg)) { + reg = rte_le_to_cpu_64(entries[idx].control); + reg &= ~NSP_ETH_CTRL_CONFIGURED; + reg |= FIELD_PREP(NSP_ETH_CTRL_CONFIGURED, configed); + entries[idx].control = rte_cpu_to_le_64(reg); + + nfp_nsp_config_set_modified(nsp, 1); + } + + return nfp_eth_config_commit_end(nsp); +} + +static int +nfp_eth_set_bit_config(struct nfp_nsp *nsp, unsigned int raw_idx, + const uint64_t mask, const unsigned int shift, + unsigned int val, const uint64_t ctrl_bit) +{ + union eth_table_entry *entries = nfp_nsp_config_entries(nsp); + unsigned int idx = nfp_nsp_config_idx(nsp); + uint64_t reg; + + /* + * Note: set features were added in ABI 0.14 but the error + * codes were initially not populated correctly. + */ + if (nfp_nsp_get_abi_ver_minor(nsp) < 17) { + printf("set operations not supported, please update flash\n"); + return -EOPNOTSUPP; + } + + /* Check if we are already in requested state */ + reg = rte_le_to_cpu_64(entries[idx].raw[raw_idx]); + if (val == (reg & mask) >> shift) + return 0; + + reg &= ~mask; + reg |= (val << shift) & mask; + entries[idx].raw[raw_idx] = rte_cpu_to_le_64(reg); + + entries[idx].control |= rte_cpu_to_le_64(ctrl_bit); + + nfp_nsp_config_set_modified(nsp, 1); + + return 0; +} + +#define NFP_ETH_SET_BIT_CONFIG(nsp, raw_idx, mask, val, ctrl_bit) \ + (__extension__ ({ \ + typeof(mask) _x = (mask); \ + nfp_eth_set_bit_config(nsp, raw_idx, _x, __bf_shf(_x), \ + val, ctrl_bit); \ + })) + +/* + * __nfp_eth_set_aneg() - set PHY autonegotiation control bit + * @nsp: NFP NSP handle returned from nfp_eth_config_start() + * @mode: Desired autonegotiation mode + * + * Allow/disallow PHY module to advertise/perform autonegotiation. + * Will write to hwinfo overrides in the flash (persistent config). + * + * Return: 0 or -ERRNO. + */ +int +__nfp_eth_set_aneg(struct nfp_nsp *nsp, enum nfp_eth_aneg mode) +{ + return NFP_ETH_SET_BIT_CONFIG(nsp, NSP_ETH_RAW_STATE, + NSP_ETH_STATE_ANEG, mode, + NSP_ETH_CTRL_SET_ANEG); +} + +/* + * __nfp_eth_set_fec() - set PHY forward error correction control bit + * @nsp: NFP NSP handle returned from nfp_eth_config_start() + * @mode: Desired fec mode + * + * Set the PHY module forward error correction mode. + * Will write to hwinfo overrides in the flash (persistent config). + * + * Return: 0 or -ERRNO. + */ +static int +__nfp_eth_set_fec(struct nfp_nsp *nsp, enum nfp_eth_fec mode) +{ + return NFP_ETH_SET_BIT_CONFIG(nsp, NSP_ETH_RAW_STATE, + NSP_ETH_STATE_FEC, mode, + NSP_ETH_CTRL_SET_FEC); +} + +/* + * nfp_eth_set_fec() - set PHY forward error correction control mode + * @cpp: NFP CPP handle + * @idx: NFP chip-wide port index + * @mode: Desired fec mode + * + * Return: + * 0 - configuration successful; + * 1 - no changes were needed; + * -ERRNO - configuration failed. 
+ */ +int +nfp_eth_set_fec(struct nfp_cpp *cpp, unsigned int idx, enum nfp_eth_fec mode) +{ + struct nfp_nsp *nsp; + int err; + + nsp = nfp_eth_config_start(cpp, idx); + if (!nsp) + return -EIO; + + err = __nfp_eth_set_fec(nsp, mode); + if (err) { + nfp_eth_config_cleanup_end(nsp); + return err; + } + + return nfp_eth_config_commit_end(nsp); +} + +/* + * __nfp_eth_set_speed() - set interface speed/rate + * @nsp: NFP NSP handle returned from nfp_eth_config_start() + * @speed: Desired speed (per lane) + * + * Set lane speed. Provided @speed value should be subport speed divided + * by number of lanes this subport is spanning (i.e. 10000 for 40G, 25000 for + * 50G, etc.) + * Will write to hwinfo overrides in the flash (persistent config). + * + * Return: 0 or -ERRNO. + */ +int +__nfp_eth_set_speed(struct nfp_nsp *nsp, unsigned int speed) +{ + enum nfp_eth_rate rate; + + rate = nfp_eth_speed2rate(speed); + if (rate == RATE_INVALID) { + printf("could not find matching lane rate for speed %u\n", + speed); + return -EINVAL; + } + + return NFP_ETH_SET_BIT_CONFIG(nsp, NSP_ETH_RAW_STATE, + NSP_ETH_STATE_RATE, rate, + NSP_ETH_CTRL_SET_RATE); +} + +/* + * __nfp_eth_set_split() - set interface lane split + * @nsp: NFP NSP handle returned from nfp_eth_config_start() + * @lanes: Desired lanes per port + * + * Set number of lanes in the port. + * Will write to hwinfo overrides in the flash (persistent config). + * + * Return: 0 or -ERRNO. + */ +int +__nfp_eth_set_split(struct nfp_nsp *nsp, unsigned int lanes) +{ + return NFP_ETH_SET_BIT_CONFIG(nsp, NSP_ETH_RAW_PORT, NSP_ETH_PORT_LANES, + lanes, NSP_ETH_CTRL_SET_LANES); +} diff --git a/drivers/net/nfp/nfpcore/nfp_resource.c b/drivers/net/nfp/nfpcore/nfp_resource.c new file mode 100644 index 00000000..dd41fa4d --- /dev/null +++ b/drivers/net/nfp/nfpcore/nfp_resource.c @@ -0,0 +1,266 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Netronome Systems, Inc. + * All rights reserved. 
+ */
+
+#include <stdio.h>
+#include <time.h>
+#include <endian.h>
+
+#include <rte_string_fns.h>
+
+#include "nfp_cpp.h"
+#include "nfp6000/nfp6000.h"
+#include "nfp_resource.h"
+#include "nfp_crc.h"
+
+#define NFP_RESOURCE_TBL_TARGET	NFP_CPP_TARGET_MU
+#define NFP_RESOURCE_TBL_BASE	0x8100000000ULL
+
+/* NFP Resource Table self-identifier */
+#define NFP_RESOURCE_TBL_NAME	"nfp.res"
+#define NFP_RESOURCE_TBL_KEY	0x00000000 /* Special key for entry 0 */
+
+#define NFP_RESOURCE_ENTRY_NAME_SZ	8
+
+/*
+ * struct nfp_resource_entry - Resource table entry
+ * @owner:	NFP CPP Lock, interface owner
+ * @key:	NFP CPP Lock, posix_crc32(name, 8)
+ * @region:	Memory region descriptor
+ * @name:	ASCII, zero padded name
+ * @reserved:	reserved, currently unused
+ * @cpp_action: CPP Action
+ * @cpp_token:	CPP Token
+ * @cpp_target: CPP Target ID
+ * @page_offset: 256-byte page offset into target's CPP address
+ * @page_size:	size, in 256-byte pages
+ */
+struct nfp_resource_entry {
+	struct nfp_resource_entry_mutex {
+		uint32_t owner;
+		uint32_t key;
+	} mutex;
+	struct nfp_resource_entry_region {
+		uint8_t  name[NFP_RESOURCE_ENTRY_NAME_SZ];
+		uint8_t  reserved[5];
+		uint8_t  cpp_action;
+		uint8_t  cpp_token;
+		uint8_t  cpp_target;
+		uint32_t page_offset;
+		uint32_t page_size;
+	} region;
+};
+
+#define NFP_RESOURCE_TBL_SIZE		4096
+#define NFP_RESOURCE_TBL_ENTRIES	(int)(NFP_RESOURCE_TBL_SIZE / \
+					      sizeof(struct nfp_resource_entry))
+
+struct nfp_resource {
+	char name[NFP_RESOURCE_ENTRY_NAME_SZ + 1];
+	uint32_t cpp_id;
+	uint64_t addr;
+	uint64_t size;
+	struct nfp_cpp_mutex *mutex;
+};
+
+static int
+nfp_cpp_resource_find(struct nfp_cpp *cpp, struct nfp_resource *res)
+{
+	char name_pad[NFP_RESOURCE_ENTRY_NAME_SZ + 2];
+	struct nfp_resource_entry entry;
+	uint32_t cpp_id, key;
+	int ret, i;
+
+	cpp_id = NFP_CPP_ID(NFP_RESOURCE_TBL_TARGET, 3, 0);  /* Atomic read */
+
+	memset(name_pad, 0, sizeof(name_pad));
+	strlcpy(name_pad, res->name, sizeof(name_pad));
+
+	/* Search for a matching entry */
+	if (!memcmp(name_pad, NFP_RESOURCE_TBL_NAME "\0\0\0\0\0\0\0\0", 8)) {
+		printf("Grabbing device lock not supported\n");
+		return -EOPNOTSUPP;
+	}
+	key = nfp_crc32_posix(name_pad, NFP_RESOURCE_ENTRY_NAME_SZ);
+
+	for (i = 0; i < NFP_RESOURCE_TBL_ENTRIES; i++) {
+		uint64_t addr = NFP_RESOURCE_TBL_BASE +
+				sizeof(struct nfp_resource_entry) * i;
+
+		ret = nfp_cpp_read(cpp, cpp_id, addr, &entry, sizeof(entry));
+		if (ret != sizeof(entry))
+			return -EIO;
+
+		if (entry.mutex.key != key)
+			continue;
+
+		/* Found key! */
+		res->mutex =
+		    nfp_cpp_mutex_alloc(cpp,
+					NFP_RESOURCE_TBL_TARGET, addr, key);
+		res->cpp_id = NFP_CPP_ID(entry.region.cpp_target,
+					 entry.region.cpp_action,
+					 entry.region.cpp_token);
+		res->addr = ((uint64_t)entry.region.page_offset) << 8;
+		res->size = (uint64_t)entry.region.page_size << 8;
+		return 0;
+	}
+
+	return -ENOENT;
+}
+
+static int
+nfp_resource_try_acquire(struct nfp_cpp *cpp, struct nfp_resource *res,
+			 struct nfp_cpp_mutex *dev_mutex)
+{
+	int err;
+
+	if (nfp_cpp_mutex_lock(dev_mutex))
+		return -EINVAL;
+
+	err = nfp_cpp_resource_find(cpp, res);
+	if (err)
+		goto err_unlock_dev;
+
+	err = nfp_cpp_mutex_trylock(res->mutex);
+	if (err)
+		goto err_res_mutex_free;
+
+	nfp_cpp_mutex_unlock(dev_mutex);
+
+	return 0;
+
+err_res_mutex_free:
+	nfp_cpp_mutex_free(res->mutex);
+err_unlock_dev:
+	nfp_cpp_mutex_unlock(dev_mutex);
+
+	return err;
+}
+
+/*
+ * nfp_resource_acquire() - Acquire a resource handle
+ * @cpp:	NFP CPP handle
+ * @name:	Name of the resource
+ *
+ * NOTE: This function locks the acquired resource
+ *
+ * Return: NFP Resource handle, or ERR_PTR()
+ */
+struct nfp_resource *
+nfp_resource_acquire(struct nfp_cpp *cpp, const char *name)
+{
+	struct nfp_cpp_mutex *dev_mutex;
+	struct nfp_resource *res;
+	int err;
+	struct timespec wait;
+	int count;
+
+	res = malloc(sizeof(*res));
+	if (!res)
+		return NULL;
+
+	memset(res, 0, sizeof(*res));
+
+	strncpy(res->name, name, NFP_RESOURCE_ENTRY_NAME_SZ);
+
+	dev_mutex = nfp_cpp_mutex_alloc(cpp, NFP_RESOURCE_TBL_TARGET,
+					NFP_RESOURCE_TBL_BASE,
+					NFP_RESOURCE_TBL_KEY);
+	if (!dev_mutex) {
+		free(res);
+		return NULL;
+	}
+
+	wait.tv_sec = 0;
+	wait.tv_nsec = 1000000;
+	count = 0;
+
+	for (;;) {
+		err = nfp_resource_try_acquire(cpp, res, dev_mutex);
+		if (!err)
+			break;
+		if (err != -EBUSY)
+			goto err_free;
+
+		if (count++ > 1000) {
+			printf("Error: resource %s timed out\n", name);
+			err = -EBUSY;
+			goto err_free;
+		}
+
+		nanosleep(&wait, NULL);
+	}
+
+	nfp_cpp_mutex_free(dev_mutex);
+
+	return res;
+
+err_free:
+	nfp_cpp_mutex_free(dev_mutex);
+	free(res);
+	return NULL;
+}
+
+/*
+ * nfp_resource_release() - Release a NFP Resource handle
+ * @res:	NFP Resource handle
+ *
+ * NOTE: This function implicitly unlocks the resource handle
+ */
+void
+nfp_resource_release(struct nfp_resource *res)
+{
+	nfp_cpp_mutex_unlock(res->mutex);
+	nfp_cpp_mutex_free(res->mutex);
+	free(res);
+}
+
+/*
+ * nfp_resource_cpp_id() - Return the cpp_id of a resource handle
+ * @res:	NFP Resource handle
+ *
+ * Return: NFP CPP ID
+ */
+uint32_t
+nfp_resource_cpp_id(const struct nfp_resource *res)
+{
+	return res->cpp_id;
+}
+
+/*
+ * nfp_resource_name() - Return the name of a resource handle
+ * @res:	NFP Resource handle
+ *
+ * Return: const char pointer to the name of the resource
+ */
+const char *
+nfp_resource_name(const struct nfp_resource *res)
+{
+	return res->name;
+}
+
+/*
+ * nfp_resource_address() - Return the address of a resource handle
+ * @res:	NFP Resource handle
+ *
+ * Return: Address of the resource
+ */
+uint64_t
+nfp_resource_address(const struct nfp_resource *res)
+{
+	return res->addr;
+}
+
+/*
+ * nfp_resource_size() - Return the size in bytes of a resource handle
+ * @res:	NFP Resource handle
+ *
+ * Return: Size of the resource in bytes
+ */
+uint64_t
+nfp_resource_size(const struct nfp_resource *res)
+{
+	return res->size;
+}
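+
+/*
+ * Example usage (illustrative sketch, not part of the driver): look up the
+ * NSP resource, report where it lives, and drop the lock again. "cpp" is
+ * assumed to be a valid CPP handle.
+ *
+ *	struct nfp_resource *res;
+ *
+ *	res = nfp_resource_acquire(cpp, NFP_RESOURCE_NSP);
+ *	if (!res)
+ *		return -ENODEV;
+ *	printf("%s: cpp_id=%08x addr=%" PRIx64 " size=%" PRIu64 "\n",
+ *	       nfp_resource_name(res), nfp_resource_cpp_id(res),
+ *	       nfp_resource_address(res), nfp_resource_size(res));
+ *	nfp_resource_release(res);
+ */
diff --git a/drivers/net/nfp/nfpcore/nfp_resource.h b/drivers/net/nfp/nfpcore/nfp_resource.h
new file mode 100644
index 00000000..06cc6f74
--- /dev/null
+++ b/drivers/net/nfp/nfpcore/nfp_resource.h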
diff --git a/drivers/net/nfp/nfpcore/nfp_resource.h b/drivers/net/nfp/nfpcore/nfp_resource.h
new file mode 100644
index 00000000..06cc6f74
--- /dev/null
+++ b/drivers/net/nfp/nfpcore/nfp_resource.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#ifndef NFP_RESOURCE_H
+#define NFP_RESOURCE_H
+
+#include "nfp_cpp.h"
+
+#define NFP_RESOURCE_NFP_NFFW "nfp.nffw"
+#define NFP_RESOURCE_NFP_HWINFO "nfp.info"
+#define NFP_RESOURCE_NSP "nfp.sp"
+
+/**
+ * Opaque handle to a NFP Resource
+ */
+struct nfp_resource;
+
+struct nfp_resource *nfp_resource_acquire(struct nfp_cpp *cpp,
+					  const char *name);
+
+/**
+ * Release a NFP Resource, and free the handle
+ * @param[in] res NFP Resource handle
+ */
+void nfp_resource_release(struct nfp_resource *res);
+
+/**
+ * Return the CPP ID of a NFP Resource
+ * @param[in] res NFP Resource handle
+ * @return CPP ID of the NFP Resource
+ */
+uint32_t nfp_resource_cpp_id(const struct nfp_resource *res);
+
+/**
+ * Return the name of a NFP Resource
+ * @param[in] res NFP Resource handle
+ * @return Name of the NFP Resource
+ */
+const char *nfp_resource_name(const struct nfp_resource *res);
+
+/**
+ * Return the target address of a NFP Resource
+ * @param[in] res NFP Resource handle
+ * @return Address of the NFP Resource
+ */
+uint64_t nfp_resource_address(const struct nfp_resource *res);
+
+/**
+ * Return the size of a NFP Resource
+ * @param[in] res NFP Resource handle
+ * @return Size of the NFP Resource in bytes
+ */
+uint64_t nfp_resource_size(const struct nfp_resource *res);
+
+#endif /* NFP_RESOURCE_H */
diff --git a/drivers/net/nfp/nfpcore/nfp_rtsym.c b/drivers/net/nfp/nfpcore/nfp_rtsym.c
new file mode 100644
index 00000000..cb7d83db
--- /dev/null
+++ b/drivers/net/nfp/nfpcore/nfp_rtsym.c
@@ -0,0 +1,327 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+/*
+ * nfp_rtsym.c
+ * Interface for accessing run-time symbol table
+ */
+
+#include <stdio.h>
+#include <rte_byteorder.h>
+#include "nfp_cpp.h"
+#include "nfp_mip.h"
+#include "nfp_rtsym.h"
+#include "nfp6000/nfp6000.h"
+
+/* These need to match the linker */
+#define SYM_TGT_LMEM 0
+#define SYM_TGT_EMU_CACHE 0x17
+
+struct nfp_rtsym_entry {
+	uint8_t	type;
+	uint8_t	target;
+	uint8_t	island;
+	uint8_t	addr_hi;
+	uint32_t addr_lo;
+	uint16_t name;
+	uint8_t	menum;
+	uint8_t	size_hi;
+	uint32_t size_lo;
+};
+
+struct nfp_rtsym_table {
+	struct nfp_cpp *cpp;
+	int num;
+	char *strtab;
+	struct nfp_rtsym symtab[];
+};
+
+static int
+nfp_meid(uint8_t island_id, uint8_t menum)
+{
+	return (island_id & 0x3F) == island_id && menum < 12 ?
+ (island_id << 4) | (menum + 4) : -1; +} + +static void +nfp_rtsym_sw_entry_init(struct nfp_rtsym_table *cache, uint32_t strtab_size, + struct nfp_rtsym *sw, struct nfp_rtsym_entry *fw) +{ + sw->type = fw->type; + sw->name = cache->strtab + rte_le_to_cpu_16(fw->name) % strtab_size; + sw->addr = ((uint64_t)fw->addr_hi << 32) | + rte_le_to_cpu_32(fw->addr_lo); + sw->size = ((uint64_t)fw->size_hi << 32) | + rte_le_to_cpu_32(fw->size_lo); + +#ifdef DEBUG + printf("rtsym_entry_init\n"); + printf("\tname=%s, addr=%" PRIx64 ", size=%" PRIu64 ",target=%d\n", + sw->name, sw->addr, sw->size, sw->target); +#endif + switch (fw->target) { + case SYM_TGT_LMEM: + sw->target = NFP_RTSYM_TARGET_LMEM; + break; + case SYM_TGT_EMU_CACHE: + sw->target = NFP_RTSYM_TARGET_EMU_CACHE; + break; + default: + sw->target = fw->target; + break; + } + + if (fw->menum != 0xff) + sw->domain = nfp_meid(fw->island, fw->menum); + else if (fw->island != 0xff) + sw->domain = fw->island; + else + sw->domain = -1; +} + +struct nfp_rtsym_table * +nfp_rtsym_table_read(struct nfp_cpp *cpp) +{ + struct nfp_rtsym_table *rtbl; + struct nfp_mip *mip; + + mip = nfp_mip_open(cpp); + rtbl = __nfp_rtsym_table_read(cpp, mip); + nfp_mip_close(mip); + + return rtbl; +} + +/* + * This looks more complex than it should be. But we need to get the type for + * the ~ right in round_down (it needs to be as wide as the result!), and we + * want to evaluate the macro arguments just once each. + */ +#define __round_mask(x, y) ((__typeof__(x))((y) - 1)) + +#define round_up(x, y) \ + (__extension__ ({ \ + typeof(x) _x = (x); \ + ((((_x) - 1) | __round_mask(_x, y)) + 1); \ + })) + +#define round_down(x, y) \ + (__extension__ ({ \ + typeof(x) _x = (x); \ + ((_x) & ~__round_mask(_x, y)); \ + })) + +struct nfp_rtsym_table * +__nfp_rtsym_table_read(struct nfp_cpp *cpp, const struct nfp_mip *mip) +{ + uint32_t strtab_addr, symtab_addr, strtab_size, symtab_size; + struct nfp_rtsym_entry *rtsymtab; + struct nfp_rtsym_table *cache; + const uint32_t dram = + NFP_CPP_ID(NFP_CPP_TARGET_MU, NFP_CPP_ACTION_RW, 0) | + NFP_ISL_EMEM0; + int err, n, size; + + if (!mip) + return NULL; + + nfp_mip_strtab(mip, &strtab_addr, &strtab_size); + nfp_mip_symtab(mip, &symtab_addr, &symtab_size); + + if (!symtab_size || !strtab_size || symtab_size % sizeof(*rtsymtab)) + return NULL; + + /* Align to 64 bits */ + symtab_size = round_up(symtab_size, 8); + strtab_size = round_up(strtab_size, 8); + + rtsymtab = malloc(symtab_size); + if (!rtsymtab) + return NULL; + + size = sizeof(*cache); + size += symtab_size / sizeof(*rtsymtab) * sizeof(struct nfp_rtsym); + size += strtab_size + 1; + cache = malloc(size); + if (!cache) + goto exit_free_rtsym_raw; + + cache->cpp = cpp; + cache->num = symtab_size / sizeof(*rtsymtab); + cache->strtab = (void *)&cache->symtab[cache->num]; + + err = nfp_cpp_read(cpp, dram, symtab_addr, rtsymtab, symtab_size); + if (err != (int)symtab_size) + goto exit_free_cache; + + err = nfp_cpp_read(cpp, dram, strtab_addr, cache->strtab, strtab_size); + if (err != (int)strtab_size) + goto exit_free_cache; + cache->strtab[strtab_size] = '\0'; + + for (n = 0; n < cache->num; n++) + nfp_rtsym_sw_entry_init(cache, strtab_size, + &cache->symtab[n], &rtsymtab[n]); + + free(rtsymtab); + + return cache; + +exit_free_cache: + free(cache); +exit_free_rtsym_raw: + free(rtsymtab); + return NULL; +} + +/* + * nfp_rtsym_count() - Get the number of RTSYM descriptors + * @rtbl: NFP RTsym table + * + * Return: Number of RTSYM descriptors + */ +int +nfp_rtsym_count(struct 
nfp_rtsym_table *rtbl)
+{
+	if (!rtbl)
+		return -EINVAL;
+
+	return rtbl->num;
+}
+
+/*
+ * nfp_rtsym_get() - Get the Nth RTSYM descriptor
+ * @rtbl: NFP RTsym table
+ * @idx: Index (0-based) of the RTSYM descriptor
+ *
+ * Return: const pointer to a struct nfp_rtsym descriptor, or NULL
+ */
+const struct nfp_rtsym *
+nfp_rtsym_get(struct nfp_rtsym_table *rtbl, int idx)
+{
+	if (!rtbl)
+		return NULL;
+
+	if (idx >= rtbl->num)
+		return NULL;
+
+	return &rtbl->symtab[idx];
+}
+
+/*
+ * nfp_rtsym_lookup() - Return the RTSYM descriptor for a symbol name
+ * @rtbl: NFP RTsym table
+ * @name: Symbol name
+ *
+ * Return: const pointer to a struct nfp_rtsym descriptor, or NULL
+ */
+const struct nfp_rtsym *
+nfp_rtsym_lookup(struct nfp_rtsym_table *rtbl, const char *name)
+{
+	int n;
+
+	if (!rtbl)
+		return NULL;
+
+	for (n = 0; n < rtbl->num; n++)
+		if (strcmp(name, rtbl->symtab[n].name) == 0)
+			return &rtbl->symtab[n];
+
+	return NULL;
+}
+
+/*
+ * nfp_rtsym_read_le() - Read a simple unsigned scalar value from symbol
+ * @rtbl: NFP RTsym table
+ * @name: Symbol name
+ * @error: Pointer to error code (optional)
+ *
+ * Look up a symbol, map it, read it and return its value. The value of the
+ * symbol will be interpreted as a simple little-endian unsigned value. The
+ * symbol can be 4 or 8 bytes in size.
+ *
+ * Return: value read; on error, sets @error and returns ~0ULL.
+ */
+uint64_t
+nfp_rtsym_read_le(struct nfp_rtsym_table *rtbl, const char *name, int *error)
+{
+	const struct nfp_rtsym *sym;
+	uint32_t val32, id;
+	uint64_t val;
+	int err;
+
+	sym = nfp_rtsym_lookup(rtbl, name);
+	if (!sym) {
+		err = -ENOENT;
+		goto exit;
+	}
+
+	id = NFP_CPP_ISLAND_ID(sym->target, NFP_CPP_ACTION_RW, 0, sym->domain);
+
+#ifdef DEBUG
+	printf("Reading symbol %s with size %" PRIu64 " at %" PRIx64 "\n",
+		name, sym->size, sym->addr);
+#endif
+	switch (sym->size) {
+	case 4:
+		err = nfp_cpp_readl(rtbl->cpp, id, sym->addr, &val32);
+		val = val32;
+		break;
+	case 8:
+		err = nfp_cpp_readq(rtbl->cpp, id, sym->addr, &val);
+		break;
+	default:
+		printf("rtsym '%s' unsupported size: %" PRId64 "\n",
+			name, sym->size);
+		err = -EINVAL;
+		break;
+	}
+
+	if (err)
+		err = -EIO;
+exit:
+	if (error)
+		*error = err;
+
+	if (err)
+		return ~0ULL;
+
+	return val;
+}
+
+uint8_t *
+nfp_rtsym_map(struct nfp_rtsym_table *rtbl, const char *name,
+	      unsigned int min_size, struct nfp_cpp_area **area)
+{
+	const struct nfp_rtsym *sym;
+	uint8_t *mem;
+
+#ifdef DEBUG
+	printf("mapping symbol %s\n", name);
+#endif
+	sym = nfp_rtsym_lookup(rtbl, name);
+	if (!sym) {
+		printf("symbol lookup fails for %s\n", name);
+		return NULL;
+	}
+
+	if (sym->size < min_size) {
+		printf("Symbol %s too small (%" PRIu64 " < %u)\n", name,
+			sym->size, min_size);
+		return NULL;
+	}
+
+	mem = nfp_cpp_map_area(rtbl->cpp, sym->domain, sym->target, sym->addr,
+			       sym->size, area);
+	if (!mem) {
+		printf("Failed to map symbol %s\n", name);
+		return NULL;
+	}
+#ifdef DEBUG
+	printf("symbol %s with address %p\n", name, mem);
+#endif
+
+	return mem;
+}
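+
+/*
+ * Typical usage of the run-time symbol API, as an illustrative sketch
+ * (the symbol name "_pf0_net_bar0" is hypothetical and depends on the
+ * loaded firmware):
+ *
+ *	struct nfp_rtsym_table *rtbl;
+ *	uint64_t val;
+ *	int err;
+ *
+ *	rtbl = nfp_rtsym_table_read(cpp);
+ *	if (!rtbl)
+ *		return -EIO;
+ *	val = nfp_rtsym_read_le(rtbl, "_pf0_net_bar0", &err);
+ *	free(rtbl);
+ *	if (err)
+ *		return err;
+ */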
diff --git a/drivers/net/nfp/nfpcore/nfp_rtsym.h b/drivers/net/nfp/nfpcore/nfp_rtsym.h
new file mode 100644
index 00000000..8b494211
--- /dev/null
+++ b/drivers/net/nfp/nfpcore/nfp_rtsym.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#ifndef __NFP_RTSYM_H__
+#define __NFP_RTSYM_H__
+
+#define NFP_RTSYM_TYPE_NONE 0
+#define NFP_RTSYM_TYPE_OBJECT 1
+#define NFP_RTSYM_TYPE_FUNCTION 2
+#define NFP_RTSYM_TYPE_ABS 3
+
+#define NFP_RTSYM_TARGET_NONE 0
+#define NFP_RTSYM_TARGET_LMEM -1
+#define NFP_RTSYM_TARGET_EMU_CACHE -7
+
+/*
+ * Structure describing a run-time NFP symbol.
+ *
+ * The memory target of the symbol is generally the CPP target number and can
+ * be used directly by the nfp_cpp API calls. However, in some cases (i.e.,
+ * for local memory or control store) the target is encoded using a negative
+ * number.
+ *
+ * When the target type cannot be used to fully describe the location of a
+ * symbol, the domain field is used to further specify the location (i.e.,
+ * the specific ME or island number).
+ *
+ * For ME target resources, 'domain' is an MEID.
+ * For island target resources, 'domain' is an island ID; the one exception
+ * is "sram" symbols, which for backward compatibility are viewed as global.
+ */
+struct nfp_rtsym {
+	const char *name;
+	uint64_t addr;
+	uint64_t size;
+	int type;
+	int target;
+	int domain;
+};
+
+struct nfp_rtsym_table;
+
+struct nfp_rtsym_table *nfp_rtsym_table_read(struct nfp_cpp *cpp);
+
+struct nfp_rtsym_table *
+__nfp_rtsym_table_read(struct nfp_cpp *cpp, const struct nfp_mip *mip);
+
+int nfp_rtsym_count(struct nfp_rtsym_table *rtbl);
+
+const struct nfp_rtsym *nfp_rtsym_get(struct nfp_rtsym_table *rtbl, int idx);
+
+const struct nfp_rtsym *
+nfp_rtsym_lookup(struct nfp_rtsym_table *rtbl, const char *name);
+
+uint64_t nfp_rtsym_read_le(struct nfp_rtsym_table *rtbl, const char *name,
+			   int *error);
+uint8_t *
+nfp_rtsym_map(struct nfp_rtsym_table *rtbl, const char *name,
+	      unsigned int min_size, struct nfp_cpp_area **area);
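+
+/*
+ * Worked example of the 'domain' encoding described above (illustrative
+ * values): for an ME target resource, the MEID packs the island ID into
+ * the upper bits and the ME number, offset by 4, into the low nibble,
+ * so island 32, microengine 0 yields (32 << 4) | (0 + 4) == 0x204.
+ */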
+#endif
diff --git a/drivers/net/nfp/nfpcore/nfp_target.h b/drivers/net/nfp/nfpcore/nfp_target.h
new file mode 100644
index 00000000..2884a003
--- /dev/null
+++ b/drivers/net/nfp/nfpcore/nfp_target.h
@@ -0,0 +1,579 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#ifndef NFP_TARGET_H
+#define NFP_TARGET_H
+
+#include "nfp-common/nfp_resid.h"
+#include "nfp-common/nfp_cppat.h"
+#include "nfp-common/nfp_platform.h"
+#include "nfp_cpp.h"
+
+#define P32 1
+#define P64 2
+
+#define PUSHPULL(_pull, _push) (((_pull) << 4) | ((_push) << 0))
+
+#ifndef NFP_ERRNO
+#include <errno.h>
+#define NFP_ERRNO(x) (errno = (x), -1)
+#endif
+
+static inline int
+pushpull_width(int pp)
+{
+	pp &= 0xf;
+
+	if (pp == 0)
+		return NFP_ERRNO(EINVAL);
+	return (2 << pp);
+}
+
+#define PUSH_WIDTH(_pushpull) pushpull_width((_pushpull) >> 0)
+#define PULL_WIDTH(_pushpull) pushpull_width((_pushpull) >> 4)
+
+static inline int
+target_rw(uint32_t cpp_id, int pp, int start, int len)
+{
+	int island = NFP_CPP_ID_ISLAND_of(cpp_id);
+
+	if (island && (island < start || island > (start + len)))
+		return NFP_ERRNO(EINVAL);
+
+	switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+	case NFP_CPP_ID(0, 0, 0):
+		return PUSHPULL(0, pp);
+	case NFP_CPP_ID(0, 1, 0):
+		return PUSHPULL(pp, 0);
+	case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 0):
+		return PUSHPULL(pp, pp);
+	default:
+		return NFP_ERRNO(EINVAL);
+	}
+}
+
+static inline int
+nfp6000_nbi_dma(uint32_t cpp_id)
+{
+	switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+	case NFP_CPP_ID(0, 0, 0): /* ReadNbiDma */
+		return PUSHPULL(0, P64);
+	case NFP_CPP_ID(0, 1, 0): /* WriteNbiDma */
+		return PUSHPULL(P64, 0);
+	case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 0):
+		return PUSHPULL(P64, P64);
+	default:
+		return NFP_ERRNO(EINVAL);
+	}
+}
+
+static inline int
+nfp6000_nbi_stats(uint32_t cpp_id)
+{
+	switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+	case NFP_CPP_ID(0, 0, 0): /* ReadNbiStats */
+		return PUSHPULL(0, P64);
+	case NFP_CPP_ID(0, 1, 0): /* WriteNbiStats */
+		return PUSHPULL(P64, 0);
+	case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 0):
+		return PUSHPULL(P64, P64);
+	default:
+		return NFP_ERRNO(EINVAL);
+	}
+}
+
+static inline int
+nfp6000_nbi_tm(uint32_t cpp_id)
+{
+	switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+	case NFP_CPP_ID(0, 0, 0): /* ReadNbiTM */
+		return PUSHPULL(0, P64);
+	case NFP_CPP_ID(0, 1, 0): /* WriteNbiTM */
+		return PUSHPULL(P64, 0);
+	case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 0):
+		return PUSHPULL(P64, P64);
+	default:
+		return NFP_ERRNO(EINVAL);
+	}
+}
+
+static inline int
+nfp6000_nbi_ppc(uint32_t cpp_id)
+{
+	switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+	case NFP_CPP_ID(0, 0, 0): /* ReadNbiPreclassifier */
+		return PUSHPULL(0, P64);
+	case NFP_CPP_ID(0, 1, 0): /* WriteNbiPreclassifier */
+		return PUSHPULL(P64, 0);
+	case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 0):
+		return PUSHPULL(P64, P64);
+	default:
+		return NFP_ERRNO(EINVAL);
+	}
+}
+
+static inline int
+nfp6000_nbi(uint32_t cpp_id, uint64_t address)
+{
+	int island = NFP_CPP_ID_ISLAND_of(cpp_id);
+	uint64_t rel_addr = address & 0x3fFFFF;
+
+	if (island && (island < 8 || island > 9))
+		return NFP_ERRNO(EINVAL);
+
+	if (rel_addr < (1 << 20))
+		return nfp6000_nbi_dma(cpp_id);
+	if (rel_addr < (2 << 20))
+		return nfp6000_nbi_stats(cpp_id);
+	if (rel_addr < (3 << 20))
+		return nfp6000_nbi_tm(cpp_id);
+	return nfp6000_nbi_ppc(cpp_id);
+}
+
+/*
+ * This function ONLY includes items that can be done with a read or write of
+ * 32-bit or 64-bit words. All others are not listed.
+ */ +static inline int +nfp6000_mu_common(uint32_t cpp_id) +{ + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 0): /* read_be/write_be */ + return PUSHPULL(P64, P64); + case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 1): /* read_le/write_le */ + return PUSHPULL(P64, P64); + case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 2): /* {read/write}_swap_be */ + return PUSHPULL(P64, P64); + case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 3): /* {read/write}_swap_le */ + return PUSHPULL(P64, P64); + case NFP_CPP_ID(0, 0, 0): /* read_be */ + return PUSHPULL(0, P64); + case NFP_CPP_ID(0, 0, 1): /* read_le */ + return PUSHPULL(0, P64); + case NFP_CPP_ID(0, 0, 2): /* read_swap_be */ + return PUSHPULL(0, P64); + case NFP_CPP_ID(0, 0, 3): /* read_swap_le */ + return PUSHPULL(0, P64); + case NFP_CPP_ID(0, 1, 0): /* write_be */ + return PUSHPULL(P64, 0); + case NFP_CPP_ID(0, 1, 1): /* write_le */ + return PUSHPULL(P64, 0); + case NFP_CPP_ID(0, 1, 2): /* write_swap_be */ + return PUSHPULL(P64, 0); + case NFP_CPP_ID(0, 1, 3): /* write_swap_le */ + return PUSHPULL(P64, 0); + case NFP_CPP_ID(0, 3, 0): /* atomic_read */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 3, 2): /* mask_compare_write */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 4, 0): /* atomic_write */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 4, 2): /* atomic_write_imm */ + return PUSHPULL(0, 0); + case NFP_CPP_ID(0, 4, 3): /* swap_imm */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 5, 0): /* set */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 5, 3): /* test_set_imm */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 6, 0): /* clr */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 6, 3): /* test_clr_imm */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 7, 0): /* add */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 7, 3): /* test_add_imm */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 8, 0): /* addsat */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 8, 3): /* test_subsat_imm */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 9, 0): /* sub */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 9, 3): /* test_sub_imm */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 10, 0): /* subsat */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 10, 3): /* test_subsat_imm */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 13, 0): /* microq128_get */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 13, 1): /* microq128_pop */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 13, 2): /* microq128_put */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 15, 0): /* xor */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 15, 3): /* test_xor_imm */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 28, 0): /* read32_be */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 28, 1): /* read32_le */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 28, 2): /* read32_swap_be */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 28, 3): /* read32_swap_le */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 31, 0): /* write32_be */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 31, 1): /* write32_le */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 31, 2): /* write32_swap_be */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 31, 3): /* write32_swap_le */ + return PUSHPULL(P32, 0); + default: + return NFP_ERRNO(EINVAL); + } +} + +static inline int +nfp6000_mu_ctm(uint32_t cpp_id) +{ + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + case NFP_CPP_ID(0, 16, 1): /* packet_read_packet_status */ + return PUSHPULL(0, P32); + default: + return nfp6000_mu_common(cpp_id); + } +} 
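+
+/*
+ * Worked example of the PUSHPULL() encoding used throughout this file
+ * (illustrative values): P64 == 2, so PUSHPULL(P64, 0) encodes a
+ * pull-only 64-bit action as (2 << 4) | 0 == 0x20. Decoding it,
+ * PULL_WIDTH(0x20) == pushpull_width(2) == (2 << 2) == 8 bytes, while
+ * PUSH_WIDTH(0x20) == pushpull_width(0) fails with -1 and errno set to
+ * EINVAL, since the action pushes no data.
+ */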
+ +static inline int +nfp6000_mu_emu(uint32_t cpp_id) +{ + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + case NFP_CPP_ID(0, 18, 0): /* read_queue */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 18, 1): /* read_queue_ring */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 18, 2): /* write_queue */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 18, 3): /* write_queue_ring */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 20, 2): /* journal */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 21, 0): /* get */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 21, 1): /* get_eop */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 21, 2): /* get_freely */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 22, 0): /* pop */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 22, 1): /* pop_eop */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 22, 2): /* pop_freely */ + return PUSHPULL(0, P32); + default: + return nfp6000_mu_common(cpp_id); + } +} + +static inline int +nfp6000_mu_imu(uint32_t cpp_id) +{ + return nfp6000_mu_common(cpp_id); +} + +static inline int +nfp6000_mu(uint32_t cpp_id, uint64_t address) +{ + int pp; + int island = NFP_CPP_ID_ISLAND_of(cpp_id); + + if (island == 0) { + if (address < 0x2000000000ULL) + pp = nfp6000_mu_ctm(cpp_id); + else if (address < 0x8000000000ULL) + pp = nfp6000_mu_emu(cpp_id); + else if (address < 0x9800000000ULL) + pp = nfp6000_mu_ctm(cpp_id); + else if (address < 0x9C00000000ULL) + pp = nfp6000_mu_emu(cpp_id); + else if (address < 0xA000000000ULL) + pp = nfp6000_mu_imu(cpp_id); + else + pp = nfp6000_mu_ctm(cpp_id); + } else if (island >= 24 && island <= 27) { + pp = nfp6000_mu_emu(cpp_id); + } else if (island >= 28 && island <= 31) { + pp = nfp6000_mu_imu(cpp_id); + } else if (island == 1 || + (island >= 4 && island <= 7) || + (island >= 12 && island <= 13) || + (island >= 32 && island <= 47) || + (island >= 48 && island <= 51)) { + pp = nfp6000_mu_ctm(cpp_id); + } else { + pp = NFP_ERRNO(EINVAL); + } + + return pp; +} + +static inline int +nfp6000_ila(uint32_t cpp_id) +{ + int island = NFP_CPP_ID_ISLAND_of(cpp_id); + + if (island && (island < 48 || island > 51)) + return NFP_ERRNO(EINVAL); + + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + case NFP_CPP_ID(0, 0, 1): /* read_check_error */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 2, 0): /* read_int */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 3, 0): /* write_int */ + return PUSHPULL(P32, 0); + default: + return target_rw(cpp_id, P32, 48, 4); + } +} + +static inline int +nfp6000_pci(uint32_t cpp_id) +{ + int island = NFP_CPP_ID_ISLAND_of(cpp_id); + + if (island && (island < 4 || island > 7)) + return NFP_ERRNO(EINVAL); + + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + case NFP_CPP_ID(0, 2, 0): + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 3, 0): + return PUSHPULL(P32, 0); + default: + return target_rw(cpp_id, P32, 4, 4); + } +} + +static inline int +nfp6000_crypto(uint32_t cpp_id) +{ + int island = NFP_CPP_ID_ISLAND_of(cpp_id); + + if (island && (island < 12 || island > 15)) + return NFP_ERRNO(EINVAL); + + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + case NFP_CPP_ID(0, 2, 0): + return PUSHPULL(P64, 0); + default: + return target_rw(cpp_id, P64, 12, 4); + } +} + +static inline int +nfp6000_cap_xpb(uint32_t cpp_id) +{ + int island = NFP_CPP_ID_ISLAND_of(cpp_id); + + if (island && (island < 1 || island > 63)) + return NFP_ERRNO(EINVAL); + + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + case NFP_CPP_ID(0, 0, 1): /* RingGet */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 0, 2): /* Interthread Signal */ + 
return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 1, 1): /* RingPut */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 1, 2): /* CTNNWr */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 2, 0): /* ReflectRd, signal none */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 2, 1): /* ReflectRd, signal self */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 2, 2): /* ReflectRd, signal remote */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 2, 3): /* ReflectRd, signal both */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 3, 0): /* ReflectWr, signal none */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 3, 1): /* ReflectWr, signal self */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 3, 2): /* ReflectWr, signal remote */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 3, 3): /* ReflectWr, signal both */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 1): + return PUSHPULL(P32, P32); + default: + return target_rw(cpp_id, P32, 1, 63); + } +} + +static inline int +nfp6000_cls(uint32_t cpp_id) +{ + int island = NFP_CPP_ID_ISLAND_of(cpp_id); + + if (island && (island < 1 || island > 63)) + return NFP_ERRNO(EINVAL); + + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + case NFP_CPP_ID(0, 0, 3): /* xor */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 2, 0): /* set */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 2, 1): /* clr */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 4, 0): /* add */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 4, 1): /* add64 */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 6, 0): /* sub */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 6, 1): /* sub64 */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 6, 2): /* subsat */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 8, 2): /* hash_mask */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 8, 3): /* hash_clear */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 9, 0): /* ring_get */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 9, 1): /* ring_pop */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 9, 2): /* ring_get_freely */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 9, 3): /* ring_pop_freely */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 10, 0): /* ring_put */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 10, 2): /* ring_journal */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 14, 0): /* reflect_write_sig_local */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 15, 1): /* reflect_read_sig_local */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 17, 2): /* statistic */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 24, 0): /* ring_read */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 24, 1): /* ring_write */ + return PUSHPULL(P32, 0); + case NFP_CPP_ID(0, 25, 0): /* ring_workq_add_thread */ + return PUSHPULL(0, P32); + case NFP_CPP_ID(0, 25, 1): /* ring_workq_add_work */ + return PUSHPULL(P32, 0); + default: + return target_rw(cpp_id, P32, 0, 64); + } +} + +static inline int +nfp6000_target_pushpull(uint32_t cpp_id, uint64_t address) +{ + switch (NFP_CPP_ID_TARGET_of(cpp_id)) { + case NFP6000_CPPTGT_NBI: + return nfp6000_nbi(cpp_id, address); + case NFP6000_CPPTGT_VQDR: + return target_rw(cpp_id, P32, 24, 4); + case NFP6000_CPPTGT_ILA: + return nfp6000_ila(cpp_id); + case NFP6000_CPPTGT_MU: + return nfp6000_mu(cpp_id, address); + case NFP6000_CPPTGT_PCIE: + return nfp6000_pci(cpp_id); + case NFP6000_CPPTGT_ARM: + if (address < 0x10000) + return target_rw(cpp_id, P64, 1, 1); + else + return target_rw(cpp_id, P32, 1, 1); + case NFP6000_CPPTGT_CRYPTO: + return nfp6000_crypto(cpp_id); + 
case NFP6000_CPPTGT_CTXPB: + return nfp6000_cap_xpb(cpp_id); + case NFP6000_CPPTGT_CLS: + return nfp6000_cls(cpp_id); + case 0: + return target_rw(cpp_id, P32, 4, 4); + default: + return NFP_ERRNO(EINVAL); + } +} + +static inline int +nfp_target_pushpull_width(int pp, int write_not_read) +{ + if (pp < 0) + return pp; + + if (write_not_read) + return PULL_WIDTH(pp); + else + return PUSH_WIDTH(pp); +} + +static inline int +nfp6000_target_action_width(uint32_t cpp_id, uint64_t address, + int write_not_read) +{ + int pp; + + pp = nfp6000_target_pushpull(cpp_id, address); + + return nfp_target_pushpull_width(pp, write_not_read); +} + +static inline int +nfp_target_action_width(uint32_t model, uint32_t cpp_id, uint64_t address, + int write_not_read) +{ + if (NFP_CPP_MODEL_IS_6000(model)) { + return nfp6000_target_action_width(cpp_id, address, + write_not_read); + } else { + return NFP_ERRNO(EINVAL); + } +} + +static inline int +nfp_target_cpp(uint32_t cpp_island_id, uint64_t cpp_island_address, + uint32_t *cpp_target_id, uint64_t *cpp_target_address, + const uint32_t *imb_table) +{ + int err; + int island = NFP_CPP_ID_ISLAND_of(cpp_island_id); + int target = NFP_CPP_ID_TARGET_of(cpp_island_id); + uint32_t imb; + + if (target < 0 || target >= 16) + return NFP_ERRNO(EINVAL); + + if (island == 0) { + /* Already translated */ + *cpp_target_id = cpp_island_id; + *cpp_target_address = cpp_island_address; + return 0; + } + + if (!imb_table) { + /* CPP + Island only allowed on systems with IMB tables */ + return NFP_ERRNO(EINVAL); + } + + imb = imb_table[target]; + + *cpp_target_address = cpp_island_address; + err = _nfp6000_cppat_addr_encode(cpp_target_address, island, target, + ((imb >> 13) & 7), + ((imb >> 12) & 1), + ((imb >> 6) & 0x3f), + ((imb >> 0) & 0x3f)); + if (err == 0) { + *cpp_target_id = + NFP_CPP_ID(target, NFP_CPP_ID_ACTION_of(cpp_island_id), + NFP_CPP_ID_TOKEN_of(cpp_island_id)); + } + + return err; +} + +#endif /* NFP_TARGET_H */ diff --git a/drivers/net/null/meson.build b/drivers/net/null/meson.build index 68ac0d2a..60e2ce6c 100644 --- a/drivers/net/null/meson.build +++ b/drivers/net/null/meson.build @@ -1,4 +1,5 @@ # SPDX-License-Identifier: BSD-3-Clause # Copyright(c) 2017 Intel Corporation +version = 2 sources = files('rte_eth_null.c') diff --git a/drivers/net/null/rte_eth_null.c b/drivers/net/null/rte_eth_null.c index d003b283..1d2e6b9e 100644 --- a/drivers/net/null/rte_eth_null.c +++ b/drivers/net/null/rte_eth_null.c @@ -73,6 +73,7 @@ struct pmd_internals { struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT]; struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT]; + struct ether_addr eth_addr; /** Bit mask of RSS offloads, the bit offset also means flow type */ uint64_t flow_type_rss_offloads; @@ -84,16 +85,19 @@ struct pmd_internals { uint8_t rss_key[40]; /**< 40-byte hash key. */ }; - - -static struct ether_addr eth_addr = { .addr_bytes = {0} }; static struct rte_eth_link pmd_link = { .link_speed = ETH_SPEED_NUM_10G, .link_duplex = ETH_LINK_FULL_DUPLEX, .link_status = ETH_LINK_DOWN, - .link_autoneg = ETH_LINK_AUTONEG, + .link_autoneg = ETH_LINK_FIXED, }; +static int eth_null_logtype; + +#define PMD_LOG(level, fmt, args...) 
\ + rte_log(RTE_LOG_ ## level, eth_null_logtype, \ + "%s(): " fmt "\n", __func__, ##args) + static uint16_t eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs) { @@ -105,10 +109,10 @@ eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs) return 0; packet_size = h->internals->packet_size; + if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0) + return 0; + for (i = 0; i < nb_bufs; i++) { - bufs[i] = rte_pktmbuf_alloc(h->mb_pool); - if (!bufs[i]) - break; bufs[i]->data_len = (uint16_t)packet_size; bufs[i]->pkt_len = packet_size; bufs[i]->port = h->internals->port_id; @@ -130,10 +134,10 @@ eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs) return 0; packet_size = h->internals->packet_size; + if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0) + return 0; + for (i = 0; i < nb_bufs; i++) { - bufs[i] = rte_pktmbuf_alloc(h->mb_pool); - if (!bufs[i]) - break; rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet, packet_size); bufs[i]->data_len = (uint16_t)packet_size; @@ -461,10 +465,11 @@ eth_rss_hash_conf_get(struct rte_eth_dev *dev, return 0; } -static void +static int eth_mac_address_set(__rte_unused struct rte_eth_dev *dev, __rte_unused struct ether_addr *addr) { + return 0; } static const struct eth_dev_ops ops = { @@ -496,7 +501,7 @@ eth_dev_null_create(struct rte_vdev_device *dev, { const unsigned nb_rx_queues = 1; const unsigned nb_tx_queues = 1; - struct rte_eth_dev_data *data = NULL; + struct rte_eth_dev_data *data; struct pmd_internals *internals = NULL; struct rte_eth_dev *eth_dev = NULL; @@ -510,22 +515,12 @@ eth_dev_null_create(struct rte_vdev_device *dev, if (dev->device.numa_node == SOCKET_ID_ANY) dev->device.numa_node = rte_socket_id(); - RTE_LOG(INFO, PMD, "Creating null ethdev on numa socket %u\n", + PMD_LOG(INFO, "Creating null ethdev on numa socket %u", dev->device.numa_node); - /* now do all data allocation - for eth_dev structure, dummy pci driver - * and internal (private) data - */ - data = rte_zmalloc_socket(rte_vdev_device_name(dev), sizeof(*data), 0, - dev->device.numa_node); - if (!data) - return -ENOMEM; - eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals)); - if (!eth_dev) { - rte_free(data); + if (!eth_dev) return -ENOMEM; - } /* now put it all together * - store queue data in internals, @@ -540,19 +535,19 @@ eth_dev_null_create(struct rte_vdev_device *dev, internals->packet_size = packet_size; internals->packet_copy = packet_copy; internals->port_id = eth_dev->data->port_id; + eth_random_addr(internals->eth_addr.addr_bytes); internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK; internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE; rte_memcpy(internals->rss_key, default_rss_key, 40); - rte_memcpy(data, eth_dev->data, sizeof(*data)); + data = eth_dev->data; data->nb_rx_queues = (uint16_t)nb_rx_queues; data->nb_tx_queues = (uint16_t)nb_tx_queues; data->dev_link = pmd_link; - data->mac_addrs = ð_addr; + data->mac_addrs = &internals->eth_addr; - eth_dev->data = data; eth_dev->dev_ops = &ops; /* finally assign rx and tx ops */ @@ -564,6 +559,7 @@ eth_dev_null_create(struct rte_vdev_device *dev, eth_dev->tx_pkt_burst = eth_null_tx; } + rte_eth_dev_probing_finish(eth_dev); return 0; } @@ -608,6 +604,7 @@ rte_pmd_null_probe(struct rte_vdev_device *dev) unsigned packet_size = default_packet_size; unsigned packet_copy = default_packet_copy; struct rte_kvargs *kvlist = NULL; + struct rte_eth_dev *eth_dev; int ret; if (!dev) @@ -615,7 +612,20 @@ rte_pmd_null_probe(struct rte_vdev_device 
*dev) name = rte_vdev_device_name(dev); params = rte_vdev_device_args(dev); - RTE_LOG(INFO, PMD, "Initializing pmd_null for %s\n", name); + PMD_LOG(INFO, "Initializing pmd_null for %s", name); + + if (rte_eal_process_type() == RTE_PROC_SECONDARY && + strlen(params) == 0) { + eth_dev = rte_eth_dev_attach_secondary(name); + if (!eth_dev) { + PMD_LOG(ERR, "Failed to probe %s", name); + return -1; + } + /* TODO: request info from primary to set up Rx and Tx */ + eth_dev->dev_ops = &ops; + rte_eth_dev_probing_finish(eth_dev); + return 0; + } if (params != NULL) { kvlist = rte_kvargs_parse(params, valid_arguments); @@ -641,8 +651,8 @@ rte_pmd_null_probe(struct rte_vdev_device *dev) } } - RTE_LOG(INFO, PMD, "Configure pmd_null: packet size is %d, " - "packet copy is %s\n", packet_size, + PMD_LOG(INFO, "Configure pmd_null: packet size is %d, " + "packet copy is %s", packet_size, packet_copy ? "enabled" : "disabled"); ret = eth_dev_null_create(dev, packet_size, packet_copy); @@ -661,7 +671,7 @@ rte_pmd_null_remove(struct rte_vdev_device *dev) if (!dev) return -EINVAL; - RTE_LOG(INFO, PMD, "Closing null ethdev on numa socket %u\n", + PMD_LOG(INFO, "Closing null ethdev on numa socket %u", rte_socket_id()); /* find the ethdev entry */ @@ -670,7 +680,6 @@ rte_pmd_null_remove(struct rte_vdev_device *dev) return -1; rte_free(eth_dev->data->dev_private); - rte_free(eth_dev->data); rte_eth_dev_release_port(eth_dev); @@ -687,3 +696,12 @@ RTE_PMD_REGISTER_ALIAS(net_null, eth_null); RTE_PMD_REGISTER_PARAM_STRING(net_null, "size= " "copy="); + +RTE_INIT(eth_null_init_log); +static void +eth_null_init_log(void) +{ + eth_null_logtype = rte_log_register("pmd.net.null"); + if (eth_null_logtype >= 0) + rte_log_set_level(eth_null_logtype, RTE_LOG_NOTICE); +} diff --git a/drivers/net/octeontx/Makefile b/drivers/net/octeontx/Makefile index 3e4a1066..885f1768 100644 --- a/drivers/net/octeontx/Makefile +++ b/drivers/net/octeontx/Makefile @@ -10,6 +10,7 @@ include $(RTE_SDK)/mk/rte.vars.mk LIB = librte_pmd_octeontx.a CFLAGS += $(WERROR_FLAGS) +CFLAGS += -I$(RTE_SDK)/drivers/common/octeontx/ CFLAGS += -I$(RTE_SDK)/drivers/mempool/octeontx/ EXPORT_MAP := rte_pmd_octeontx_version.map @@ -46,7 +47,7 @@ endif CFLAGS_octeontx_ethdev.o += -DALLOW_EXPERIMENTAL_API LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring -LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs +LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_common_octeontx LDLIBS += -lrte_mempool_octeontx LDLIBS += -lrte_eventdev LDLIBS += -lrte_bus_pci diff --git a/drivers/net/octeontx/base/octeontx_bgx.c b/drivers/net/octeontx/base/octeontx_bgx.c index 8576d8ed..0e238826 100644 --- a/drivers/net/octeontx/base/octeontx_bgx.c +++ b/drivers/net/octeontx/base/octeontx_bgx.c @@ -19,7 +19,7 @@ octeontx_bgx_port_open(int port, octeontx_mbox_bgx_port_conf_t *conf) hdr.msg = MBOX_BGX_PORT_OPEN; hdr.vfid = port; - res = octeontx_ssovf_mbox_send(&hdr, NULL, 0, &bgx_conf, len); + res = octeontx_mbox_send(&hdr, NULL, 0, &bgx_conf, len); if (res < 0) return -EACCES; @@ -49,7 +49,7 @@ octeontx_bgx_port_close(int port) hdr.msg = MBOX_BGX_PORT_CLOSE; hdr.vfid = port; - res = octeontx_ssovf_mbox_send(&hdr, NULL, 0, NULL, 0); + res = octeontx_mbox_send(&hdr, NULL, 0, NULL, 0); if (res < 0) return -EACCES; @@ -66,7 +66,7 @@ octeontx_bgx_port_start(int port) hdr.msg = MBOX_BGX_PORT_START; hdr.vfid = port; - res = octeontx_ssovf_mbox_send(&hdr, NULL, 0, NULL, 0); + res = octeontx_mbox_send(&hdr, NULL, 0, NULL, 0); if (res < 0) return -EACCES; @@ -83,7 +83,7 @@ octeontx_bgx_port_stop(int 
port) hdr.msg = MBOX_BGX_PORT_STOP; hdr.vfid = port; - res = octeontx_ssovf_mbox_send(&hdr, NULL, 0, NULL, 0); + res = octeontx_mbox_send(&hdr, NULL, 0, NULL, 0); if (res < 0) return -EACCES; @@ -103,7 +103,7 @@ octeontx_bgx_port_get_config(int port, octeontx_mbox_bgx_port_conf_t *conf) hdr.vfid = port; memset(&bgx_conf, 0, sizeof(octeontx_mbox_bgx_port_conf_t)); - res = octeontx_ssovf_mbox_send(&hdr, NULL, 0, &bgx_conf, len); + res = octeontx_mbox_send(&hdr, NULL, 0, &bgx_conf, len); if (res < 0) return -EACCES; @@ -135,7 +135,7 @@ octeontx_bgx_port_status(int port, octeontx_mbox_bgx_port_status_t *stat) hdr.msg = MBOX_BGX_PORT_GET_STATUS; hdr.vfid = port; - res = octeontx_ssovf_mbox_send(&hdr, NULL, 0, &bgx_stat, len); + res = octeontx_mbox_send(&hdr, NULL, 0, &bgx_stat, len); if (res < 0) return -EACCES; @@ -156,7 +156,7 @@ octeontx_bgx_port_stats(int port, octeontx_mbox_bgx_port_stats_t *stats) hdr.msg = MBOX_BGX_PORT_GET_STATS; hdr.vfid = port; - res = octeontx_ssovf_mbox_send(&hdr, NULL, 0, &bgx_stats, len); + res = octeontx_mbox_send(&hdr, NULL, 0, &bgx_stats, len); if (res < 0) return -EACCES; @@ -181,7 +181,7 @@ octeontx_bgx_port_stats_clr(int port) hdr.msg = MBOX_BGX_PORT_CLR_STATS; hdr.vfid = port; - res = octeontx_ssovf_mbox_send(&hdr, NULL, 0, NULL, 0); + res = octeontx_mbox_send(&hdr, NULL, 0, NULL, 0); if (res < 0) return -EACCES; @@ -200,7 +200,7 @@ octeontx_bgx_port_link_status(int port) hdr.msg = MBOX_BGX_PORT_GET_LINK_STATUS; hdr.vfid = port; - res = octeontx_ssovf_mbox_send(&hdr, NULL, 0, &link, len); + res = octeontx_mbox_send(&hdr, NULL, 0, &link, len); if (res < 0) return -EACCES; @@ -219,7 +219,7 @@ octeontx_bgx_port_promisc_set(int port, int en) hdr.vfid = port; prom = en ? 1 : 0; - res = octeontx_ssovf_mbox_send(&hdr, &prom, sizeof(prom), NULL, 0); + res = octeontx_mbox_send(&hdr, &prom, sizeof(prom), NULL, 0); if (res < 0) return -EACCES; @@ -237,7 +237,7 @@ octeontx_bgx_port_mac_set(int port, uint8_t *mac_addr) hdr.msg = MBOX_BGX_PORT_SET_MACADDR; hdr.vfid = port; - res = octeontx_ssovf_mbox_send(&hdr, mac_addr, len, NULL, 0); + res = octeontx_mbox_send(&hdr, mac_addr, len, NULL, 0); if (res < 0) return -EACCES; diff --git a/drivers/net/octeontx/base/octeontx_pkivf.c b/drivers/net/octeontx/base/octeontx_pkivf.c index 58a7f110..1babea0e 100644 --- a/drivers/net/octeontx/base/octeontx_pkivf.c +++ b/drivers/net/octeontx/base/octeontx_pkivf.c @@ -19,7 +19,7 @@ octeontx_pki_port_open(int port) hdr.msg = MBOX_PKI_PORT_OPEN; hdr.vfid = port; - res = octeontx_ssovf_mbox_send(&hdr, NULL, 0, NULL, 0); + res = octeontx_mbox_send(&hdr, NULL, 0, NULL, 0); if (res < 0) return -EACCES; return res; @@ -38,7 +38,7 @@ octeontx_pki_port_hash_config(int port, pki_hash_cfg_t *hash_cfg) hdr.msg = MBOX_PKI_PORT_HASH_CONFIG; hdr.vfid = port; - res = octeontx_ssovf_mbox_send(&hdr, &h_cfg, len, NULL, 0); + res = octeontx_mbox_send(&hdr, &h_cfg, len, NULL, 0); if (res < 0) return -EACCES; @@ -58,7 +58,7 @@ octeontx_pki_port_pktbuf_config(int port, pki_pktbuf_cfg_t *buf_cfg) hdr.msg = MBOX_PKI_PORT_PKTBUF_CONFIG; hdr.vfid = port; - res = octeontx_ssovf_mbox_send(&hdr, &b_cfg, len, NULL, 0); + res = octeontx_mbox_send(&hdr, &b_cfg, len, NULL, 0); if (res < 0) return -EACCES; return res; @@ -77,7 +77,7 @@ octeontx_pki_port_create_qos(int port, pki_qos_cfg_t *qos_cfg) hdr.msg = MBOX_PKI_PORT_CREATE_QOS; hdr.vfid = port; - res = octeontx_ssovf_mbox_send(&hdr, &q_cfg, len, NULL, 0); + res = octeontx_mbox_send(&hdr, &q_cfg, len, NULL, 0); if (res < 0) return -EACCES; @@ -99,7 +99,7 @@ 
octeontx_pki_port_errchk_config(int port, pki_errchk_cfg_t *cfg)
 	hdr.msg = MBOX_PKI_PORT_ERRCHK_CONFIG;
 	hdr.vfid = port;
 
-	res = octeontx_ssovf_mbox_send(&hdr, &e_cfg, len, NULL, 0);
+	res = octeontx_mbox_send(&hdr, &e_cfg, len, NULL, 0);
 	if (res < 0)
 		return -EACCES;
 
diff --git a/drivers/net/octeontx/base/octeontx_pkivf.h b/drivers/net/octeontx/base/octeontx_pkivf.h
index d036054c..764aff53 100644
--- a/drivers/net/octeontx/base/octeontx_pkivf.h
+++ b/drivers/net/octeontx/base/octeontx_pkivf.h
@@ -422,7 +422,7 @@ octeontx_pki_port_modify_qos(int port, pki_mod_qos_t *qos_cfg)
 	hdr.msg = MBOX_PKI_PORT_MODIFY_QOS;
 	hdr.vfid = port;
 
-	res = octeontx_ssovf_mbox_send(&hdr, &q_cfg, len, NULL, 0);
+	res = octeontx_mbox_send(&hdr, &q_cfg, len, NULL, 0);
 	if (res < 0)
 		return -EACCES;
 
@@ -442,7 +442,7 @@ octeontx_pki_port_delete_qos(int port, pki_del_qos_t *qos_cfg)
 	hdr.msg = MBOX_PKI_PORT_DELETE_QOS;
 	hdr.vfid = port;
 
-	res = octeontx_ssovf_mbox_send(&hdr, &q_cfg, len, NULL, 0);
+	res = octeontx_mbox_send(&hdr, &q_cfg, len, NULL, 0);
 	if (res < 0)
 		return -EACCES;
 
@@ -464,7 +464,7 @@ octeontx_pki_port_close(int port)
 	hdr.msg = MBOX_PKI_PORT_CLOSE;
 	hdr.vfid = port;
 
-	res = octeontx_ssovf_mbox_send(&hdr, &ptype, len, NULL, 0);
+	res = octeontx_mbox_send(&hdr, &ptype, len, NULL, 0);
 	if (res < 0)
 		return -EACCES;
 
@@ -486,7 +486,7 @@ octeontx_pki_port_start(int port)
 	hdr.msg = MBOX_PKI_PORT_START;
 	hdr.vfid = port;
 
-	res = octeontx_ssovf_mbox_send(&hdr, &ptype, len, NULL, 0);
+	res = octeontx_mbox_send(&hdr, &ptype, len, NULL, 0);
 	if (res < 0)
 		return -EACCES;
 
@@ -508,7 +508,7 @@ octeontx_pki_port_stop(int port)
 	hdr.msg = MBOX_PKI_PORT_STOP;
 	hdr.vfid = port;
 
-	res = octeontx_ssovf_mbox_send(&hdr, &ptype, len, NULL, 0);
+	res = octeontx_mbox_send(&hdr, &ptype, len, NULL, 0);
 	if (res < 0)
 		return -EACCES;
 
diff --git a/drivers/net/octeontx/octeontx_ethdev.c b/drivers/net/octeontx/octeontx_ethdev.c
index b739c0b3..1eb453b2 100644
--- a/drivers/net/octeontx/octeontx_ethdev.c
+++ b/drivers/net/octeontx/octeontx_ethdev.c
@@ -122,7 +122,7 @@ octeontx_port_open(struct octeontx_nic *nic)
 	int res;
 
 	res = 0;
-
+	memset(&bgx_port_conf, 0x0, sizeof(bgx_port_conf));
 	PMD_INIT_FUNC_TRACE();
 
 	res = octeontx_bgx_port_open(nic->port_id, &bgx_port_conf);
@@ -283,34 +283,14 @@ octeontx_dev_configure(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
-	if (!rxmode->hw_strip_crc) {
+	if (!(rxmode->offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
 		PMD_INIT_LOG(NOTICE, "can't disable hw crc strip");
-		rxmode->hw_strip_crc = 1;
-	}
-
-	if (rxmode->hw_ip_checksum) {
-		PMD_INIT_LOG(NOTICE, "rxcksum not supported");
-		rxmode->hw_ip_checksum = 0;
-	}
-
-	if (rxmode->split_hdr_size) {
-		octeontx_log_err("rxmode does not support split header");
-		return -EINVAL;
+		rxmode->offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
 	}
 
-	if (rxmode->hw_vlan_filter) {
-		octeontx_log_err("VLAN filter not supported");
-		return -EINVAL;
-	}
-
-	if (rxmode->hw_vlan_extend) {
-		octeontx_log_err("VLAN extended not supported");
-		return -EINVAL;
-	}
-
-	if (rxmode->enable_lro) {
-		octeontx_log_err("LRO not supported");
-		return -EINVAL;
+	if (!(txmode->offloads & DEV_TX_OFFLOAD_MT_LOCKFREE)) {
+		PMD_INIT_LOG(NOTICE, "can't disable lockfree tx");
+		txmode->offloads |= DEV_TX_OFFLOAD_MT_LOCKFREE;
 	}
 
 	if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
@@ -488,20 +468,6 @@ octeontx_dev_promisc_disable(struct rte_eth_dev *dev)
 	octeontx_port_promisc_set(nic, 0);
 }
 
-static inline int
-octeontx_atomic_write_link_status(struct rte_eth_dev *dev,
-				  struct rte_eth_link *link)
-{
-	struct rte_eth_link *dst =
&dev->data->dev_link; - struct rte_eth_link *src = link; - - if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, - *(uint64_t *)src) == 0) - return -1; - - return 0; -} - static int octeontx_port_link_status(struct octeontx_nic *nic) { @@ -532,7 +498,6 @@ octeontx_dev_link_update(struct rte_eth_dev *dev, struct rte_eth_link link; int res; - res = 0; PMD_INIT_FUNC_TRACE(); res = octeontx_port_link_status(nic); @@ -566,6 +531,7 @@ octeontx_dev_link_update(struct rte_eth_dev *dev, case OCTEONTX_LINK_SPEED_RESERVE1: case OCTEONTX_LINK_SPEED_RESERVE2: default: + link.link_speed = ETH_SPEED_NUM_NONE; octeontx_log_err("incorrect link speed %d", nic->speed); break; } @@ -573,7 +539,7 @@ octeontx_dev_link_update(struct rte_eth_dev *dev, link.link_duplex = ETH_LINK_FULL_DUPLEX; link.link_autoneg = ETH_LINK_AUTONEG; - return octeontx_atomic_write_link_status(dev, &link); + return rte_eth_linkstatus_set(dev, &link); } static int @@ -594,7 +560,7 @@ octeontx_dev_stats_reset(struct rte_eth_dev *dev) octeontx_port_stats_clr(nic); } -static void +static int octeontx_dev_default_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *addr) { @@ -605,6 +571,8 @@ octeontx_dev_default_mac_addr_set(struct rte_eth_dev *dev, if (ret != 0) octeontx_log_err("failed to set MAC address on port %d", nic->port_id); + + return ret; } static void @@ -619,28 +587,25 @@ octeontx_dev_info(struct rte_eth_dev *dev, ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G; - dev_info->driver_name = RTE_STR(rte_octeontx_pmd); dev_info->max_mac_addrs = 1; dev_info->max_rx_pktlen = PKI_MAX_PKTLEN; dev_info->max_rx_queues = 1; dev_info->max_tx_queues = PKO_MAX_NUM_DQ; dev_info->min_rx_bufsize = 0; - dev_info->pci_dev = NULL; dev_info->default_rxconf = (struct rte_eth_rxconf) { .rx_free_thresh = 0, .rx_drop_en = 0, + .offloads = OCTEONTX_RX_OFFLOADS, }; dev_info->default_txconf = (struct rte_eth_txconf) { .tx_free_thresh = 0, - .txq_flags = - ETH_TXQ_FLAGS_NOMULTSEGS | - ETH_TXQ_FLAGS_NOOFFLOADS | - ETH_TXQ_FLAGS_NOXSUMS, + .offloads = OCTEONTX_TX_OFFLOADS, }; - dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MT_LOCKFREE; + dev_info->rx_offload_capa = OCTEONTX_RX_OFFLOADS; + dev_info->tx_offload_capa = OCTEONTX_TX_OFFLOADS; } static void @@ -744,7 +709,7 @@ octeontx_dev_tx_queue_release(void *tx_queue) static int octeontx_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx, uint16_t nb_desc, unsigned int socket_id, - const struct rte_eth_txconf *tx_conf) + const struct rte_eth_txconf *tx_conf __rte_unused) { struct octeontx_nic *nic = octeontx_pmd_priv(dev); struct octeontx_txq *txq = NULL; @@ -753,7 +718,6 @@ octeontx_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx, RTE_SET_USED(nb_desc); RTE_SET_USED(socket_id); - RTE_SET_USED(tx_conf); dq_num = (nic->port_id * PKO_VF_NUM_DQ) + qidx; @@ -1039,7 +1003,7 @@ octeontx_create(struct rte_vdev_device *dev, int port, uint8_t evdev, char octtx_name[OCTEONTX_MAX_NAME_LEN]; struct octeontx_nic *nic = NULL; struct rte_eth_dev *eth_dev = NULL; - struct rte_eth_dev_data *data = NULL; + struct rte_eth_dev_data *data; const char *name = rte_vdev_device_name(dev); PMD_INIT_FUNC_TRACE(); @@ -1052,16 +1016,10 @@ octeontx_create(struct rte_vdev_device *dev, int port, uint8_t evdev, eth_dev->tx_pkt_burst = octeontx_xmit_pkts; eth_dev->rx_pkt_burst = octeontx_recv_pkts; + rte_eth_dev_probing_finish(eth_dev); return 0; } - data = rte_zmalloc_socket(octtx_name, sizeof(*data), 0, socket_id); - if (data == NULL) { - octeontx_log_err("failed to allocate devdata"); - res = -ENOMEM; - 
goto err; - } - nic = rte_zmalloc_socket(octtx_name, sizeof(*nic), 0, socket_id); if (nic == NULL) { octeontx_log_err("failed to allocate nic structure"); @@ -1097,11 +1055,9 @@ octeontx_create(struct rte_vdev_device *dev, int port, uint8_t evdev, eth_dev->data->kdrv = RTE_KDRV_NONE; eth_dev->data->numa_node = dev->device.numa_node; - rte_memcpy(data, (eth_dev)->data, sizeof(*data)); + data = eth_dev->data; data->dev_private = nic; - data->port_id = eth_dev->data->port_id; - snprintf(data->name, sizeof(data->name), "%s", eth_dev->data->name); nic->ev_queues = 1; nic->ev_ports = 1; @@ -1120,7 +1076,6 @@ octeontx_create(struct rte_vdev_device *dev, int port, uint8_t evdev, goto err; } - eth_dev->data = data; eth_dev->dev_ops = &octeontx_dev_ops; /* Finally save ethdev pointer to the NIC structure */ @@ -1146,10 +1101,11 @@ octeontx_create(struct rte_vdev_device *dev, int port, uint8_t evdev, rte_octeontx_pchan_map[(nic->base_ochan >> 8) & 0x7] [(nic->base_ochan >> 4) & 0xF] = data->port_id; + rte_eth_dev_probing_finish(eth_dev); return data->port_id; err: - if (port) + if (nic) octeontx_port_close(nic); if (eth_dev != NULL) { @@ -1188,7 +1144,6 @@ octeontx_remove(struct rte_vdev_device *dev) rte_free(eth_dev->data->mac_addrs); rte_free(eth_dev->data->dev_private); - rte_free(eth_dev->data); rte_eth_dev_release_port(eth_dev); rte_event_dev_close(nic->evdev); } @@ -1210,12 +1165,27 @@ octeontx_probe(struct rte_vdev_device *dev) struct rte_event_dev_config dev_conf; const char *eventdev_name = "event_octeontx"; struct rte_event_dev_info info; + struct rte_eth_dev *eth_dev; struct octeontx_vdev_init_params init_params = { OCTEONTX_VDEV_DEFAULT_MAX_NR_PORT }; dev_name = rte_vdev_device_name(dev); + + if (rte_eal_process_type() == RTE_PROC_SECONDARY && + strlen(rte_vdev_device_args(dev)) == 0) { + eth_dev = rte_eth_dev_attach_secondary(dev_name); + if (!eth_dev) { + RTE_LOG(ERR, PMD, "Failed to probe %s\n", dev_name); + return -1; + } + /* TODO: request info from primary to set up Rx and Tx */ + eth_dev->dev_ops = &octeontx_dev_ops; + rte_eth_dev_probing_finish(eth_dev); + return 0; + } + res = octeontx_parse_vdev_init_params(&init_params, dev); if (res < 0) return -EINVAL; diff --git a/drivers/net/octeontx/octeontx_ethdev.h b/drivers/net/octeontx/octeontx_ethdev.h index 10e42e14..14f16969 100644 --- a/drivers/net/octeontx/octeontx_ethdev.h +++ b/drivers/net/octeontx/octeontx_ethdev.h @@ -28,6 +28,10 @@ #define OCTEONTX_MAX_BGX_PORTS 4 #define OCTEONTX_MAX_LMAC_PER_BGX 4 +#define OCTEONTX_RX_OFFLOADS (DEV_RX_OFFLOAD_CRC_STRIP \ + | DEV_RX_OFFLOAD_CHECKSUM) +#define OCTEONTX_TX_OFFLOADS DEV_TX_OFFLOAD_MT_LOCKFREE + static inline struct octeontx_nic * octeontx_pmd_priv(struct rte_eth_dev *dev) { diff --git a/drivers/net/pcap/meson.build b/drivers/net/pcap/meson.build index 8b81214e..0c4e0201 100644 --- a/drivers/net/pcap/meson.build +++ b/drivers/net/pcap/meson.build @@ -1,22 +1,12 @@ # SPDX-License-Identifier: BSD-3-Clause # Copyright(c) 2017 Intel Corporation -if meson.is_cross_build() - pcap_dep = cc.find_library('pcap', required: false) - if pcap_dep.found() - ext_deps += pcap_dep - else - build = false - endif +pcap_dep = cc.find_library('pcap', required: false) +if pcap_dep.found() and cc.has_header('pcap.h', dependencies: pcap_dep) + build = true else - pcap_dep = dependency('pcap', required: false) - if pcap_dep.found() == true - ext_deps += pcap_dep - elif find_program('pcap-config', required: false).found() == true - ext_deps += cc.find_library('pcap') - else - build = false - endif + 
build = false endif sources = files('rte_eth_pcap.c') +ext_deps += pcap_dep pkgconfig_extra_libs += '-lpcap' diff --git a/drivers/net/pcap/rte_eth_pcap.c b/drivers/net/pcap/rte_eth_pcap.c index c1571e1f..6bd4a7d7 100644 --- a/drivers/net/pcap/rte_eth_pcap.c +++ b/drivers/net/pcap/rte_eth_pcap.c @@ -96,9 +96,15 @@ static struct rte_eth_link pmd_link = { .link_speed = ETH_SPEED_NUM_10G, .link_duplex = ETH_LINK_FULL_DUPLEX, .link_status = ETH_LINK_DOWN, - .link_autoneg = ETH_LINK_AUTONEG, + .link_autoneg = ETH_LINK_FIXED, }; +static int eth_pcap_logtype; + +#define PMD_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, eth_pcap_logtype, \ + "%s(): " fmt "\n", __func__, ##args) + static int eth_pcap_rx_jumbo(struct rte_mempool *mb_pool, struct rte_mbuf *mbuf, const u_char *data, uint16_t data_len) @@ -256,8 +262,8 @@ eth_pcap_tx_dumper(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) pcap_dump((u_char *)dumper_q->dumper, &header, tx_pcap_data); } else { - RTE_LOG(ERR, PMD, - "Dropping PCAP packet. Size (%d) > max jumbo size (%d).\n", + PMD_LOG(ERR, + "Dropping PCAP packet. Size (%d) > max jumbo size (%d).", mbuf->pkt_len, ETHER_MAX_JUMBO_FRAME_LEN); @@ -313,8 +319,8 @@ eth_pcap_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) ret = pcap_sendpacket(tx_queue->pcap, tx_pcap_data, mbuf->pkt_len); } else { - RTE_LOG(ERR, PMD, - "Dropping PCAP packet. Size (%d) > max jumbo size (%d).\n", + PMD_LOG(ERR, + "Dropping PCAP packet. Size (%d) > max jumbo size (%d).", mbuf->pkt_len, ETHER_MAX_JUMBO_FRAME_LEN); @@ -346,7 +352,7 @@ open_iface_live(const char *iface, pcap_t **pcap) { RTE_ETH_PCAP_PROMISC, RTE_ETH_PCAP_TIMEOUT, errbuf); if (*pcap == NULL) { - RTE_LOG(ERR, PMD, "Couldn't open %s: %s\n", iface, errbuf); + PMD_LOG(ERR, "Couldn't open %s: %s", iface, errbuf); return -1; } @@ -357,7 +363,7 @@ static int open_single_iface(const char *iface, pcap_t **pcap) { if (open_iface_live(iface, pcap) < 0) { - RTE_LOG(ERR, PMD, "Couldn't open interface %s\n", iface); + PMD_LOG(ERR, "Couldn't open interface %s", iface); return -1; } @@ -376,7 +382,7 @@ open_single_tx_pcap(const char *pcap_filename, pcap_dumper_t **dumper) */ tx_pcap = pcap_open_dead(DLT_EN10MB, RTE_ETH_PCAP_SNAPSHOT_LEN); if (tx_pcap == NULL) { - RTE_LOG(ERR, PMD, "Couldn't create dead pcap\n"); + PMD_LOG(ERR, "Couldn't create dead pcap"); return -1; } @@ -384,7 +390,7 @@ open_single_tx_pcap(const char *pcap_filename, pcap_dumper_t **dumper) *dumper = pcap_dump_open(tx_pcap, pcap_filename); if (*dumper == NULL) { pcap_close(tx_pcap); - RTE_LOG(ERR, PMD, "Couldn't open %s for writing.\n", + PMD_LOG(ERR, "Couldn't open %s for writing.", pcap_filename); return -1; } @@ -398,7 +404,7 @@ open_single_rx_pcap(const char *pcap_filename, pcap_t **pcap) { *pcap = pcap_open_offline(pcap_filename, errbuf); if (*pcap == NULL) { - RTE_LOG(ERR, PMD, "Couldn't open %s: %s\n", pcap_filename, + PMD_LOG(ERR, "Couldn't open %s: %s", pcap_filename, errbuf); return -1; } @@ -773,27 +779,16 @@ pmd_init_internals(struct rte_vdev_device *vdev, struct pmd_internals **internals, struct rte_eth_dev **eth_dev) { - struct rte_eth_dev_data *data = NULL; + struct rte_eth_dev_data *data; unsigned int numa_node = vdev->device.numa_node; - const char *name; - name = rte_vdev_device_name(vdev); - RTE_LOG(INFO, PMD, "Creating pcap-backed ethdev on numa socket %d\n", + PMD_LOG(INFO, "Creating pcap-backed ethdev on numa socket %d", numa_node); - /* now do all data allocation - for eth_dev structure - * and internal (private) data - */ - data = 
rte_zmalloc_socket(name, sizeof(*data), 0, numa_node); - if (data == NULL) - return -1; - /* reserve an ethdev entry */ *eth_dev = rte_eth_vdev_allocate(vdev, sizeof(**internals)); - if (*eth_dev == NULL) { - rte_free(data); + if (!(*eth_dev)) return -1; - } /* now put it all together * - store queue data in internals, @@ -802,7 +797,7 @@ pmd_init_internals(struct rte_vdev_device *vdev, * - and point eth_dev structure to new eth_dev_data structure */ *internals = (*eth_dev)->data->dev_private; - rte_memcpy(data, (*eth_dev)->data, sizeof(*data)); + data = (*eth_dev)->data; data->nb_rx_queues = (uint16_t)nb_rx_queues; data->nb_tx_queues = (uint16_t)nb_tx_queues; data->dev_link = pmd_link; @@ -812,7 +807,6 @@ pmd_init_internals(struct rte_vdev_device *vdev, * NOTE: we'll replace the data element, of originally allocated * eth_dev so the rings are local per-process */ - (*eth_dev)->data = data; (*eth_dev)->dev_ops = &ops; return 0; @@ -899,6 +893,7 @@ eth_from_pcaps(struct rte_vdev_device *vdev, else eth_dev->tx_pkt_burst = eth_pcap_tx; + rte_eth_dev_probing_finish(eth_dev); return 0; } @@ -910,16 +905,30 @@ pmd_pcap_probe(struct rte_vdev_device *dev) struct rte_kvargs *kvlist; struct pmd_devargs pcaps = {0}; struct pmd_devargs dumpers = {0}; + struct rte_eth_dev *eth_dev; int single_iface = 0; int ret; name = rte_vdev_device_name(dev); - RTE_LOG(INFO, PMD, "Initializing pmd_pcap for %s\n", name); + PMD_LOG(INFO, "Initializing pmd_pcap for %s", name); gettimeofday(&start_time, NULL); start_cycles = rte_get_timer_cycles(); hz = rte_get_timer_hz(); + if (rte_eal_process_type() == RTE_PROC_SECONDARY && + strlen(rte_vdev_device_args(dev)) == 0) { + eth_dev = rte_eth_dev_attach_secondary(name); + if (!eth_dev) { + PMD_LOG(ERR, "Failed to probe %s", name); + return -1; + } + /* TODO: request info from primary to set up Rx and Tx */ + eth_dev->dev_ops = &ops; + rte_eth_dev_probing_finish(eth_dev); + return 0; + } + kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments); if (kvlist == NULL) return -1; @@ -1008,7 +1017,7 @@ pmd_pcap_remove(struct rte_vdev_device *dev) { struct rte_eth_dev *eth_dev = NULL; - RTE_LOG(INFO, PMD, "Closing pcap ethdev on numa socket %d\n", + PMD_LOG(INFO, "Closing pcap ethdev on numa socket %d", rte_socket_id()); if (!dev) @@ -1020,7 +1029,6 @@ pmd_pcap_remove(struct rte_vdev_device *dev) return -1; rte_free(eth_dev->data->dev_private); - rte_free(eth_dev->data); rte_eth_dev_release_port(eth_dev); @@ -1040,3 +1048,12 @@ RTE_PMD_REGISTER_PARAM_STRING(net_pcap, ETH_PCAP_RX_IFACE_ARG "= " ETH_PCAP_TX_IFACE_ARG "= " ETH_PCAP_IFACE_ARG "="); + +RTE_INIT(eth_pcap_init_log); +static void +eth_pcap_init_log(void) +{ + eth_pcap_logtype = rte_log_register("pmd.net.pcap"); + if (eth_pcap_logtype >= 0) + rte_log_set_level(eth_pcap_logtype, RTE_LOG_NOTICE); +} diff --git a/drivers/net/qede/LICENSE.qede_pmd b/drivers/net/qede/LICENSE.qede_pmd index c7cbdccc..022085a1 100644 --- a/drivers/net/qede/LICENSE.qede_pmd +++ b/drivers/net/qede/LICENSE.qede_pmd @@ -1,28 +1,3 @@ -/* - * BSD LICENSE - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. 
Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of QLogic Corporation nor the name of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written consent. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS' - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF - * THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. */ diff --git a/drivers/net/qede/Makefile b/drivers/net/qede/Makefile index ccbffa45..c30a867e 100644 --- a/drivers/net/qede/Makefile +++ b/drivers/net/qede/Makefile @@ -1,6 +1,6 @@ -# Copyright (c) 2016 QLogic Corporation. +# Copyright (c) 2016 - 2018 Cavium Inc. # All rights reserved. -# www.qlogic.com +# www.cavium.com # # See LICENSE.qede_pmd for copyright and licensing details. @@ -73,8 +73,7 @@ ifeq ($(shell clang -Wno-pointer-bool-conversion -Werror -E - < /dev/null > /dev CFLAGS_BASE_DRIVER += -Wno-pointer-bool-conversion endif else #ICC -CFLAGS_BASE_DRIVER += -wd188 #188: enumerated type mixed with another type -CFLAGS_qede_ethdev.o += -wd279 #279: controlling expression is constant +CFLAGS_qede_ethdev.o += -diag-disable 279 #279: controlling expression is constant endif # diff --git a/drivers/net/qede/base/bcm_osal.c b/drivers/net/qede/base/bcm_osal.c index fe42f325..ca1c2b11 100644 --- a/drivers/net/qede/base/bcm_osal.c +++ b/drivers/net/qede/base/bcm_osal.c @@ -1,7 +1,7 @@ /* - * Copyright (c) 2016 QLogic Corporation. + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.qede_pmd for copyright and licensing details. 
*/ @@ -133,10 +133,10 @@ void *osal_dma_alloc_coherent(struct ecore_dev *p_dev, snprintf(mz_name, sizeof(mz_name) - 1, "%lx", (unsigned long)rte_get_timer_cycles()); if (core_id == (unsigned int)LCORE_ID_ANY) - core_id = 0; + core_id = rte_get_master_lcore(); socket_id = rte_lcore_to_socket_id(core_id); - mz = rte_memzone_reserve_aligned(mz_name, size, - socket_id, 0, RTE_CACHE_LINE_SIZE); + mz = rte_memzone_reserve_aligned(mz_name, size, socket_id, + RTE_MEMZONE_IOVA_CONTIG, RTE_CACHE_LINE_SIZE); if (!mz) { DP_ERR(p_dev, "Unable to allocate DMA memory " "of size %zu bytes - %s\n", @@ -172,9 +172,10 @@ void *osal_dma_alloc_coherent_aligned(struct ecore_dev *p_dev, snprintf(mz_name, sizeof(mz_name) - 1, "%lx", (unsigned long)rte_get_timer_cycles()); if (core_id == (unsigned int)LCORE_ID_ANY) - core_id = 0; + core_id = rte_get_master_lcore(); socket_id = rte_lcore_to_socket_id(core_id); - mz = rte_memzone_reserve_aligned(mz_name, size, socket_id, 0, align); + mz = rte_memzone_reserve_aligned(mz_name, size, socket_id, + RTE_MEMZONE_IOVA_CONTIG, align); if (!mz) { DP_ERR(p_dev, "Unable to allocate DMA memory " "of size %zu bytes - %s\n", diff --git a/drivers/net/qede/base/bcm_osal.h b/drivers/net/qede/base/bcm_osal.h index 52c2f0ec..27090c79 100644 --- a/drivers/net/qede/base/bcm_osal.h +++ b/drivers/net/qede/base/bcm_osal.h @@ -1,7 +1,7 @@ /* - * Copyright (c) 2016 QLogic Corporation. + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.qede_pmd for copyright and licensing details. */ diff --git a/drivers/net/qede/base/common_hsi.h b/drivers/net/qede/base/common_hsi.h index 9a6059ac..52507844 100644 --- a/drivers/net/qede/base/common_hsi.h +++ b/drivers/net/qede/base/common_hsi.h @@ -1,7 +1,7 @@ /* - * Copyright (c) 2016 QLogic Corporation. + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.qede_pmd for copyright and licensing details. */ @@ -96,10 +96,10 @@ /****************************************************************************/ -#define FW_MAJOR_VERSION 8 -#define FW_MINOR_VERSION 30 -#define FW_REVISION_VERSION 12 -#define FW_ENGINEERING_VERSION 0 +#define FW_MAJOR_VERSION 8 +#define FW_MINOR_VERSION 33 +#define FW_REVISION_VERSION 12 +#define FW_ENGINEERING_VERSION 0 /***********************/ /* COMMON HW CONSTANTS */ diff --git a/drivers/net/qede/base/ecore.h b/drivers/net/qede/base/ecore.h index ce5f3a90..57d6aa95 100644 --- a/drivers/net/qede/base/ecore.h +++ b/drivers/net/qede/base/ecore.h @@ -1,7 +1,7 @@ /* - * Copyright (c) 2016 QLogic Corporation. + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.qede_pmd for copyright and licensing details. 
*/ @@ -41,6 +41,9 @@ ((FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) | \ (FW_REVISION_VERSION << 8) | FW_ENGINEERING_VERSION) +#define IS_ECORE_PACING(p_hwfn) \ + (!!(p_hwfn->b_en_pacing)) + #define MAX_HWFNS_PER_DEVICE 2 #define NAME_SIZE 128 /* @DPDK */ #define ECORE_WFQ_UNIT 100 @@ -432,8 +435,10 @@ struct ecore_hw_info { #define DMAE_MAX_RW_SIZE 0x2000 struct ecore_dmae_info { - /* Mutex for synchronizing access to functions */ - osal_mutex_t mutex; + /* Spinlock for synchronizing access to functions */ + osal_spinlock_t lock; + + bool b_mem_ready; u8 channel; @@ -534,6 +539,12 @@ enum ecore_mf_mode_bit { ECORE_MF_UFP_SPECIFIC, ECORE_MF_DISABLE_ARFS, + + /* Use vlan for steering */ + ECORE_MF_8021Q_TAGGING, + + /* Use stag for steering */ + ECORE_MF_8021AD_TAGGING, }; enum ecore_ufp_mode { @@ -672,6 +683,13 @@ struct ecore_hwfn { /* Mechanism for recovering from doorbell drop */ struct ecore_db_recovery_info db_recovery_info; + /* Enable/disable pacing. If requested to enable, then + * IOV and mcos configuration will be skipped. + * This actually reflects the value requested in + * struct ecore_hw_prepare_params by the ecore client. + */ + bool b_en_pacing; + /* @DPDK */ struct ecore_ptt *p_arfs_ptt; }; @@ -924,12 +942,16 @@ void ecore_set_fw_mac_addr(__le16 *fw_msb, __le16 *fw_mid, __le16 *fw_lsb, #define PQ_FLAGS_ACK (1 << 4) #define PQ_FLAGS_OFLD (1 << 5) #define PQ_FLAGS_VFS (1 << 6) +#define PQ_FLAGS_LLT (1 << 7) /* physical queue index for cm context initialization */ u16 ecore_get_cm_pq_idx(struct ecore_hwfn *p_hwfn, u32 pq_flags); u16 ecore_get_cm_pq_idx_mcos(struct ecore_hwfn *p_hwfn, u8 tc); u16 ecore_get_cm_pq_idx_vf(struct ecore_hwfn *p_hwfn, u16 vf); -u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u8 qpid); +u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u16 rl); + +/* qm vport for rate limit configuration */ +u16 ecore_get_qm_vport_idx_rl(struct ecore_hwfn *p_hwfn, u16 rl); const char *ecore_hw_get_resc_name(enum ecore_resources res_id); diff --git a/drivers/net/qede/base/ecore_attn_values.h b/drivers/net/qede/base/ecore_attn_values.h index d8951bcc..d893e0a6 100644 --- a/drivers/net/qede/base/ecore_attn_values.h +++ b/drivers/net/qede/base/ecore_attn_values.h @@ -1,7 +1,7 @@ /* - * Copyright (c) 2016 QLogic Corporation. + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.qede_pmd for copyright and licensing details. */ diff --git a/drivers/net/qede/base/ecore_chain.h b/drivers/net/qede/base/ecore_chain.h index d8f69ad6..0b797460 100644 --- a/drivers/net/qede/base/ecore_chain.h +++ b/drivers/net/qede/base/ecore_chain.h @@ -1,7 +1,7 @@ /* - * Copyright (c) 2016 QLogic Corporation. + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.qede_pmd for copyright and licensing details. */ @@ -526,7 +526,7 @@ static OSAL_INLINE void ecore_chain_reset(struct ecore_chain *p_chain) p_chain->p_prod_elem = p_chain->p_virt_addr; if (p_chain->mode == ECORE_CHAIN_MODE_PBL) { - /* Use (page_cnt - 1) as a reset value for the prod/cons page's + /* Use "page_cnt-1" as a reset value for the prod/cons page's * indices, to avoid unnecessary page advancing on the first * call to ecore_chain_produce/consume. Instead, the indices * will be advanced to page_cnt and then will be wrapped to 0.
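The "page_cnt-1" reset value above and the "idx-1" adjustment in the ecore_chain_set_prod()/ecore_chain_set_cons() hunk that follows are two sides of the same wrap-around arithmetic: produce/consume advances the page index before the element index once it reaches next_page_mask, so a stored index has to be stepped back by one element before being mapped to a page. A minimal standalone sketch of that mapping, with an illustrative name (pbl_page_idx is not a driver function) and the driver's u32 typedef assumed:

	static inline u32 pbl_page_idx(u32 idx, u32 capacity, u32 elem_per_page)
	{
		/* Step back one element, wrapping at capacity, because the
		 * page index was already advanced when the chain crossed the
		 * end of the previous page; then map the element to its page.
		 */
		u32 elem_idx = (idx - 1 + capacity) % capacity;

		return elem_idx / elem_per_page;
	}

With capacity a whole number of pages, an index of 0 maps to element capacity-1 and hence to the last page, page_cnt-1, which is exactly the reset value ecore_chain_reset() installs above.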
@@ -726,6 +726,21 @@ out: static OSAL_INLINE void ecore_chain_set_prod(struct ecore_chain *p_chain, u32 prod_idx, void *p_prod_elem) { + if (p_chain->mode == ECORE_CHAIN_MODE_PBL) { + /* Use "prod_idx-1" since ecore_chain_produce() advances the + * page index before the producer index when getting to + * "next_page_mask". + */ + u32 elem_idx = + (prod_idx - 1 + p_chain->capacity) % p_chain->capacity; + u32 page_idx = elem_idx / p_chain->elem_per_page; + + if (is_chain_u16(p_chain)) + p_chain->pbl.c.u16.prod_page_idx = (u16)page_idx; + else + p_chain->pbl.c.u32.prod_page_idx = page_idx; + } + if (is_chain_u16(p_chain)) p_chain->u.chain16.prod_idx = (u16)prod_idx; else @@ -733,6 +748,38 @@ static OSAL_INLINE void ecore_chain_set_prod(struct ecore_chain *p_chain, p_chain->p_prod_elem = p_prod_elem; } +/** + * @brief ecore_chain_set_cons - sets the cons to the given value + * + * @param cons_idx + * @param p_cons_elem + */ +static OSAL_INLINE void ecore_chain_set_cons(struct ecore_chain *p_chain, + u32 cons_idx, void *p_cons_elem) +{ + if (p_chain->mode == ECORE_CHAIN_MODE_PBL) { + /* Use "cons_idx-1" since ecore_chain_consume() advances the + * page index before the consumer index when getting to + * "next_page_mask". + */ + u32 elem_idx = + (cons_idx - 1 + p_chain->capacity) % p_chain->capacity; + u32 page_idx = elem_idx / p_chain->elem_per_page; + + if (is_chain_u16(p_chain)) + p_chain->pbl.c.u16.cons_page_idx = (u16)page_idx; + else + p_chain->pbl.c.u32.cons_page_idx = page_idx; + } + + if (is_chain_u16(p_chain)) + p_chain->u.chain16.cons_idx = (u16)cons_idx; + else + p_chain->u.chain32.cons_idx = cons_idx; + + p_chain->p_cons_elem = p_cons_elem; +} + /** * @brief ecore_chain_pbl_zero_mem - set chain memory to 0 * diff --git a/drivers/net/qede/base/ecore_cxt.c b/drivers/net/qede/base/ecore_cxt.c index 50bd66da..a91b2ff3 100644 --- a/drivers/net/qede/base/ecore_cxt.c +++ b/drivers/net/qede/base/ecore_cxt.c @@ -1,7 +1,7 @@ /* - * Copyright (c) 2016 QLogic Corporation. + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.qede_pmd for copyright and licensing details. 
*/ @@ -834,7 +834,7 @@ static enum _ecore_status_t ecore_cxt_src_t2_alloc(struct ecore_hwfn *p_hwfn) p_mngr->t2_num_pages * sizeof(struct ecore_dma_mem)); if (!p_mngr->t2) { - DP_NOTICE(p_hwfn, true, "Failed to allocate t2 table\n"); + DP_NOTICE(p_hwfn, false, "Failed to allocate t2 table\n"); rc = ECORE_NOMEM; goto t2_fail; } @@ -919,6 +919,9 @@ static void ecore_ilt_shadow_free(struct ecore_hwfn *p_hwfn) struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; u32 ilt_size, i; + if (p_mngr->ilt_shadow == OSAL_NULL) + return; + ilt_size = ecore_cxt_ilt_shadow_size(p_cli); for (i = 0; p_mngr->ilt_shadow && i < ilt_size; i++) { @@ -931,6 +934,7 @@ static void ecore_ilt_shadow_free(struct ecore_hwfn *p_hwfn) p_dma->p_virt = OSAL_NULL; } OSAL_FREE(p_hwfn->p_dev, p_mngr->ilt_shadow); + p_mngr->ilt_shadow = OSAL_NULL; } static enum _ecore_status_t @@ -1000,8 +1004,7 @@ static enum _ecore_status_t ecore_ilt_shadow_alloc(struct ecore_hwfn *p_hwfn) size * sizeof(struct ecore_dma_mem)); if (!p_mngr->ilt_shadow) { - DP_NOTICE(p_hwfn, true, - "Failed to allocate ilt shadow table\n"); + DP_NOTICE(p_hwfn, false, "Failed to allocate ilt shadow table\n"); rc = ECORE_NOMEM; goto ilt_shadow_fail; } @@ -1044,12 +1047,14 @@ static void ecore_cid_map_free(struct ecore_hwfn *p_hwfn) for (type = 0; type < MAX_CONN_TYPES; type++) { OSAL_FREE(p_hwfn->p_dev, p_mngr->acquired[type].cid_map); + p_mngr->acquired[type].cid_map = OSAL_NULL; p_mngr->acquired[type].max_count = 0; p_mngr->acquired[type].start_cid = 0; for (vf = 0; vf < COMMON_MAX_NUM_VFS; vf++) { OSAL_FREE(p_hwfn->p_dev, p_mngr->acquired_vf[type][vf].cid_map); + p_mngr->acquired_vf[type][vf].cid_map = OSAL_NULL; p_mngr->acquired_vf[type][vf].max_count = 0; p_mngr->acquired_vf[type][vf].start_cid = 0; } @@ -1126,8 +1131,7 @@ enum _ecore_status_t ecore_cxt_mngr_alloc(struct ecore_hwfn *p_hwfn) p_mngr = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_mngr)); if (!p_mngr) { - DP_NOTICE(p_hwfn, true, - "Failed to allocate `struct ecore_cxt_mngr'\n"); + DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_cxt_mngr'\n"); return ECORE_NOMEM; } @@ -1189,21 +1193,21 @@ enum _ecore_status_t ecore_cxt_tables_alloc(struct ecore_hwfn *p_hwfn) /* Allocate the ILT shadow table */ rc = ecore_ilt_shadow_alloc(p_hwfn); if (rc) { - DP_NOTICE(p_hwfn, true, "Failed to allocate ilt memory\n"); + DP_NOTICE(p_hwfn, false, "Failed to allocate ilt memory\n"); goto tables_alloc_fail; } /* Allocate the T2 table */ rc = ecore_cxt_src_t2_alloc(p_hwfn); if (rc) { - DP_NOTICE(p_hwfn, true, "Failed to allocate T2 memory\n"); + DP_NOTICE(p_hwfn, false, "Failed to allocate T2 memory\n"); goto tables_alloc_fail; } /* Allocate and initialize the acquired cids bitmaps */ rc = ecore_cid_map_alloc(p_hwfn); if (rc) { - DP_NOTICE(p_hwfn, true, "Failed to allocate cid maps\n"); + DP_NOTICE(p_hwfn, false, "Failed to allocate cid maps\n"); goto tables_alloc_fail; } @@ -1427,7 +1431,8 @@ static void ecore_cdu_init_pf(struct ecore_hwfn *p_hwfn) } } -void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) +void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, + bool is_pf_loading) { struct ecore_qm_info *qm_info = &p_hwfn->qm_info; struct ecore_mcp_link_state *p_link; @@ -1438,8 +1443,9 @@ void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) p_link = &ECORE_LEADING_HWFN(p_hwfn->p_dev)->mcp_info->link_output; - ecore_qm_pf_rt_init(p_hwfn, p_ptt, p_hwfn->port_id, - p_hwfn->rel_pf_id, qm_info->max_phys_tcs_per_port, + ecore_qm_pf_rt_init(p_hwfn, 
p_ptt, p_hwfn->rel_pf_id, + qm_info->max_phys_tcs_per_port, + is_pf_loading, iids.cids, iids.vf_cids, iids.tids, qm_info->start_pq, qm_info->num_pqs - qm_info->num_vf_pqs, @@ -1797,7 +1803,7 @@ void ecore_cxt_hw_init_common(struct ecore_hwfn *p_hwfn) void ecore_cxt_hw_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) { - ecore_qm_init_pf(p_hwfn, p_ptt); + ecore_qm_init_pf(p_hwfn, p_ptt, true); ecore_cm_init_pf(p_hwfn); ecore_dq_init_pf(p_hwfn); ecore_cdu_init_pf(p_hwfn); diff --git a/drivers/net/qede/base/ecore_cxt.h b/drivers/net/qede/base/ecore_cxt.h index 54761e4e..3bcbe8f1 100644 --- a/drivers/net/qede/base/ecore_cxt.h +++ b/drivers/net/qede/base/ecore_cxt.h @@ -1,7 +1,7 @@ /* - * Copyright (c) 2016 QLogic Corporation. + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.qede_pmd for copyright and licensing details. */ @@ -107,8 +107,10 @@ void ecore_cxt_hw_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt); * * @param p_hwfn * @param p_ptt + * @param is_pf_loading */ -void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt); +void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, + bool is_pf_loading); /** * @brief Reconfigures QM pf on the fly diff --git a/drivers/net/qede/base/ecore_cxt_api.h b/drivers/net/qede/base/ecore_cxt_api.h index 6d87620d..65509add 100644 --- a/drivers/net/qede/base/ecore_cxt_api.h +++ b/drivers/net/qede/base/ecore_cxt_api.h @@ -1,7 +1,7 @@ /* - * Copyright (c) 2016 QLogic Corporation. + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.qede_pmd for copyright and licensing details. */ diff --git a/drivers/net/qede/base/ecore_dcbx.c b/drivers/net/qede/base/ecore_dcbx.c index 21ddda92..9b8d39f6 100644 --- a/drivers/net/qede/base/ecore_dcbx.c +++ b/drivers/net/qede/base/ecore_dcbx.c @@ -1,7 +1,7 @@ /* - * Copyright (c) 2016 QLogic Corporation. + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.qede_pmd for copyright and licensing details. */ @@ -149,6 +149,10 @@ ecore_dcbx_set_params(struct ecore_dcbx_results *p_data, } p_data->arr[type].update = UPDATE_DCB_DSCP; + /* Do not add vlan tag 0 when DCB is enabled and port is in UFP mode */ + if (OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits)) + p_data->arr[type].dont_add_vlan0 = true; + /* QM reconf data */ if (p_hwfn->hw_info.personality == personality) p_hwfn->hw_info.offload_tc = tc; @@ -910,7 +914,7 @@ enum _ecore_status_t ecore_dcbx_info_alloc(struct ecore_hwfn *p_hwfn) p_hwfn->p_dcbx_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_hwfn->p_dcbx_info)); if (!p_hwfn->p_dcbx_info) { - DP_NOTICE(p_hwfn, true, + DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_dcbx_info'"); return ECORE_NOMEM; } @@ -935,6 +939,7 @@ static void ecore_dcbx_update_protocol_data(struct protocol_dcb_data *p_data, p_data->dcb_tc = p_src->arr[type].tc; p_data->dscp_enable_flag = p_src->arr[type].dscp_enable; p_data->dscp_val = p_src->arr[type].dscp_val; + p_data->dcb_dont_add_vlan0 = p_src->arr[type].dont_add_vlan0; } /* Set pf update ramrod command params */ diff --git a/drivers/net/qede/base/ecore_dcbx.h b/drivers/net/qede/base/ecore_dcbx.h index 469e42dd..49df62ce 100644 --- a/drivers/net/qede/base/ecore_dcbx.h +++ b/drivers/net/qede/base/ecore_dcbx.h @@ -1,7 +1,7 @@ /* - * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.qede_pmd for copyright and licensing details. */ diff --git a/drivers/net/qede/base/ecore_dcbx_api.h b/drivers/net/qede/base/ecore_dcbx_api.h index 9ff4df4c..2ad1def4 100644 --- a/drivers/net/qede/base/ecore_dcbx_api.h +++ b/drivers/net/qede/base/ecore_dcbx_api.h @@ -1,7 +1,7 @@ /* - * Copyright (c) 2016 QLogic Corporation. + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.qede_pmd for copyright and licensing details. */ @@ -29,6 +29,7 @@ struct ecore_dcbx_app_data { u8 tc; /* Traffic Class */ bool dscp_enable; /* DSCP enabled */ u8 dscp_val; /* DSCP value */ + bool dont_add_vlan0; /* Do not insert a vlan tag with id 0 */ }; #ifndef __EXTRACT__LINUX__ diff --git a/drivers/net/qede/base/ecore_dev.c b/drivers/net/qede/base/ecore_dev.c index 744d2043..4ebbedd6 100644 --- a/drivers/net/qede/base/ecore_dev.c +++ b/drivers/net/qede/base/ecore_dev.c @@ -1,7 +1,7 @@ /* - * Copyright (c) 2016 QLogic Corporation. + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.qede_pmd for copyright and licensing details. */ @@ -39,7 +39,7 @@ * there's more than a single compiled ecore component in system]. */ static osal_spinlock_t qm_lock; -static bool qm_lock_init; +static u32 qm_lock_ref_cnt; /******************** Doorbell Recovery *******************/ /* The doorbell recovery mechanism consists of a list of entries which represent @@ -227,7 +227,8 @@ enum _ecore_status_t ecore_db_recovery_setup(struct ecore_hwfn *p_hwfn) OSAL_LIST_INIT(&p_hwfn->db_recovery_info.list); #ifdef CONFIG_ECORE_LOCK_ALLOC - OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_hwfn->db_recovery_info.lock); + if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_hwfn->db_recovery_info.lock)) + return ECORE_NOMEM; #endif OSAL_SPIN_LOCK_INIT(&p_hwfn->db_recovery_info.lock); p_hwfn->db_recovery_info.db_recovery_counter = 0; @@ -411,7 +412,7 @@ void ecore_init_dp(struct ecore_dev *p_dev, } } -void ecore_init_struct(struct ecore_dev *p_dev) +enum _ecore_status_t ecore_init_struct(struct ecore_dev *p_dev) { u8 i; @@ -423,9 +424,10 @@ void ecore_init_struct(struct ecore_dev *p_dev) p_hwfn->b_active = false; #ifdef CONFIG_ECORE_LOCK_ALLOC - OSAL_MUTEX_ALLOC(p_hwfn, &p_hwfn->dmae_info.mutex); + if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_hwfn->dmae_info.lock)) + goto handle_err; #endif - OSAL_MUTEX_INIT(&p_hwfn->dmae_info.mutex); + OSAL_SPIN_LOCK_INIT(&p_hwfn->dmae_info.lock); } /* hwfn 0 is always active */ @@ -433,6 +435,17 @@ void ecore_init_struct(struct ecore_dev *p_dev) /* set the default cache alignment to 128 (may be overridden later) */ p_dev->cache_shift = 7; + return ECORE_SUCCESS; +#ifdef CONFIG_ECORE_LOCK_ALLOC +handle_err: + while (--i) { + struct ecore_hwfn *p_hwfn = OSAL_NULL; + + p_hwfn = &p_dev->hwfns[i]; + OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->dmae_info.lock); + } + return ECORE_NOMEM; +#endif } static void ecore_qm_info_free(struct ecore_hwfn *p_hwfn) @@ -500,11 +513,14 @@ static u32 ecore_get_pq_flags(struct ecore_hwfn *p_hwfn) /* feature flags */ if (IS_ECORE_SRIOV(p_hwfn->p_dev)) flags |= PQ_FLAGS_VFS; + if (IS_ECORE_PACING(p_hwfn)) + flags |= PQ_FLAGS_RLS; /* protocol flags */ switch (p_hwfn->hw_info.personality) { case ECORE_PCI_ETH: - flags |= PQ_FLAGS_MCOS; + if (!IS_ECORE_PACING(p_hwfn)) + flags |= PQ_FLAGS_MCOS; break; case ECORE_PCI_FCOE: flags |= PQ_FLAGS_OFLD; @@ -513,11 +529,14 @@ static u32 
ecore_get_pq_flags(struct ecore_hwfn *p_hwfn) flags |= PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD; break; case ECORE_PCI_ETH_ROCE: - flags |= PQ_FLAGS_MCOS | PQ_FLAGS_OFLD; + flags |= PQ_FLAGS_OFLD | PQ_FLAGS_LLT; + if (!IS_ECORE_PACING(p_hwfn)) + flags |= PQ_FLAGS_MCOS; break; case ECORE_PCI_ETH_IWARP: - flags |= PQ_FLAGS_MCOS | PQ_FLAGS_ACK | PQ_FLAGS_OOO | - PQ_FLAGS_OFLD; + flags |= PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD; + if (!IS_ECORE_PACING(p_hwfn)) + flags |= PQ_FLAGS_MCOS; break; default: DP_ERR(p_hwfn, "unknown personality %d\n", @@ -721,6 +740,7 @@ static void ecore_init_qm_pq(struct ecore_hwfn *p_hwfn, "pq overflow! pq %d, max pq %d\n", pq_idx, max_pq); /* init pq params */ + qm_info->qm_pq_params[pq_idx].port_id = p_hwfn->port_id; qm_info->qm_pq_params[pq_idx].vport_id = qm_info->start_vport + qm_info->num_vports; qm_info->qm_pq_params[pq_idx].tc_id = tc; @@ -823,7 +843,7 @@ u16 ecore_get_cm_pq_idx_vf(struct ecore_hwfn *p_hwfn, u16 vf) return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + vf; } -u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u8 rl) +u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u16 rl) { u16 max_rl = ecore_init_qm_get_num_pf_rls(p_hwfn); @@ -833,6 +853,23 @@ u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u8 rl) return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_RLS) + rl; } +u16 ecore_get_qm_vport_idx_rl(struct ecore_hwfn *p_hwfn, u16 rl) +{ + u16 start_pq, pq, qm_pq_idx; + + pq = ecore_get_cm_pq_idx_rl(p_hwfn, rl); + start_pq = p_hwfn->qm_info.start_pq; + qm_pq_idx = pq - start_pq - CM_TX_PQ_BASE; + + if (qm_pq_idx > p_hwfn->qm_info.num_pqs) { + DP_ERR(p_hwfn, + "qm_pq_idx %d must be smaller than %d\n", + qm_pq_idx, p_hwfn->qm_info.num_pqs); + } + + return p_hwfn->qm_info.qm_pq_params[qm_pq_idx].vport_id; +} + /* Functions for creating specific types of pqs */ static void ecore_init_qm_lb_pq(struct ecore_hwfn *p_hwfn) { @@ -1025,10 +1062,9 @@ static void ecore_dp_init_qm_params(struct ecore_hwfn *p_hwfn) for (i = 0; i < qm_info->num_pqs; i++) { pq = &qm_info->qm_pq_params[i]; DP_VERBOSE(p_hwfn, ECORE_MSG_HW, - "pq idx %d, vport_id %d, tc %d, wrr_grp %d," - " rl_valid %d\n", - qm_info->start_pq + i, pq->vport_id, pq->tc_id, - pq->wrr_group, pq->rl_valid); + "pq idx %d, port %d, vport_id %d, tc %d, wrr_grp %d, rl_valid %d\n", + qm_info->start_pq + i, pq->port_id, pq->vport_id, + pq->tc_id, pq->wrr_group, pq->rl_valid); } } @@ -1083,7 +1119,7 @@ enum _ecore_status_t ecore_qm_reconf(struct ecore_hwfn *p_hwfn, ecore_init_clear_rt_data(p_hwfn); /* prepare QM portion of runtime array */ - ecore_qm_init_pf(p_hwfn, p_ptt); + ecore_qm_init_pf(p_hwfn, p_ptt, false); /* activate init tool on runtime array */ rc = ecore_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id, @@ -1289,16 +1325,14 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev) /* DMA info initialization */ rc = ecore_dmae_info_alloc(p_hwfn); if (rc) { - DP_NOTICE(p_hwfn, true, - "Failed to allocate memory for dmae_info" - " structure\n"); + DP_NOTICE(p_hwfn, false, "Failed to allocate memory for dmae_info structure\n"); goto alloc_err; } /* DCBX initialization */ rc = ecore_dcbx_info_alloc(p_hwfn); if (rc) { - DP_NOTICE(p_hwfn, true, + DP_NOTICE(p_hwfn, false, "Failed to allocate memory for dcbx structure\n"); goto alloc_err; } @@ -1307,7 +1341,7 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev) p_dev->reset_stats = OSAL_ZALLOC(p_dev, GFP_KERNEL, sizeof(*p_dev->reset_stats)); if (!p_dev->reset_stats) { - DP_NOTICE(p_dev, true, "Failed to allocate reset 
statistics\n"); + DP_NOTICE(p_dev, false, "Failed to allocate reset statistics\n"); goto alloc_no_mem; } @@ -1658,7 +1692,8 @@ static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn, ecore_init_cache_line_size(p_hwfn, p_ptt); - rc = ecore_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode); + rc = ecore_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ECORE_PATH_ID(p_hwfn), + hw_mode); if (rc != ECORE_SUCCESS) return rc; @@ -2160,6 +2195,11 @@ ecore_hw_init_pf(struct ecore_hwfn *p_hwfn, /* perform debug configuration when chip is out of reset */ OSAL_BEFORE_PF_START((void *)p_hwfn->p_dev, p_hwfn->my_id); + /* Sanity check before the PF init sequence that uses DMAE */ + rc = ecore_dmae_sanity(p_hwfn, p_ptt, "pf_phase"); + if (rc) + return rc; + /* PF Init sequence */ rc = ecore_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode); if (rc) @@ -2205,42 +2245,43 @@ ecore_hw_init_pf(struct ecore_hwfn *p_hwfn, DP_NOTICE(p_hwfn, true, "Function start ramrod failed\n"); } else { - prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1); - DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, - "PRS_REG_SEARCH_TAG1: %x\n", prs_reg); - - if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE) { - ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1, - (1 << 2)); - ecore_wr(p_hwfn, p_ptt, - PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST, - 0x100); - } - DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, - "PRS_REG_SEARCH registers after start PFn\n"); - prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP); - DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, - "PRS_REG_SEARCH_TCP: %x\n", prs_reg); - prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP); - DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, - "PRS_REG_SEARCH_UDP: %x\n", prs_reg); - prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE); - DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, - "PRS_REG_SEARCH_FCOE: %x\n", prs_reg); - prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE); - DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, - "PRS_REG_SEARCH_ROCE: %x\n", prs_reg); - prs_reg = ecore_rd(p_hwfn, p_ptt, - PRS_REG_SEARCH_TCP_FIRST_FRAG); - DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, - "PRS_REG_SEARCH_TCP_FIRST_FRAG: %x\n", - prs_reg); - prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1); - DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, - "PRS_REG_SEARCH_TAG1: %x\n", prs_reg); + return rc; + } + prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1); + DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, + "PRS_REG_SEARCH_TAG1: %x\n", prs_reg); + + if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE) { + ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1, + (1 << 2)); + ecore_wr(p_hwfn, p_ptt, + PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST, + 0x100); } + DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, + "PRS_REG_SEARCH registers after start PFn\n"); + prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP); + DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, + "PRS_REG_SEARCH_TCP: %x\n", prs_reg); + prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP); + DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, + "PRS_REG_SEARCH_UDP: %x\n", prs_reg); + prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE); + DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, + "PRS_REG_SEARCH_FCOE: %x\n", prs_reg); + prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE); + DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, + "PRS_REG_SEARCH_ROCE: %x\n", prs_reg); + prs_reg = ecore_rd(p_hwfn, p_ptt, + PRS_REG_SEARCH_TCP_FIRST_FRAG); + DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, + "PRS_REG_SEARCH_TCP_FIRST_FRAG: %x\n", + prs_reg); + prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1); + DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, + 
"PRS_REG_SEARCH_TAG1: %x\n", prs_reg); } - return rc; + return ECORE_SUCCESS; } enum _ecore_status_t ecore_pglueb_set_pfid_enable(struct ecore_hwfn *p_hwfn, @@ -2283,14 +2324,15 @@ static void ecore_reset_mb_shadow(struct ecore_hwfn *p_hwfn, } static void ecore_pglueb_clear_err(struct ecore_hwfn *p_hwfn, - struct ecore_ptt *p_ptt) + struct ecore_ptt *p_ptt) { ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR, 1 << p_hwfn->abs_pf_id); } -static void -ecore_fill_load_req_params(struct ecore_load_req_params *p_load_req, +static enum _ecore_status_t +ecore_fill_load_req_params(struct ecore_hwfn *p_hwfn, + struct ecore_load_req_params *p_load_req, struct ecore_drv_load_params *p_drv_load) { /* Make sure that if ecore-client didn't provide inputs, all the @@ -2302,15 +2344,51 @@ ecore_fill_load_req_params(struct ecore_load_req_params *p_load_req, OSAL_MEM_ZERO(p_load_req, sizeof(*p_load_req)); - if (p_drv_load != OSAL_NULL) { - p_load_req->drv_role = p_drv_load->is_crash_kernel ? - ECORE_DRV_ROLE_KDUMP : - ECORE_DRV_ROLE_OS; + if (p_drv_load == OSAL_NULL) + goto out; + + p_load_req->drv_role = p_drv_load->is_crash_kernel ? + ECORE_DRV_ROLE_KDUMP : + ECORE_DRV_ROLE_OS; + p_load_req->avoid_eng_reset = p_drv_load->avoid_eng_reset; + p_load_req->override_force_load = p_drv_load->override_force_load; + + /* Old MFW versions don't support timeout values other than default and + * none, so these values are replaced according to the fall-back action. + */ + + if (p_drv_load->mfw_timeout_val == ECORE_LOAD_REQ_LOCK_TO_DEFAULT || + p_drv_load->mfw_timeout_val == ECORE_LOAD_REQ_LOCK_TO_NONE || + (p_hwfn->mcp_info->capabilities & + FW_MB_PARAM_FEATURE_SUPPORT_DRV_LOAD_TO)) { p_load_req->timeout_val = p_drv_load->mfw_timeout_val; - p_load_req->avoid_eng_reset = p_drv_load->avoid_eng_reset; - p_load_req->override_force_load = - p_drv_load->override_force_load; + goto out; } + + switch (p_drv_load->mfw_timeout_fallback) { + case ECORE_TO_FALLBACK_TO_NONE: + p_load_req->timeout_val = ECORE_LOAD_REQ_LOCK_TO_NONE; + break; + case ECORE_TO_FALLBACK_TO_DEFAULT: + p_load_req->timeout_val = ECORE_LOAD_REQ_LOCK_TO_DEFAULT; + break; + case ECORE_TO_FALLBACK_FAIL_LOAD: + DP_NOTICE(p_hwfn, false, + "Received %d as a value for MFW timeout while the MFW supports only default [%d] or none [%d]. Abort.\n", + p_drv_load->mfw_timeout_val, + ECORE_LOAD_REQ_LOCK_TO_DEFAULT, + ECORE_LOAD_REQ_LOCK_TO_NONE); + return ECORE_ABORTED; + } + + DP_INFO(p_hwfn, + "Modified the MFW timeout value from %d to %s [%d] due to lack of MFW support\n", + p_drv_load->mfw_timeout_val, + (p_load_req->timeout_val == ECORE_LOAD_REQ_LOCK_TO_DEFAULT) ? 
+ "default" : "none", + p_load_req->timeout_val); +out: + return ECORE_SUCCESS; } enum _ecore_status_t ecore_vf_start(struct ecore_hwfn *p_hwfn, @@ -2366,12 +2444,17 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev, if (rc != ECORE_SUCCESS) return rc; - ecore_fill_load_req_params(&load_req_params, - p_params->p_drv_load_params); + ecore_set_spq_block_timeout(p_hwfn, p_params->spq_timeout_ms); + + rc = ecore_fill_load_req_params(p_hwfn, &load_req_params, + p_params->p_drv_load_params); + if (rc != ECORE_SUCCESS) + return rc; + rc = ecore_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt, &load_req_params); if (rc != ECORE_SUCCESS) { - DP_NOTICE(p_hwfn, true, + DP_NOTICE(p_hwfn, false, "Failed sending a LOAD_REQ command\n"); return rc; } @@ -2404,10 +2487,17 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev, p_hwfn->first_on_engine = (load_code == FW_MSG_CODE_DRV_LOAD_ENGINE); - if (!qm_lock_init) { + if (!qm_lock_ref_cnt) { +#ifdef CONFIG_ECORE_LOCK_ALLOC + rc = OSAL_SPIN_LOCK_ALLOC(p_hwfn, &qm_lock); + if (rc) { + DP_ERR(p_hwfn, "qm_lock allocation failed\n"); + goto qm_lock_fail; + } +#endif OSAL_SPIN_LOCK_INIT(&qm_lock); - qm_lock_init = true; } + ++qm_lock_ref_cnt; /* Clean up chip from previous driver if such remains exist. * This is not needed when the PF is the first one on the @@ -2424,7 +2514,7 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev, } /* Log and clean previous pglue_b errors if such exist */ - ecore_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_main_ptt); + ecore_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_main_ptt, true); ecore_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt); /* Enable the PF's internal FID_enable in the PXP */ @@ -2462,15 +2552,23 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev, } if (rc != ECORE_SUCCESS) { - DP_NOTICE(p_hwfn, true, + DP_NOTICE(p_hwfn, false, "init phase failed for loadcode 0x%x (rc %d)\n", load_code, rc); goto load_err; } rc = ecore_mcp_load_done(p_hwfn, p_hwfn->p_main_ptt); - if (rc != ECORE_SUCCESS) + if (rc != ECORE_SUCCESS) { + DP_NOTICE(p_hwfn, false, + "Sending load done failed, rc = %d\n", rc); + if (rc == ECORE_NOMEM) { + DP_NOTICE(p_hwfn, false, + "Sending load done was failed due to memory allocation failure\n"); + goto load_err; + } return rc; + } /* send DCBX attention request command */ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, @@ -2480,7 +2578,7 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev, 1 << DRV_MB_PARAM_DCBX_NOTIFY_OFFSET, &resp, ¶m); if (rc != ECORE_SUCCESS) { - DP_NOTICE(p_hwfn, true, + DP_NOTICE(p_hwfn, false, "Failed to send DCBX attention request\n"); return rc; } @@ -2513,6 +2611,12 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev, return rc; load_err: + --qm_lock_ref_cnt; +#ifdef CONFIG_ECORE_LOCK_ALLOC + if (!qm_lock_ref_cnt) + OSAL_SPIN_LOCK_DEALLOC(&qm_lock); +qm_lock_fail: +#endif /* The MFW load lock should be released regardless of success or failure * of initialization. * TODO: replace this with an attempt to send cancel_load. 
@@ -2547,8 +2651,8 @@ static void ecore_hw_timers_stop(struct ecore_dev *p_dev, if (i < ECORE_HW_STOP_RETRY_LIMIT) return; - DP_NOTICE(p_hwfn, true, "Timers linear scans are not over" - " [Connection %02x Tasks %02x]\n", + DP_NOTICE(p_hwfn, false, + "Timers linear scans are not over [Connection %02x Tasks %02x]\n", (u8)ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN), (u8)ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK)); } @@ -2613,7 +2717,7 @@ enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev) if (!p_dev->recov_in_prog) { rc = ecore_mcp_unload_req(p_hwfn, p_ptt); if (rc != ECORE_SUCCESS) { - DP_NOTICE(p_hwfn, true, + DP_NOTICE(p_hwfn, false, "Failed sending a UNLOAD_REQ command. rc = %d.\n", rc); rc2 = ECORE_UNKNOWN_ERROR; @@ -2628,7 +2732,7 @@ enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev) rc = ecore_sp_pf_stop(p_hwfn); if (rc != ECORE_SUCCESS) { - DP_NOTICE(p_hwfn, true, + DP_NOTICE(p_hwfn, false, "Failed to close PF against FW [rc = %d]. Continue to stop HW to prevent illegal host access by the device.\n", rc); rc2 = ECORE_UNKNOWN_ERROR; @@ -2682,10 +2786,21 @@ enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev) ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_DB_ENABLE, 0); ecore_wr(p_hwfn, p_ptt, QM_REG_PF_EN, 0); + --qm_lock_ref_cnt; +#ifdef CONFIG_ECORE_LOCK_ALLOC + if (!qm_lock_ref_cnt) + OSAL_SPIN_LOCK_DEALLOC(&qm_lock); +#endif + if (!p_dev->recov_in_prog) { - ecore_mcp_unload_done(p_hwfn, p_ptt); + rc = ecore_mcp_unload_done(p_hwfn, p_ptt); + if (rc == ECORE_NOMEM) { + DP_NOTICE(p_hwfn, false, + "Failed sending an UNLOAD_DONE command due to a memory allocation failure. Resending.\n"); + rc = ecore_mcp_unload_done(p_hwfn, p_ptt); + } if (rc != ECORE_SUCCESS) { - DP_NOTICE(p_hwfn, true, + DP_NOTICE(p_hwfn, false, "Failed sending a UNLOAD_DONE command. 
rc = %d.\n", rc); rc2 = ECORE_UNKNOWN_ERROR; @@ -2936,7 +3051,7 @@ __ecore_hw_set_soft_resc_size(struct ecore_hwfn *p_hwfn, rc = ecore_mcp_set_resc_max_val(p_hwfn, p_ptt, res_id, resc_max_val, p_mcp_resp); if (rc != ECORE_SUCCESS) { - DP_NOTICE(p_hwfn, true, + DP_NOTICE(p_hwfn, false, "MFW response failure for a max value setting of resource %d [%s]\n", res_id, ecore_hw_get_resc_name(res_id)); return rc; @@ -3496,9 +3611,14 @@ ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn, break; case NVM_CFG1_GLOB_MF_MODE_UFP: p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_OVLAN_CLSS | - 1 << ECORE_MF_UFP_SPECIFIC; + 1 << ECORE_MF_UFP_SPECIFIC | + 1 << ECORE_MF_8021Q_TAGGING; + break; + case NVM_CFG1_GLOB_MF_MODE_BD: + p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_OVLAN_CLSS | + 1 << ECORE_MF_LLH_PROTO_CLSS | + 1 << ECORE_MF_8021AD_TAGGING; break; - case NVM_CFG1_GLOB_MF_MODE_NPAR1_0: p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_LLH_MAC_CLSS | 1 << ECORE_MF_LLH_PROTO_CLSS | @@ -3527,6 +3647,7 @@ ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn, */ switch (mf_mode) { case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED: + case NVM_CFG1_GLOB_MF_MODE_BD: p_hwfn->p_dev->mf_mode = ECORE_MF_OVLAN; break; case NVM_CFG1_GLOB_MF_MODE_NPAR1_0: @@ -3780,8 +3901,13 @@ ecore_get_hw_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, bool drv_resc_alloc = p_params->drv_resc_alloc; enum _ecore_status_t rc; + if (IS_ECORE_PACING(p_hwfn)) { + DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_IOV, + "Skipping IOV as packet pacing is requested\n"); + } + /* Since all information is common, only first hwfns should do this */ - if (IS_LEAD_HWFN(p_hwfn)) { + if (IS_LEAD_HWFN(p_hwfn) && !IS_ECORE_PACING(p_hwfn)) { rc = ecore_iov_hw_info(p_hwfn); if (rc != ECORE_SUCCESS) { if (p_params->b_relaxed_probe) @@ -3866,7 +3992,10 @@ ecore_get_hw_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, * that can result in performance penalty in some cases. 4 * represents a good tradeoff between performance and flexibility. */ - p_hwfn->hw_info.num_hw_tc = NUM_PHYS_TCS_4PORT_K2; + if (IS_ECORE_PACING(p_hwfn)) + p_hwfn->hw_info.num_hw_tc = 1; + else + p_hwfn->hw_info.num_hw_tc = NUM_PHYS_TCS_4PORT_K2; /* start out with a single active tc. 
This can be increased either * by dcbx negotiation or by upper layer driver @@ -4037,7 +4166,7 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn, /* Allocate PTT pool */ rc = ecore_ptt_pool_alloc(p_hwfn); if (rc) { - DP_NOTICE(p_hwfn, true, "Failed to prepare hwfn's hw\n"); + DP_NOTICE(p_hwfn, false, "Failed to prepare hwfn's hw\n"); if (p_params->b_relaxed_probe) p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM; goto err0; @@ -4062,7 +4191,7 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn, /* Initialize MCP structure */ rc = ecore_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt); if (rc) { - DP_NOTICE(p_hwfn, true, "Failed initializing mcp command\n"); + DP_NOTICE(p_hwfn, false, "Failed initializing mcp command\n"); if (p_params->b_relaxed_probe) p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM; goto err1; @@ -4072,7 +4201,7 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn, rc = ecore_get_hw_info(p_hwfn, p_hwfn->p_main_ptt, p_params->personality, p_params); if (rc) { - DP_NOTICE(p_hwfn, true, "Failed to get HW information\n"); + DP_NOTICE(p_hwfn, false, "Failed to get HW information\n"); goto err2; } @@ -4115,7 +4244,7 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn, /* Allocate the init RT array and initialize the init-ops engine */ rc = ecore_init_alloc(p_hwfn); if (rc) { - DP_NOTICE(p_hwfn, true, "Failed to allocate the init array\n"); + DP_NOTICE(p_hwfn, false, "Failed to allocate the init array\n"); if (p_params->b_relaxed_probe) p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM; goto err2; @@ -4153,6 +4282,7 @@ enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev, p_dev->chk_reg_fifo = p_params->chk_reg_fifo; p_dev->allow_mdump = p_params->allow_mdump; + p_hwfn->b_en_pacing = p_params->b_en_pacing; if (p_params->b_relaxed_probe) p_params->p_relaxed_res = ECORE_HW_PREPARE_SUCCESS; @@ -4188,6 +4318,7 @@ enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev, BAR_ID_1) / 2; p_doorbell = (void OSAL_IOMEM *)addr; + p_dev->hwfns[1].b_en_pacing = p_params->b_en_pacing; /* prepare second hw function */ rc = ecore_hw_prepare_single(&p_dev->hwfns[1], p_regview, p_doorbell, p_params); @@ -4205,8 +4336,7 @@ enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev, ecore_mcp_free(p_hwfn); ecore_hw_hwfn_free(p_hwfn); } else { - DP_NOTICE(p_dev, true, - "What do we need to free when VF hwfn1 init fails\n"); + DP_NOTICE(p_dev, false, "What do we need to free when VF hwfn1 init fails\n"); } return rc; } @@ -4237,7 +4367,7 @@ void ecore_hw_remove(struct ecore_dev *p_dev) ecore_mcp_free(p_hwfn); #ifdef CONFIG_ECORE_LOCK_ALLOC - OSAL_MUTEX_DEALLOC(&p_hwfn->dmae_info.mutex); + OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->dmae_info.lock); #endif } @@ -4368,7 +4498,7 @@ ecore_chain_alloc_next_ptr(struct ecore_dev *p_dev, struct ecore_chain *p_chain) p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys, ECORE_CHAIN_PAGE_SIZE); if (!p_virt) { - DP_NOTICE(p_dev, true, + DP_NOTICE(p_dev, false, "Failed to allocate chain memory\n"); return ECORE_NOMEM; } @@ -4401,7 +4531,7 @@ ecore_chain_alloc_single(struct ecore_dev *p_dev, struct ecore_chain *p_chain) p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys, ECORE_CHAIN_PAGE_SIZE); if (!p_virt) { - DP_NOTICE(p_dev, true, "Failed to allocate chain memory\n"); + DP_NOTICE(p_dev, false, "Failed to allocate chain memory\n"); return ECORE_NOMEM; } @@ -4425,7 +4555,7 @@ ecore_chain_alloc_pbl(struct ecore_dev *p_dev, size = page_cnt * sizeof(*pp_virt_addr_tbl); pp_virt_addr_tbl = (void **)OSAL_VZALLOC(p_dev, size); if (!pp_virt_addr_tbl) { - 
DP_NOTICE(p_dev, true, + DP_NOTICE(p_dev, false, "Failed to allocate memory for the chain virtual addresses table\n"); return ECORE_NOMEM; } @@ -4449,7 +4579,7 @@ ecore_chain_alloc_pbl(struct ecore_dev *p_dev, ecore_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys, pp_virt_addr_tbl); if (!p_pbl_virt) { - DP_NOTICE(p_dev, true, "Failed to allocate chain pbl memory\n"); + DP_NOTICE(p_dev, false, "Failed to allocate chain pbl memory\n"); return ECORE_NOMEM; } @@ -4457,7 +4587,7 @@ ecore_chain_alloc_pbl(struct ecore_dev *p_dev, p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys, ECORE_CHAIN_PAGE_SIZE); if (!p_virt) { - DP_NOTICE(p_dev, true, + DP_NOTICE(p_dev, false, "Failed to allocate chain memory\n"); return ECORE_NOMEM; } @@ -4497,7 +4627,7 @@ enum _ecore_status_t ecore_chain_alloc(struct ecore_dev *p_dev, rc = ecore_chain_alloc_sanity_check(p_dev, cnt_type, elem_size, page_cnt); if (rc) { - DP_NOTICE(p_dev, true, + DP_NOTICE(p_dev, false, "Cannot allocate a chain with the given arguments:\n" "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n", intended_use, mode, cnt_type, num_elems, elem_size); diff --git a/drivers/net/qede/base/ecore_dev_api.h b/drivers/net/qede/base/ecore_dev_api.h index 98bcabe8..0dd78d60 100644 --- a/drivers/net/qede/base/ecore_dev_api.h +++ b/drivers/net/qede/base/ecore_dev_api.h @@ -1,7 +1,7 @@ /* - * Copyright (c) 2016 QLogic Corporation. + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.qede_pmd for copyright and licensing details. */ @@ -32,7 +32,7 @@ void ecore_init_dp(struct ecore_dev *p_dev, * * @param p_dev */ -void ecore_init_struct(struct ecore_dev *p_dev); +enum _ecore_status_t ecore_init_struct(struct ecore_dev *p_dev); /** * @brief ecore_resc_free - @@ -57,6 +57,12 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev); */ void ecore_resc_setup(struct ecore_dev *p_dev); +enum ecore_mfw_timeout_fallback { + ECORE_TO_FALLBACK_TO_NONE, + ECORE_TO_FALLBACK_TO_DEFAULT, + ECORE_TO_FALLBACK_FAIL_LOAD, +}; + enum ecore_override_force_load { ECORE_OVERRIDE_FORCE_LOAD_NONE, ECORE_OVERRIDE_FORCE_LOAD_ALWAYS, @@ -79,6 +85,11 @@ struct ecore_drv_load_params { #define ECORE_LOAD_REQ_LOCK_TO_DEFAULT 0 #define ECORE_LOAD_REQ_LOCK_TO_NONE 255 + /* Action to take in case the MFW doesn't support timeout values other + * than default and none. + */ + enum ecore_mfw_timeout_fallback mfw_timeout_fallback; + /* Avoid engine reset when first PF loads on it */ bool avoid_eng_reset; @@ -104,6 +115,9 @@ struct ecore_hw_init_params { /* Driver load parameters */ struct ecore_drv_load_params *p_drv_load_params; + + /* SPQ block timeout in msec */ + u32 spq_timeout_ms; }; /** @@ -256,6 +270,9 @@ struct ecore_hw_prepare_params { */ bool b_relaxed_probe; enum ecore_hw_prepare_result p_relaxed_res; + + /* Enable/disable request by ecore client for pacing */ + bool b_en_pacing; }; /** @@ -363,6 +380,7 @@ struct ecore_eth_stats_common { u64 tx_mac_mc_packets; u64 tx_mac_bc_packets; u64 tx_mac_ctrl_frames; + u64 link_change_count; }; struct ecore_eth_stats_bb { diff --git a/drivers/net/qede/base/ecore_gtt_reg_addr.h b/drivers/net/qede/base/ecore_gtt_reg_addr.h index 2acd864d..ac29dc49 100644 --- a/drivers/net/qede/base/ecore_gtt_reg_addr.h +++ b/drivers/net/qede/base/ecore_gtt_reg_addr.h @@ -1,7 +1,7 @@ /* - * Copyright (c) 2016 QLogic Corporation. + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. 
- * www.qlogic.com + * www.cavium.com * * See LICENSE.qede_pmd for copyright and licensing details. */ diff --git a/drivers/net/qede/base/ecore_gtt_values.h b/drivers/net/qede/base/ecore_gtt_values.h index 2ddc5f19..d9af94df 100644 --- a/drivers/net/qede/base/ecore_gtt_values.h +++ b/drivers/net/qede/base/ecore_gtt_values.h @@ -1,7 +1,7 @@ /* - * Copyright (c) 2016 QLogic Corporation. + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.qede_pmd for copyright and licensing details. */ diff --git a/drivers/net/qede/base/ecore_hsi_common.h b/drivers/net/qede/base/ecore_hsi_common.h index d8abd604..d400fa91 100644 --- a/drivers/net/qede/base/ecore_hsi_common.h +++ b/drivers/net/qede/base/ecore_hsi_common.h @@ -1,7 +1,7 @@ /* - * Copyright (c) 2016 QLogic Corporation. + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.qede_pmd for copyright and licensing details. */ @@ -381,7 +381,7 @@ struct e4_xstorm_core_conn_ag_ctx { __le16 reserved16 /* physical_q2 */; __le16 tx_bd_cons /* word3 */; __le16 tx_bd_or_spq_prod /* word4 */; - __le16 word5 /* word5 */; + __le16 updated_qm_pq_id /* word5 */; __le16 conn_dpi /* conn_dpi */; u8 byte3 /* byte3 */; u8 byte4 /* byte4 */; @@ -904,8 +904,10 @@ struct core_rx_start_ramrod_data { /* if set, 802.1q tags will be removed and copied to CQE */ u8 inner_vlan_stripping_en; -/* if set, outer tag wont be stripped, valid only in MF OVLAN. */ - u8 outer_vlan_stripping_dis; +/* if set and inner vlan does not exist, the outer vlan will be copied to CQE as + * inner vlan. Should be used in MF_OVLAN mode only. + */ + u8 report_outer_vlan; u8 queue_id /* Light L2 RX Queue ID */; u8 main_func_queue /* Is this the main queue for the PF */; /* Duplicate broadcast packets to LL2 main queue in mf_si mode. Valid if @@ -946,7 +948,9 @@ struct core_tx_bd_data { /* Do not allow additional VLAN manipulations on this packet (DCB) */ #define CORE_TX_BD_DATA_FORCE_VLAN_MODE_MASK 0x1 #define CORE_TX_BD_DATA_FORCE_VLAN_MODE_SHIFT 0 -/* Insert VLAN into packet */ +/* Insert VLAN into packet. Cannot be set for LB packets + * (tx_dst == CORE_TX_DEST_LB) + */ #define CORE_TX_BD_DATA_VLAN_INSERTION_MASK 0x1 #define CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT 1 /* This is the first BD of the packet (for debug) */ @@ -1069,11 +1073,11 @@ struct core_tx_update_ramrod_data { * Enum flag for what type of dcb data to update */ enum dcb_dscp_update_mode { -/* use when no change should be done to dcb data */ +/* use when no change should be done to DCB data */ DONT_UPDATE_DCB_DSCP, - UPDATE_DCB /* use to update only l2 (vlan) priority */, - UPDATE_DSCP /* use to update only l3 dscp */, - UPDATE_DCB_DSCP /* update vlan pri and dscp */, + UPDATE_DCB /* use to update only L2 (vlan) priority */, + UPDATE_DSCP /* use to update only IP DSCP */, + UPDATE_DCB_DSCP /* update vlan pri and DSCP */, MAX_DCB_DSCP_UPDATE_FLAG }; @@ -1291,10 +1295,15 @@ enum fw_flow_ctrl_mode { * GFT profile type. */ enum gft_profile_type { - GFT_PROFILE_TYPE_4_TUPLE /* 4 tuple, IP type and L4 type match. */, -/* L4 destination port, IP type and L4 type match. */ +/* tunnel type, inner 4 tuple, IP type and L4 type match. */ + GFT_PROFILE_TYPE_4_TUPLE, +/* tunnel type, inner L4 destination port, IP type and L4 type match. */ GFT_PROFILE_TYPE_L4_DST_PORT, - GFT_PROFILE_TYPE_IP_DST_PORT /* IP destination port and IP type.
*/, +/* tunnel type, inner IP destination address and IP type match. */ + GFT_PROFILE_TYPE_IP_DST_ADDR, +/* tunnel type, inner IP source address and IP type match. */ + GFT_PROFILE_TYPE_IP_SRC_ADDR, + GFT_PROFILE_TYPE_TUNNEL_TYPE /* tunnel type and outer IP type match. */, MAX_GFT_PROFILE_TYPE }; @@ -1411,8 +1420,9 @@ struct vlan_header { * outer tag configurations */ struct outer_tag_config_struct { -/* Enables the STAG Priority Change , Should be 1 for Bette Davis and UFP with - * Host Control mode. Else - 0 +/* Enables updating S-tag priority from inner tag or DCB. Should be 1 for Bette + * Davis, UFP with Host Control mode, and UFP with DCB over base interface. + * Else - 0. */ u8 enable_stag_pri_change; /* If inner_to_outer_pri_map is initialized then set pri_map_valid */ @@ -1507,15 +1517,18 @@ struct pf_start_ramrod_data { /* - * Data for port update ramrod + * Per protocol DCB data */ struct protocol_dcb_data { - u8 dcb_enable_flag /* dcbEnable flag value */; - u8 dscp_enable_flag /* If set use dscp value */; - u8 dcb_priority /* dcbPri flag value */; - u8 dcb_tc /* dcb TC value */; - u8 dscp_val /* dscp value to write if dscp_enable_flag is set */; - u8 reserved0; + u8 dcb_enable_flag /* Enable DCB */; + u8 dscp_enable_flag /* Enable updating DSCP value */; + u8 dcb_priority /* DCB priority */; + u8 dcb_tc /* DCB TC */; + u8 dscp_val /* DSCP value to write if dscp_enable_flag is set */; +/* When DCB is enabled - if this flag is set, don't add VLAN 0 tag to untagged + * frames + */ + u8 dcb_dont_add_vlan0; }; /* @@ -1575,8 +1588,9 @@ struct pf_update_ramrod_data { /* core iwarp related fields */ struct protocol_dcb_data iwarp_dcb_data; __le16 mf_vlan /* new outer vlan id value */; -/* enables the inner to outer TAG priority mapping. Should be 1 for Bette Davis - * and UFP with Host Control mode, else - 0. +/* enables updating S-tag priority from inner tag or DCB. Should be 1 for Bette + * Davis, UFP with Host Control mode, and UFP with DCB over base interface. + * Else - 0 */ u8 enable_stag_pri_change; u8 reserved; @@ -1739,6 +1753,7 @@ struct tstorm_per_port_stat { struct regpair eth_vxlan_tunn_filter_discard; /* GENEVE dropped packets */ struct regpair eth_geneve_tunn_filter_discard; + struct regpair eth_gft_drop_pkt /* GFT dropped packets */; }; @@ -2130,6 +2145,53 @@ struct e4_ystorm_core_conn_ag_ctx { }; +struct fw_asserts_ram_section { +/* The offset of the section in the RAM in RAM lines (64-bit units) */ + __le16 section_ram_line_offset; +/* The size of the section in RAM lines (64-bit units) */ + __le16 section_ram_line_size; +/* The offset of the asserts list within the section in dwords */ + u8 list_dword_offset; +/* The size of an assert list element in dwords */ + u8 list_element_dword_size; + u8 list_num_elements /* The number of elements in the asserts list */; +/* The offset of the next list index field within the section in dwords */ + u8 list_next_index_dword_offset; +}; + + +struct fw_ver_num { + u8 major /* Firmware major version number */; + u8 minor /* Firmware minor version number */; + u8 rev /* Firmware revision version number */; + u8 eng /* Firmware engineering version number (for bootleg versions) */; +}; + +struct fw_ver_info { + __le16 tools_ver /* Tools version number */; + u8 image_id /* FW image ID (e.g. main, l2b, kuku) */; + u8 reserved1; + struct fw_ver_num num /* FW version number */; + __le32 timestamp /* FW Timestamp in unix time (sec.
since 1970) */; + __le32 reserved2; +}; + +struct fw_info { + struct fw_ver_info ver /* FW version information */; +/* Info regarding the FW asserts section in the Storm RAM */ + struct fw_asserts_ram_section fw_asserts_section; +}; + + +struct fw_info_location { + __le32 grc_addr /* GRC address where the fw_info struct is located. */; +/* Size of the fw_info structure (that's located at the grc_addr). */ + __le32 size; +}; + + + + /* * IGU cleanup command */ diff --git a/drivers/net/qede/base/ecore_hsi_debug_tools.h b/drivers/net/qede/base/ecore_hsi_debug_tools.h index ebb66482..262834ed 100644 --- a/drivers/net/qede/base/ecore_hsi_debug_tools.h +++ b/drivers/net/qede/base/ecore_hsi_debug_tools.h @@ -1,7 +1,7 @@ /* - * Copyright (c) 2016 QLogic Corporation. + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.qede_pmd for copyright and licensing details. */ @@ -225,7 +225,7 @@ enum bin_dbg_buffer_type { * Attention bit mapping */ struct dbg_attn_bit_mapping { - __le16 data; + u16 data; /* The index of an attention in the blocks attentions list * (if is_unused_bit_cnt=0), or a number of consecutive unused attention bits * (if is_unused_bit_cnt=1) @@ -247,14 +247,14 @@ struct dbg_attn_block_type_data { /* Offset of this block attention names in the debug attention name offsets * array */ - __le16 names_offset; - __le16 reserved1; + u16 names_offset; + u16 reserved1; u8 num_regs /* Number of attention registers in this block */; u8 reserved2; /* Offset of this blocks attention registers in the attention registers array * (in dbg_attn_reg units) */ - __le16 regs_offset; + u16 regs_offset; }; /* @@ -272,20 +272,20 @@ struct dbg_attn_block { * Attention register result */ struct dbg_attn_reg_result { - __le32 data; + u32 data; /* STS attention register GRC address (in dwords) */ #define DBG_ATTN_REG_RESULT_STS_ADDRESS_MASK 0xFFFFFF #define DBG_ATTN_REG_RESULT_STS_ADDRESS_SHIFT 0 /* Number of attention indexes in this register */ #define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_MASK 0xFF #define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_SHIFT 24 -/* The offset of this registers attentions within the blocks attentions - * list (a value in the range 0..number of block attentions-1) +/* The offset of this registers attentions within the blocks attentions list + * (a value in the range 0..number of block attentions-1) */ - __le16 attn_idx_offset; - __le16 reserved; - __le32 sts_val /* Value read from the STS attention register */; - __le32 mask_val /* Value read from the MASK attention register */; + u16 block_attn_offset; + u16 reserved; + u32 sts_val /* Value read from the STS attention register */; + u32 mask_val /* Value read from the MASK attention register */; }; /* @@ -303,7 +303,7 @@ struct dbg_attn_block_result { /* Offset of this registers block attention names in the attention name offsets * array */ - __le16 names_offset; + u16 names_offset; /* result data for each register in the block in which at least one attention * bit is set */ @@ -316,7 +316,7 @@ struct dbg_attn_block_result { * mode header */ struct dbg_mode_hdr { - __le16 data; + u16 data; /* indicates if a mode expression should be evaluated (0/1) */ #define DBG_MODE_HDR_EVAL_MODE_MASK 0x1 #define DBG_MODE_HDR_EVAL_MODE_SHIFT 0 @@ -331,12 +331,11 @@ struct dbg_mode_hdr { * Attention register */ struct dbg_attn_reg { - struct dbg_mode_hdr mode /* Mode header */; -/* The offset of this registers attentions within the blocks attentions - * list (a value in the range 0..number of
block attentions-1) +/* The offset of this registers attentions within the blocks attentions list + * (a value in the range 0..number of block attentions-1) */ - __le16 attn_idx_offset; - __le32 data; + u16 block_attn_offset; + u32 data; /* STS attention register GRC address (in dwords) */ #define DBG_ATTN_REG_STS_ADDRESS_MASK 0xFFFFFF #define DBG_ATTN_REG_STS_ADDRESS_SHIFT 0 @@ -344,9 +343,8 @@ struct dbg_attn_reg { #define DBG_ATTN_REG_NUM_REG_ATTN_MASK 0xFF #define DBG_ATTN_REG_NUM_REG_ATTN_SHIFT 24 /* STS_CLR attention register GRC address (in dwords) */ - __le32 sts_clr_address; -/* MASK attention register GRC address (in dwords) */ - __le32 mask_address; + u32 sts_clr_address; + u32 mask_address /* MASK attention register GRC address (in dwords) */; }; @@ -370,7 +368,7 @@ struct dbg_bus_block { /* Indicates if this block has a latency events debug line (0/1). */ u8 has_latency_events; /* Offset of this blocks lines in the Debug Bus lines array. */ - __le16 lines_offset; + u16 lines_offset; }; @@ -383,7 +381,7 @@ struct dbg_bus_block_user_data { /* Indicates if this block has a latency events debug line (0/1). */ u8 has_latency_events; /* Offset of this blocks lines in the debug bus line name offsets array. */ - __le16 names_offset; + u16 names_offset; }; @@ -422,13 +420,13 @@ struct dbg_dump_cond_hdr { * memory data for registers dump */ struct dbg_dump_mem { - __le32 dword0; + u32 dword0; /* register address (in dwords) */ #define DBG_DUMP_MEM_ADDRESS_MASK 0xFFFFFF #define DBG_DUMP_MEM_ADDRESS_SHIFT 0 #define DBG_DUMP_MEM_MEM_GROUP_ID_MASK 0xFF /* memory group ID */ #define DBG_DUMP_MEM_MEM_GROUP_ID_SHIFT 24 - __le32 dword1; + u32 dword1; /* register size (in dwords) */ #define DBG_DUMP_MEM_LENGTH_MASK 0xFFFFFF #define DBG_DUMP_MEM_LENGTH_SHIFT 0 @@ -444,7 +442,7 @@ struct dbg_dump_mem { * register data for registers dump */ struct dbg_dump_reg { - __le32 data; + u32 data; /* register address (in dwords) */ #define DBG_DUMP_REG_ADDRESS_MASK 0x7FFFFF /* register address (in dwords) */ #define DBG_DUMP_REG_ADDRESS_SHIFT 0 @@ -460,7 +458,7 @@ struct dbg_dump_reg { * split header for registers dump */ struct dbg_dump_split_hdr { - __le32 hdr; + u32 hdr; /* size in dwords of the data following this header */ #define DBG_DUMP_SPLIT_HDR_DATA_SIZE_MASK 0xFFFFFF #define DBG_DUMP_SPLIT_HDR_DATA_SIZE_SHIFT 0 @@ -474,8 +472,7 @@ struct dbg_dump_split_hdr { */ struct dbg_idle_chk_cond_hdr { struct dbg_mode_hdr mode /* Mode header */; -/* size in dwords of the data following this header */ - __le16 data_size; + u16 data_size /* size in dwords of the data following this header */; }; @@ -483,7 +480,7 @@ struct dbg_idle_chk_cond_hdr { * Idle Check condition register */ struct dbg_idle_chk_cond_reg { - __le32 data; + u32 data; /* Register GRC address (in dwords) */ #define DBG_IDLE_CHK_COND_REG_ADDRESS_MASK 0x7FFFFF #define DBG_IDLE_CHK_COND_REG_ADDRESS_SHIFT 0 @@ -493,7 +490,7 @@ struct dbg_idle_chk_cond_reg { /* value from block_id enum */ #define DBG_IDLE_CHK_COND_REG_BLOCK_ID_MASK 0xFF #define DBG_IDLE_CHK_COND_REG_BLOCK_ID_SHIFT 24 - __le16 num_entries /* number of registers entries to check */; + u16 num_entries /* number of registers entries to check */; u8 entry_size /* size of registers entry (in dwords) */; u8 start_entry /* index of the first entry to check */; }; @@ -503,7 +500,7 @@ struct dbg_idle_chk_cond_reg { * Idle Check info register */ struct dbg_idle_chk_info_reg { - __le32 data; + u32 data; /* Register GRC address (in dwords) */ #define DBG_IDLE_CHK_INFO_REG_ADDRESS_MASK 
0x7FFFFF #define DBG_IDLE_CHK_INFO_REG_ADDRESS_SHIFT 0 @@ -513,7 +510,7 @@ struct dbg_idle_chk_info_reg { /* value from block_id enum */ #define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_MASK 0xFF #define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_SHIFT 24 - __le16 size /* register size in dwords */; + u16 size /* register size in dwords */; struct dbg_mode_hdr mode /* Mode header */; }; @@ -531,8 +528,8 @@ union dbg_idle_chk_reg { * Idle Check result header */ struct dbg_idle_chk_result_hdr { - __le16 rule_id /* Failing rule index */; - __le16 mem_entry_id /* Failing memory entry index */; + u16 rule_id /* Failing rule index */; + u16 mem_entry_id /* Failing memory entry index */; u8 num_dumped_cond_regs /* number of dumped condition registers */; u8 num_dumped_info_regs /* number of dumped condition registers */; u8 severity /* from dbg_idle_chk_severity_types enum */; @@ -552,7 +549,7 @@ struct dbg_idle_chk_result_reg_hdr { #define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_MASK 0x7F #define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_SHIFT 1 u8 start_entry /* index of the first checked entry */; - __le16 size /* register size in dwords */; + u16 size /* register size in dwords */; }; @@ -560,7 +557,7 @@ struct dbg_idle_chk_result_reg_hdr { * Idle Check rule */ struct dbg_idle_chk_rule { - __le16 rule_id /* Idle Check rule ID */; + u16 rule_id /* Idle Check rule ID */; u8 severity /* value from dbg_idle_chk_severity_types enum */; u8 cond_id /* Condition ID */; u8 num_cond_regs /* number of condition registers */; @@ -570,11 +567,11 @@ struct dbg_idle_chk_rule { /* offset of this rules registers in the idle check register array * (in dbg_idle_chk_reg units) */ - __le16 reg_offset; + u16 reg_offset; /* offset of this rules immediate values in the immediate values array * (in dwords) */ - __le16 imm_offset; + u16 imm_offset; }; @@ -582,7 +579,7 @@ struct dbg_idle_chk_rule { * Idle Check rule parsing data */ struct dbg_idle_chk_rule_parsing_data { - __le32 data; + u32 data; /* indicates if this register has a FW message */ #define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_MASK 0x1 #define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_SHIFT 0 @@ -693,8 +690,8 @@ struct dbg_bus_trigger_state_data { * Debug Bus memory address */ struct dbg_bus_mem_addr { - __le32 lo; - __le32 hi; + u32 lo; + u32 hi; }; /* @@ -703,7 +700,7 @@ struct dbg_bus_mem_addr { struct dbg_bus_pci_buf_data { struct dbg_bus_mem_addr phys_addr /* PCI buffer physical address */; struct dbg_bus_mem_addr virt_addr /* PCI buffer virtual address */; - __le32 size /* PCI buffer size in bytes */; + u32 size /* PCI buffer size in bytes */; }; /* @@ -747,21 +744,20 @@ struct dbg_bus_storm_data { u8 cid_filter_en /* Indicates if CID filtering is performed (0/1) */; /* EID filter params to filter on. Valid only if eid_filter_en is set. */ union dbg_bus_storm_eid_params eid_filter_params; -/* CID to filter on. Valid only if cid_filter_en is set. */ - __le32 cid; + u32 cid /* CID to filter on. Valid only if cid_filter_en is set. 
*/; }; /* * Debug Bus data */ struct dbg_bus_data { - __le32 app_version /* The tools version number of the application */; + u32 app_version /* The tools version number of the application */; u8 state /* The current debug bus state */; u8 hw_dwords /* HW dwords per cycle */; /* The HW IDs of the recorded HW blocks, where bits i*3..i*3+2 contain the * HW ID of dword/qword i */ - __le16 hw_id_mask; + u16 hw_id_mask; u8 num_enabled_blocks /* Number of blocks enabled for recording */; u8 num_enabled_storms /* Number of Storms enabled for recording */; u8 target /* Output target */; @@ -783,7 +779,7 @@ struct dbg_bus_data { * Valid only if both filter and trigger are enabled (0/1) */ u8 filter_post_trigger; - __le16 reserved; + u16 reserved; /* Indicates if the recording trigger is enabled (0/1) */ u8 trigger_en; /* trigger states data */ @@ -933,9 +929,10 @@ struct dbg_grc_data { /* Indicates if the GRC parameters were initialized */ u8 params_initialized; u8 reserved1; - __le16 reserved2; -/* Value of each GRC parameter. Array size must match enum dbg_grc_params. */ - __le32 param_val[48]; + u16 reserved2; +/* Value of each GRC parameter. Array size must match the enum dbg_grc_params. + */ + u32 param_val[48]; }; @@ -960,7 +957,8 @@ enum dbg_grc_params { DBG_GRC_PARAM_DUMP_CAU /* dump CAU memories (0/1) */, DBG_GRC_PARAM_DUMP_QM /* dump QM memories (0/1) */, DBG_GRC_PARAM_DUMP_MCP /* dump MCP memories (0/1) */, - DBG_GRC_PARAM_RESERVED /* reserved */, +/* MCP Trace meta data size in bytes */ + DBG_GRC_PARAM_MCP_TRACE_META_SIZE, DBG_GRC_PARAM_DUMP_CFC /* dump CFC memories (0/1) */, DBG_GRC_PARAM_DUMP_IGU /* dump IGU memories (0/1) */, DBG_GRC_PARAM_DUMP_BRB /* dump BRB memories (0/1) */, @@ -1087,11 +1085,11 @@ enum dbg_storms { * Idle Check data */ struct idle_chk_data { - __le32 buf_size /* Idle check buffer size in dwords */; + u32 buf_size /* Idle check buffer size in dwords */; /* Indicates if the idle check buffer size was set (0/1) */ u8 buf_size_set; u8 reserved1; - __le16 reserved2; + u16 reserved2; }; /* @@ -1109,7 +1107,7 @@ struct dbg_tools_data { u8 initialized /* Indicates if the data was initialized */; u8 use_dmae /* Indicates if DMAE should be used */; /* Numbers of registers that were read since last log */ - __le32 num_regs_read; + u32 num_regs_read; }; diff --git a/drivers/net/qede/base/ecore_hsi_eth.h b/drivers/net/qede/base/ecore_hsi_eth.h index ffbf5c71..efe3bb24 100644 --- a/drivers/net/qede/base/ecore_hsi_eth.h +++ b/drivers/net/qede/base/ecore_hsi_eth.h @@ -1,7 +1,7 @@ /* - * Copyright (c) 2016 QLogic Corporation. + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.qede_pmd for copyright and licensing details. 
*/ @@ -346,7 +346,7 @@ struct e4_xstorm_eth_conn_ag_ctx { __le16 edpm_num_bds /* physical_q2 */; __le16 tx_bd_cons /* word3 */; __le16 tx_bd_prod /* word4 */; - __le16 tx_class /* word5 */; + __le16 updated_qm_pq_id /* word5 */; __le16 conn_dpi /* conn_dpi */; u8 byte3 /* byte3 */; u8 byte4 /* byte4 */; @@ -1034,7 +1034,6 @@ struct eth_vport_rx_mode { #define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_SHIFT 5 #define ETH_VPORT_RX_MODE_RESERVED1_MASK 0x3FF #define ETH_VPORT_RX_MODE_RESERVED1_SHIFT 6 - __le16 reserved2[3]; }; @@ -1046,11 +1045,11 @@ struct eth_vport_tpa_param { u8 tpa_ipv6_en_flg /* Enable TPA for IPv6 packets */; u8 tpa_ipv4_tunn_en_flg /* Enable TPA for IPv4 over tunnel */; u8 tpa_ipv6_tunn_en_flg /* Enable TPA for IPv6 over tunnel */; -/* If set, start each tpa segment on new SGE (GRO mode). One SGE per segment - * allowed +/* If set, start each TPA segment on new BD (GRO mode). One BD per segment + * allowed. */ u8 tpa_pkt_split_flg; -/* If set, put header of first TPA segment on bd and data on SGE */ +/* If set, put header of first TPA segment on first BD and data on second BD. */ u8 tpa_hdr_data_split_flg; /* If set, GRO data consistent will checked for TPA continue */ u8 tpa_gro_consistent_flg; @@ -1089,7 +1088,6 @@ struct eth_vport_tx_mode { #define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_SHIFT 4 #define ETH_VPORT_TX_MODE_RESERVED1_MASK 0x7FF #define ETH_VPORT_TX_MODE_RESERVED1_SHIFT 5 - __le16 reserved2[3]; }; @@ -1216,7 +1214,9 @@ struct rx_queue_update_ramrod_data { u8 complete_cqe_flg /* post completion to the CQE ring if set */; u8 complete_event_flg /* post completion to the event ring if set */; u8 vport_id /* ID of virtual port */; - u8 reserved[4]; +/* If set, update default rss queue to this RX queue. */ + u8 set_default_rss_queue; + u8 reserved[3]; u8 reserved1 /* FW reserved. */; u8 reserved2 /* FW reserved. */; u8 reserved3 /* FW reserved. */; @@ -1257,7 +1257,8 @@ struct rx_update_gft_filter_data { __le16 action_icid; __le16 rx_qid /* RX queue ID. Valid if rx_qid_valid set. */; __le16 flow_id /* RX flow ID. Valid if flow_id_valid set. */; - u8 vport_id /* RX vport Id. */; +/* RX vport Id. For drop flow, set to ETH_GFT_TRASHCAN_VPORT. */ + __le16 vport_id; /* If set, action_icid will used for GFT filter update. */ u8 action_icid_valid; /* If set, rx_qid will used for traffic steering, in additional to vport_id. @@ -1273,7 +1274,10 @@ struct rx_update_gft_filter_data { * case of error. */ u8 assert_on_error; - u8 reserved[2]; +/* If set, inner VLAN will be removed regardless to VPORT configuration. + * Supported by E4 only. + */ + u8 inner_vlan_removal_en; }; @@ -1403,7 +1407,7 @@ struct vport_start_ramrod_data { u8 ctl_frame_mac_check_en; /* If set, control frames will be filtered according to ethtype check. 
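/* Illustrative sketch (not part of the patch): filling the TPA parameter
 * block above for the BD-based GRO mode the updated comments describe. Only
 * fields visible in this header are used; the chosen values are an example
 * policy, not a driver default. Assumes the surrounding ecore headers for
 * struct eth_vport_tpa_param. */
static void example_cfg_tpa_gro(struct eth_vport_tpa_param *p_tpa)
{
        p_tpa->tpa_ipv6_en_flg = 1;        /* aggregate IPv6 flows */
        p_tpa->tpa_ipv4_tunn_en_flg = 1;   /* ... and IPv4 over tunnel */
        p_tpa->tpa_ipv6_tunn_en_flg = 1;   /* ... and IPv6 over tunnel */
        p_tpa->tpa_pkt_split_flg = 1;      /* GRO: new BD per TPA segment */
        p_tpa->tpa_hdr_data_split_flg = 1; /* header on first BD, data on second */
        p_tpa->tpa_gro_consistent_flg = 1; /* check consistency on TPA continue */
}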
*/ u8 ctl_frame_ethtype_check_en; - u8 reserved[5]; + u8 reserved[1]; }; @@ -1486,6 +1490,7 @@ struct vport_update_ramrod_data { struct vport_update_ramrod_data_cmn common; struct eth_vport_rx_mode rx_mode /* vport rx mode bitmap */; struct eth_vport_tx_mode tx_mode /* vport tx mode bitmap */; + __le32 reserved[3]; /* TPA configuration parameters */ struct eth_vport_tpa_param tpa_param; struct vport_update_ramrod_mcast approx_mcast; @@ -1809,7 +1814,7 @@ struct E4XstormEthConnAgCtxDqExtLdPart { __le16 edpm_num_bds /* physical_q2 */; __le16 tx_bd_cons /* word3 */; __le16 tx_bd_prod /* word4 */; - __le16 tx_class /* word5 */; + __le16 updated_qm_pq_id /* word5 */; __le16 conn_dpi /* conn_dpi */; u8 byte3 /* byte3 */; u8 byte4 /* byte4 */; @@ -2153,7 +2158,7 @@ struct e4_xstorm_eth_hw_conn_ag_ctx { __le16 edpm_num_bds /* physical_q2 */; __le16 tx_bd_cons /* word3 */; __le16 tx_bd_prod /* word4 */; - __le16 tx_class /* word5 */; + __le16 updated_qm_pq_id /* word5 */; __le16 conn_dpi /* conn_dpi */; }; diff --git a/drivers/net/qede/base/ecore_hsi_init_func.h b/drivers/net/qede/base/ecore_hsi_init_func.h index 48b0048f..c318514b 100644 --- a/drivers/net/qede/base/ecore_hsi_init_func.h +++ b/drivers/net/qede/base/ecore_hsi_init_func.h @@ -1,7 +1,7 @@ /* - * Copyright (c) 2016 QLogic Corporation. + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.qede_pmd for copyright and licensing details. */ @@ -24,10 +24,10 @@ * BRB RAM init requirements */ struct init_brb_ram_req { - __le32 guranteed_per_tc /* guaranteed size per TC, in bytes */; - __le32 headroom_per_tc /* headroom size per TC, in bytes */; - __le32 min_pkt_size /* min packet size, in bytes */; - __le32 max_ports_per_engine /* min packet size, in bytes */; + u32 guranteed_per_tc /* guaranteed size per TC, in bytes */; + u32 headroom_per_tc /* headroom size per TC, in bytes */; + u32 min_pkt_size /* min packet size, in bytes */; + u32 max_ports_per_engine /* min packet size, in bytes */; u8 num_active_tcs[MAX_NUM_PORTS] /* number of active TCs per port */; }; @@ -44,15 +44,14 @@ struct init_ets_tc_req { * (indicated by the weight field) */ u8 use_wfq; -/* An arbitration weight. Valid only if use_wfq is set. */ - __le16 weight; + u16 weight /* An arbitration weight. Valid only if use_wfq is set. */; }; /* * ETS init requirements */ struct init_ets_req { - __le32 mtu /* Max packet size (in bytes) */; + u32 mtu /* Max packet size (in bytes) */; /* ETS initialization requirements per TC. */ struct init_ets_tc_req tc_req[NUM_OF_TCS]; }; @@ -64,12 +63,12 @@ struct init_ets_req { */ struct init_nig_lb_rl_req { /* Global MAC+LB RL rate (in Mbps). If set to 0, the RL will be disabled. */ - __le16 lb_mac_rate; + u16 lb_mac_rate; /* Global LB RL rate (in Mbps). If set to 0, the RL will be disabled. */ - __le16 lb_rate; - __le32 mtu /* Max packet size (in bytes) */; + u16 lb_rate; + u32 mtu /* Max packet size (in bytes) */; /* RL rate per physical TC (in Mbps). If set to 0, the RL will be disabled. 
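/* Illustrative sketch (not part of the patch): building an ETS request with
 * the structures above -- TC0 kept outside the WFQ arbitration, TC1/TC2
 * sharing bandwidth 30/70 by weight. The split is an example policy; struct
 * init_ets_req and init_ets_tc_req come from this header. */
static void example_fill_ets(struct init_ets_req *p_req)
{
        p_req->mtu = 9600;            /* max packet size, in bytes */
        p_req->tc_req[0].use_wfq = 0; /* TC0: weight field ignored */
        p_req->tc_req[1].use_wfq = 1;
        p_req->tc_req[1].weight = 30; /* arbitration weight, valid when use_wfq=1 */
        p_req->tc_req[2].use_wfq = 1;
        p_req->tc_req[2].weight = 70;
}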
*/ - __le16 tc_rate[NUM_OF_PHYS_TCS]; + u16 tc_rate[NUM_OF_PHYS_TCS]; }; @@ -98,10 +97,10 @@ struct init_qm_port_params { /* Vector of valid bits for active TCs used by this port */ u8 active_phys_tcs; /* number of PBF command lines that can be used by this port */ - __le16 num_pbf_cmd_lines; + u16 num_pbf_cmd_lines; /* number of BTB blocks that can be used by this port */ - __le16 num_btb_blocks; - __le16 reserved; + u16 num_btb_blocks; + u16 reserved; }; @@ -114,6 +113,9 @@ struct init_qm_pq_params { u8 wrr_group /* WRR group */; /* Indicates if a rate limiter should be allocated for the PQ (0/1) */ u8 rl_valid; + u8 port_id /* Port ID */; + u8 reserved0; + u16 reserved1; }; @@ -124,13 +126,13 @@ struct init_qm_vport_params { /* rate limit in Mb/sec units. a value of 0 means dont configure. ignored if * VPORT RL is globally disabled. */ - __le32 vport_rl; + u32 vport_rl; /* WFQ weight. A value of 0 means dont configure. ignored if VPORT WFQ is * globally disabled. */ - __le16 vport_wfq; + u16 vport_wfq; /* the first Tx PQ ID associated with this VPORT for each TC. */ - __le16 first_tx_pq_id[NUM_OF_TCS]; + u16 first_tx_pq_id[NUM_OF_TCS]; }; #endif /* __ECORE_HSI_INIT_FUNC__ */ diff --git a/drivers/net/qede/base/ecore_hsi_init_tool.h b/drivers/net/qede/base/ecore_hsi_init_tool.h index 1f57e9b2..2e338a98 100644 --- a/drivers/net/qede/base/ecore_hsi_init_tool.h +++ b/drivers/net/qede/base/ecore_hsi_init_tool.h @@ -1,7 +1,7 @@ /* - * Copyright (c) 2016 QLogic Corporation. + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.qede_pmd for copyright and licensing details. */ @@ -30,59 +30,13 @@ enum chip_ids { }; -struct fw_asserts_ram_section { -/* The offset of the section in the RAM in RAM lines (64-bit units) */ - __le16 section_ram_line_offset; -/* The size of the section in RAM lines (64-bit units) */ - __le16 section_ram_line_size; -/* The offset of the asserts list within the section in dwords */ - u8 list_dword_offset; -/* The size of an assert list element in dwords */ - u8 list_element_dword_size; - u8 list_num_elements /* The number of elements in the asserts list */; -/* The offset of the next list index field within the section in dwords */ - u8 list_next_index_dword_offset; -}; - - -struct fw_ver_num { - u8 major /* Firmware major version number */; - u8 minor /* Firmware minor version number */; - u8 rev /* Firmware revision version number */; -/* Firmware engineering version number (for bootleg versions) */ - u8 eng; -}; - -struct fw_ver_info { - __le16 tools_ver /* Tools version number */; - u8 image_id /* FW image ID (e.g. main, l2b, kuku) */; - u8 reserved1; - struct fw_ver_num num /* FW version number */; - __le32 timestamp /* FW Timestamp in unix time (sec. since 1970) */; - __le32 reserved2; -}; - -struct fw_info { - struct fw_ver_info ver /* FW version information */; -/* Info regarding the FW asserts section in the Storm RAM */ - struct fw_asserts_ram_section fw_asserts_section; -}; - - -struct fw_info_location { -/* GRC address where the fw_info struct is located. */ - __le32 grc_addr; -/* Size of the fw_info structure (thats located at the grc_addr). 
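/* Illustrative sketch (not part of the patch): preparing one Tx PQ parameter
 * entry. Note that port_id now lives in the per-PQ block -- the QM init code
 * later in this patch reads pq_params[i].port_id instead of taking a single
 * port for the whole PF. Values are illustrative. */
static void example_fill_pq(struct init_qm_pq_params *p_pq)
{
        p_pq->vport_id = 5;  /* VPORT this PQ belongs to */
        p_pq->tc_id = 0;     /* traffic class */
        p_pq->wrr_group = 1; /* WRR group */
        p_pq->rl_valid = 0;  /* no per-PQ rate limiter */
        p_pq->port_id = 0;   /* physical port, new in this revision */
}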
*/ - __le32 size; -}; - /* * Binary buffer header */ struct bin_buffer_hdr { /* buffer offset in bytes from the beginning of the binary file */ - __le32 offset; - __le32 length /* buffer length in bytes */; + u32 offset; + u32 length /* buffer length in bytes */; }; @@ -103,7 +57,7 @@ enum bin_init_buffer_type { * init array header: raw */ struct init_array_raw_hdr { - __le32 data; + u32 data; /* Init array type, from init_array_types enum */ #define INIT_ARRAY_RAW_HDR_TYPE_MASK 0xF #define INIT_ARRAY_RAW_HDR_TYPE_SHIFT 0 @@ -116,7 +70,7 @@ struct init_array_raw_hdr { * init array header: standard */ struct init_array_standard_hdr { - __le32 data; + u32 data; /* Init array type, from init_array_types enum */ #define INIT_ARRAY_STANDARD_HDR_TYPE_MASK 0xF #define INIT_ARRAY_STANDARD_HDR_TYPE_SHIFT 0 @@ -129,7 +83,7 @@ struct init_array_standard_hdr { * init array header: zipped */ struct init_array_zipped_hdr { - __le32 data; + u32 data; /* Init array type, from init_array_types enum */ #define INIT_ARRAY_ZIPPED_HDR_TYPE_MASK 0xF #define INIT_ARRAY_ZIPPED_HDR_TYPE_SHIFT 0 @@ -142,7 +96,7 @@ struct init_array_zipped_hdr { * init array header: pattern */ struct init_array_pattern_hdr { - __le32 data; + u32 data; /* Init array type, from init_array_types enum */ #define INIT_ARRAY_PATTERN_HDR_TYPE_MASK 0xF #define INIT_ARRAY_PATTERN_HDR_TYPE_SHIFT 0 @@ -223,14 +177,14 @@ enum init_array_types { * init operation: callback */ struct init_callback_op { - __le32 op_data; + u32 op_data; /* Init operation, from init_op_types enum */ #define INIT_CALLBACK_OP_OP_MASK 0xF #define INIT_CALLBACK_OP_OP_SHIFT 0 #define INIT_CALLBACK_OP_RESERVED_MASK 0xFFFFFFF #define INIT_CALLBACK_OP_RESERVED_SHIFT 4 - __le16 callback_id /* Callback ID */; - __le16 block_id /* Blocks ID */; + u16 callback_id /* Callback ID */; + u16 block_id /* Blocks ID */; }; @@ -238,7 +192,7 @@ struct init_callback_op { * init operation: delay */ struct init_delay_op { - __le32 op_data; + u32 op_data; /* Init operation, from init_op_types enum */ #define INIT_DELAY_OP_OP_MASK 0xF #define INIT_DELAY_OP_OP_SHIFT 0 @@ -252,7 +206,7 @@ struct init_delay_op { * init operation: if_mode */ struct init_if_mode_op { - __le32 op_data; + u32 op_data; /* Init operation, from init_op_types enum */ #define INIT_IF_MODE_OP_OP_MASK 0xF #define INIT_IF_MODE_OP_OP_SHIFT 0 @@ -261,9 +215,8 @@ struct init_if_mode_op { /* Commands to skip if the modes dont match */ #define INIT_IF_MODE_OP_CMD_OFFSET_MASK 0xFFFF #define INIT_IF_MODE_OP_CMD_OFFSET_SHIFT 16 - __le16 reserved2; -/* offset (in bytes) in modes expression buffer */ - __le16 modes_buf_offset; + u16 reserved2; + u16 modes_buf_offset /* offset (in bytes) in modes expression buffer */; }; @@ -271,7 +224,7 @@ struct init_if_mode_op { * init operation: if_phase */ struct init_if_phase_op { - __le32 op_data; + u32 op_data; /* Init operation, from init_op_types enum */ #define INIT_IF_PHASE_OP_OP_MASK 0xF #define INIT_IF_PHASE_OP_OP_SHIFT 0 @@ -283,7 +236,7 @@ struct init_if_phase_op { /* Commands to skip if the phases dont match */ #define INIT_IF_PHASE_OP_CMD_OFFSET_MASK 0xFFFF #define INIT_IF_PHASE_OP_CMD_OFFSET_SHIFT 16 - __le32 phase_data; + u32 phase_data; #define INIT_IF_PHASE_OP_PHASE_MASK 0xFF /* Init phase */ #define INIT_IF_PHASE_OP_PHASE_SHIFT 0 #define INIT_IF_PHASE_OP_RESERVED2_MASK 0xFF @@ -308,21 +261,21 @@ enum init_mode_ops { * init operation: raw */ struct init_raw_op { - __le32 op_data; + u32 op_data; /* Init operation, from init_op_types enum */ #define INIT_RAW_OP_OP_MASK 0xF #define 
INIT_RAW_OP_OP_SHIFT 0 #define INIT_RAW_OP_PARAM1_MASK 0xFFFFFFF /* init param 1 */ #define INIT_RAW_OP_PARAM1_SHIFT 4 - __le32 param2 /* Init param 2 */; + u32 param2 /* Init param 2 */; }; /* * init array params */ struct init_op_array_params { - __le16 size /* array size in dwords */; - __le16 offset /* array start offset in dwords */; + u16 size /* array size in dwords */; + u16 offset /* array start offset in dwords */; }; /* @@ -330,11 +283,11 @@ struct init_op_array_params { */ union init_write_args { /* value to write, used when init source is INIT_SRC_INLINE */ - __le32 inline_val; + u32 inline_val; /* number of zeros to write, used when init source is INIT_SRC_ZEROS */ - __le32 zeros_count; + u32 zeros_count; /* array offset to write, used when init source is INIT_SRC_ARRAY */ - __le32 array_offset; + u32 array_offset; /* runtime array params to write, used when init source is INIT_SRC_RUNTIME */ struct init_op_array_params runtime; }; @@ -343,7 +296,7 @@ union init_write_args { * init operation: write */ struct init_write_op { - __le32 data; + u32 data; /* init operation, from init_op_types enum */ #define INIT_WRITE_OP_OP_MASK 0xF #define INIT_WRITE_OP_OP_SHIFT 0 @@ -365,7 +318,7 @@ struct init_write_op { * init operation: read */ struct init_read_op { - __le32 op_data; + u32 op_data; /* init operation, from init_op_types enum */ #define INIT_READ_OP_OP_MASK 0xF #define INIT_READ_OP_OP_SHIFT 0 @@ -378,7 +331,7 @@ struct init_read_op { #define INIT_READ_OP_ADDRESS_MASK 0x7FFFFF #define INIT_READ_OP_ADDRESS_SHIFT 9 /* expected polling value, used only when polling is done */ - __le32 expected_val; + u32 expected_val; }; /* @@ -444,11 +397,11 @@ enum init_source_types { * Internal RAM Offsets macro data */ struct iro { - __le32 base /* RAM field offset */; - __le16 m1 /* multiplier 1 */; - __le16 m2 /* multiplier 2 */; - __le16 m3 /* multiplier 3 */; - __le16 size /* RAM field size */; + u32 base /* RAM field offset */; + u16 m1 /* multiplier 1 */; + u16 m2 /* multiplier 2 */; + u16 m3 /* multiplier 3 */; + u16 size /* RAM field size */; }; #endif /* __ECORE_HSI_INIT_TOOL__ */ diff --git a/drivers/net/qede/base/ecore_hw.c b/drivers/net/qede/base/ecore_hw.c index 84f273b0..b00c33d4 100644 --- a/drivers/net/qede/base/ecore_hw.c +++ b/drivers/net/qede/base/ecore_hw.c @@ -1,7 +1,7 @@ /* - * Copyright (c) 2016 QLogic Corporation. + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.qede_pmd for copyright and licensing details. 
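/* Illustrative sketch (not part of the patch): how an init engine interprets
 * union init_write_args above -- the active member is selected by the op's
 * init source type, exactly as the per-member comments state. The enum values
 * are from init_source_types in this header; the body only shows the
 * dispatch, the actual register writes are elided. */
static void example_apply_write(u32 source, const union init_write_args *p_args)
{
        switch (source) {
        case INIT_SRC_INLINE:  /* write p_args->inline_val to the target */
                break;
        case INIT_SRC_ZEROS:   /* write p_args->zeros_count zero dwords */
                break;
        case INIT_SRC_ARRAY:   /* copy from the init array at p_args->array_offset */
                break;
        case INIT_SRC_RUNTIME: /* copy p_args->runtime.size dwords starting at
                                * p_args->runtime.offset of the runtime array */
                break;
        }
}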
*/ @@ -38,6 +38,12 @@ struct ecore_ptt_pool { struct ecore_ptt ptts[PXP_EXTERNAL_BAR_PF_WINDOW_NUM]; }; +void __ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn) +{ + OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_ptt_pool); + p_hwfn->p_ptt_pool = OSAL_NULL; +} + enum _ecore_status_t ecore_ptt_pool_alloc(struct ecore_hwfn *p_hwfn) { struct ecore_ptt_pool *p_pool = OSAL_ALLOC(p_hwfn->p_dev, @@ -65,10 +71,12 @@ enum _ecore_status_t ecore_ptt_pool_alloc(struct ecore_hwfn *p_hwfn) p_hwfn->p_ptt_pool = p_pool; #ifdef CONFIG_ECORE_LOCK_ALLOC - OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_pool->lock); + if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_pool->lock)) { + __ecore_ptt_pool_free(p_hwfn); + return ECORE_NOMEM; + } #endif OSAL_SPIN_LOCK_INIT(&p_pool->lock); - return ECORE_SUCCESS; } @@ -89,7 +97,7 @@ void ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn) if (p_hwfn->p_ptt_pool) OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->p_ptt_pool->lock); #endif - OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_ptt_pool); + __ecore_ptt_pool_free(p_hwfn); } struct ecore_ptt *ecore_ptt_acquire(struct ecore_hwfn *p_hwfn) @@ -569,7 +577,7 @@ enum _ecore_status_t ecore_dmae_info_alloc(struct ecore_hwfn *p_hwfn) *p_comp = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr, sizeof(u32)); if (*p_comp == OSAL_NULL) { - DP_NOTICE(p_hwfn, true, + DP_NOTICE(p_hwfn, false, "Failed to allocate `p_completion_word'\n"); goto err; } @@ -578,7 +586,7 @@ enum _ecore_status_t ecore_dmae_info_alloc(struct ecore_hwfn *p_hwfn) *p_cmd = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr, sizeof(struct dmae_cmd)); if (*p_cmd == OSAL_NULL) { - DP_NOTICE(p_hwfn, true, + DP_NOTICE(p_hwfn, false, "Failed to allocate `struct dmae_cmd'\n"); goto err; } @@ -587,12 +595,13 @@ enum _ecore_status_t ecore_dmae_info_alloc(struct ecore_hwfn *p_hwfn) *p_buff = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr, sizeof(u32) * DMAE_MAX_RW_SIZE); if (*p_buff == OSAL_NULL) { - DP_NOTICE(p_hwfn, true, + DP_NOTICE(p_hwfn, false, "Failed to allocate `intermediate_buffer'\n"); goto err; } - p_hwfn->dmae_info.channel = p_hwfn->rel_pf_id; + p_hwfn->dmae_info.channel = p_hwfn->rel_pf_id; + p_hwfn->dmae_info.b_mem_ready = true; return ECORE_SUCCESS; err: @@ -604,8 +613,9 @@ void ecore_dmae_info_free(struct ecore_hwfn *p_hwfn) { dma_addr_t p_phys; - /* Just make sure no one is in the middle */ - OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex); + OSAL_SPIN_LOCK(&p_hwfn->dmae_info.lock); + p_hwfn->dmae_info.b_mem_ready = false; + OSAL_SPIN_UNLOCK(&p_hwfn->dmae_info.lock); if (p_hwfn->dmae_info.p_completion_word != OSAL_NULL) { p_phys = p_hwfn->dmae_info.completion_word_phys_addr; @@ -630,8 +640,6 @@ void ecore_dmae_info_free(struct ecore_hwfn *p_hwfn) p_phys, sizeof(u32) * DMAE_MAX_RW_SIZE); p_hwfn->dmae_info.p_intermediate_buffer = OSAL_NULL; } - - OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex); } static enum _ecore_status_t ecore_dmae_operation_wait(struct ecore_hwfn *p_hwfn) @@ -777,6 +785,15 @@ ecore_dmae_execute_command(struct ecore_hwfn *p_hwfn, enum _ecore_status_t ecore_status = ECORE_SUCCESS; u32 offset = 0; + if (!p_hwfn->dmae_info.b_mem_ready) { + DP_VERBOSE(p_hwfn, ECORE_MSG_HW, + "No buffers allocated. Avoid DMAE transaction [{src: addr 0x%lx, type %d}, {dst: addr 0x%lx, type %d}, size %d].\n", + (unsigned long)src_addr, src_type, + (unsigned long)dst_addr, dst_type, + size_in_dwords); + return ECORE_NOMEM; + } + if (p_hwfn->p_dev->recov_in_prog) { DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "Recovery is in progress. 
Avoid DMAE transaction [{src: addr 0x%lx, type %d}, {dst: addr 0x%lx, type %d}, size %d].\n", @@ -870,7 +887,7 @@ ecore_dmae_host2grc(struct ecore_hwfn *p_hwfn, OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params)); params.flags = flags; - OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex); + OSAL_SPIN_LOCK(&p_hwfn->dmae_info.lock); rc = ecore_dmae_execute_command(p_hwfn, p_ptt, source_addr, grc_addr_in_dw, @@ -878,7 +895,7 @@ ecore_dmae_host2grc(struct ecore_hwfn *p_hwfn, ECORE_DMAE_ADDRESS_GRC, size_in_dwords, &params); - OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex); + OSAL_SPIN_UNLOCK(&p_hwfn->dmae_info.lock); return rc; } @@ -896,14 +913,14 @@ ecore_dmae_grc2host(struct ecore_hwfn *p_hwfn, OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params)); params.flags = flags; - OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex); + OSAL_SPIN_LOCK(&p_hwfn->dmae_info.lock); rc = ecore_dmae_execute_command(p_hwfn, p_ptt, grc_addr_in_dw, dest_addr, ECORE_DMAE_ADDRESS_GRC, ECORE_DMAE_ADDRESS_HOST_VIRT, size_in_dwords, &params); - OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex); + OSAL_SPIN_UNLOCK(&p_hwfn->dmae_info.lock); return rc; } @@ -917,7 +934,7 @@ ecore_dmae_host2host(struct ecore_hwfn *p_hwfn, { enum _ecore_status_t rc; - OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex); + OSAL_SPIN_LOCK(&p_hwfn->dmae_info.lock); rc = ecore_dmae_execute_command(p_hwfn, p_ptt, source_addr, dest_addr, @@ -925,7 +942,7 @@ ecore_dmae_host2host(struct ecore_hwfn *p_hwfn, ECORE_DMAE_ADDRESS_HOST_PHYS, size_in_dwords, p_params); - OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex); + OSAL_SPIN_UNLOCK(&p_hwfn->dmae_info.lock); return rc; } @@ -944,3 +961,74 @@ void ecore_hw_err_notify(struct ecore_hwfn *p_hwfn, OSAL_HW_ERROR_OCCURRED(p_hwfn, err_type); } + +enum _ecore_status_t ecore_dmae_sanity(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + const char *phase) +{ + u32 size = OSAL_PAGE_SIZE / 2, val; + struct ecore_dmae_params params; + enum _ecore_status_t rc = ECORE_SUCCESS; + dma_addr_t p_phys; + void *p_virt; + u32 *p_tmp; + + p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &p_phys, 2 * size); + if (!p_virt) { + DP_NOTICE(p_hwfn, false, + "DMAE sanity [%s]: failed to allocate memory\n", + phase); + return ECORE_NOMEM; + } + + /* Fill the bottom half of the allocated memory with a known pattern */ + for (p_tmp = (u32 *)p_virt; + p_tmp < (u32 *)((u8 *)p_virt + size); + p_tmp++) { + /* Save the address itself as the value */ + val = (u32)(osal_uintptr_t)p_tmp; + *p_tmp = val; + } + + /* Zero the top half of the allocated memory */ + OSAL_MEM_ZERO((u8 *)p_virt + size, size); + + DP_VERBOSE(p_hwfn, ECORE_MSG_SP, + "DMAE sanity [%s]: src_addr={phys 0x%lx, virt %p}, dst_addr={phys 0x%lx, virt %p}, size 0x%x\n", + phase, (unsigned long)p_phys, p_virt, + (unsigned long)(p_phys + size), + (u8 *)p_virt + size, size); + + OSAL_MEMSET(&params, 0, sizeof(params)); + rc = ecore_dmae_host2host(p_hwfn, p_ptt, p_phys, p_phys + size, + size / 4 /* size_in_dwords */, &params); + if (rc != ECORE_SUCCESS) { + DP_NOTICE(p_hwfn, false, + "DMAE sanity [%s]: ecore_dmae_host2host() failed. 
rc = %d.\n", + phase, rc); + goto out; + } + + /* Verify that the top half of the allocated memory has the pattern */ + for (p_tmp = (u32 *)((u8 *)p_virt + size); + p_tmp < (u32 *)((u8 *)p_virt + (2 * size)); + p_tmp++) { + /* The corresponding address in the bottom half */ + val = (u32)(osal_uintptr_t)p_tmp - size; + + if (*p_tmp != val) { + DP_NOTICE(p_hwfn, false, + "DMAE sanity [%s]: addr={phys 0x%lx, virt %p}, read_val 0x%08x, expected_val 0x%08x\n", + phase, + (unsigned long)p_phys + + ((u8 *)p_tmp - (u8 *)p_virt), + p_tmp, *p_tmp, val); + rc = ECORE_UNKNOWN_ERROR; + goto out; + } + } + +out: + OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_virt, p_phys, 2 * size); + return rc; +} diff --git a/drivers/net/qede/base/ecore_hw.h b/drivers/net/qede/base/ecore_hw.h index 0b9814f5..f3f513e8 100644 --- a/drivers/net/qede/base/ecore_hw.h +++ b/drivers/net/qede/base/ecore_hw.h @@ -1,7 +1,7 @@ /* - * Copyright (c) 2016 QLogic Corporation. + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.qede_pmd for copyright and licensing details. */ @@ -255,4 +255,8 @@ enum _ecore_status_t ecore_init_fw_data(struct ecore_dev *p_dev, void ecore_hw_err_notify(struct ecore_hwfn *p_hwfn, enum ecore_hw_err_type err_type); +enum _ecore_status_t ecore_dmae_sanity(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + const char *phase); + #endif /* __ECORE_HW_H__ */ diff --git a/drivers/net/qede/base/ecore_hw_defs.h b/drivers/net/qede/base/ecore_hw_defs.h index 4456af43..2f4bd536 100644 --- a/drivers/net/qede/base/ecore_hw_defs.h +++ b/drivers/net/qede/base/ecore_hw_defs.h @@ -1,7 +1,7 @@ /* - * Copyright (c) 2016 QLogic Corporation. + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.qede_pmd for copyright and licensing details. */ diff --git a/drivers/net/qede/base/ecore_init_fw_funcs.c b/drivers/net/qede/base/ecore_init_fw_funcs.c index 1da80a65..3f986629 100644 --- a/drivers/net/qede/base/ecore_init_fw_funcs.c +++ b/drivers/net/qede/base/ecore_init_fw_funcs.c @@ -1,7 +1,7 @@ /* - * Copyright (c) 2016 QLogic Corporation. + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.qede_pmd for copyright and licensing details. */ @@ -76,12 +76,12 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = { #define QM_RL_PERIOD_CLK_25M (25 * QM_RL_PERIOD) /* RL increment value - rate is specified in mbps. the factor of 1.01 was -* added after seeing only 99% factor reached in a 25Gbps port with DPDK RFC -* 2544 test. In this scenario the PF RL was reducing the line rate to 99% -* although the credit increment value was the correct one and FW calculated -* correct packet sizes. The reason for the inaccuracy of the RL is unknown at -* this point. -*/ + * added after seeing only 99% factor reached in a 25Gbps port with DPDK RFC + * 2544 test. In this scenario the PF RL was reducing the line rate to 99% + * although the credit increment value was the correct one and FW calculated + * correct packet sizes. The reason for the inaccuracy of the RL is unknown at + * this point. + */ #define QM_RL_INC_VAL(rate) \ OSAL_MAX_T(u32, (u32)(((rate ? 
rate : 100000) * QM_RL_PERIOD * 101) / \ (8 * 100)), 1) @@ -182,7 +182,7 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = { (((vp) << 0) | ((pf) << 12) | ((tc) << 16) | \ ((port) << 20) | ((rl_valid) << 22) | ((rl) << 24)) #define PQ_INFO_RAM_GRC_ADDRESS(pq_id) \ - (XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + 21768 + (pq_id) * 4) + (XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + 21776 + (pq_id) * 4) /******************** INTERNAL IMPLEMENTATION *********************/ @@ -421,9 +421,9 @@ static void ecore_btb_blocks_rt_init(struct ecore_hwfn *p_hwfn, /* Prepare Tx PQ mapping runtime init values for the specified PF */ static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, - u8 port_id, u8 pf_id, u8 max_phys_tcs_per_port, + bool is_pf_loading, u32 num_pf_cids, u32 num_vf_cids, u16 start_pq, @@ -437,7 +437,7 @@ static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn, /* A bit per Tx PQ indicating if the PQ is associated with a VF */ u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 }; u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE; - u16 num_pqs, first_pq_group, last_pq_group, i, pq_id, pq_group; + u16 num_pqs, first_pq_group, last_pq_group, i, j, pq_id, pq_group; u32 pq_mem_4kb, vport_pq_mem_4kb, mem_addr_4kb; num_pqs = num_pf_pqs + num_vf_pqs; @@ -467,11 +467,11 @@ static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn, bool is_vf_pq, rl_valid; u16 first_tx_pq_id; - ext_voq = ecore_get_ext_voq(p_hwfn, port_id, pq_params[i].tc_id, + ext_voq = ecore_get_ext_voq(p_hwfn, pq_params[i].port_id, + pq_params[i].tc_id, max_phys_tcs_per_port); is_vf_pq = (i >= num_pf_pqs); - rl_valid = pq_params[i].rl_valid && pq_params[i].vport_id < - max_qm_global_rls; + rl_valid = pq_params[i].rl_valid > 0; /* Update first Tx PQ of VPORT/TC */ vport_id_in_pf = pq_params[i].vport_id - start_vport; @@ -492,28 +492,38 @@ static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn, } /* Check RL ID */ - if (pq_params[i].rl_valid && pq_params[i].vport_id >= - max_qm_global_rls) + if (rl_valid && pq_params[i].vport_id >= max_qm_global_rls) { DP_NOTICE(p_hwfn, true, "Invalid VPORT ID for rate limiter config\n"); + rl_valid = false; + } /* Prepare PQ map entry */ struct qm_rf_pq_map_e4 tx_pq_map; + QM_INIT_TX_PQ_MAP(p_hwfn, tx_pq_map, E4, pq_id, rl_valid ? 1 : 0, first_tx_pq_id, rl_valid ? pq_params[i].vport_id : 0, ext_voq, pq_params[i].wrr_group); - /* Set base address */ + /* Set PQ base address */ STORE_RT_REG(p_hwfn, QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id, mem_addr_4kb); + /* Clear PQ pointer table entry (64 bit) */ + if (is_pf_loading) + for (j = 0; j < 2; j++) + STORE_RT_REG(p_hwfn, QM_REG_PTRTBLTX_RT_OFFSET + + (pq_id * 2) + j, 0); + /* Write PQ info to RAM */ if (WRITE_PQ_INFO_TO_RAM != 0) { u32 pq_info = 0; + pq_info = PQ_INFO_ELEMENT(first_tx_pq_id, pf_id, - pq_params[i].tc_id, port_id, + pq_params[i].tc_id, + pq_params[i].port_id, rl_valid ? 1 : 0, rl_valid ? 
pq_params[i].vport_id : 0); ecore_wr(p_hwfn, p_ptt, PQ_INFO_RAM_GRC_ADDRESS(pq_id), @@ -540,12 +550,13 @@ static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn, /* Prepare Other PQ mapping runtime init values for the specified PF */ static void ecore_other_pq_map_rt_init(struct ecore_hwfn *p_hwfn, u8 pf_id, + bool is_pf_loading, u32 num_pf_cids, u32 num_tids, u32 base_mem_addr_4kb) { u32 pq_size, pq_mem_4kb, mem_addr_4kb; - u16 i, pq_id, pq_group; + u16 i, j, pq_id, pq_group; /* A single other PQ group is used in each PF, where PQ group i is used * in PF i. @@ -563,11 +574,19 @@ static void ecore_other_pq_map_rt_init(struct ecore_hwfn *p_hwfn, STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET, QM_PQ_SIZE_256B(pq_size)); - /* Set base address */ for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE; i < QM_OTHER_PQS_PER_PF; i++, pq_id++) { + /* Set PQ base address */ STORE_RT_REG(p_hwfn, QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id, mem_addr_4kb); + + /* Clear PQ pointer table entry */ + if (is_pf_loading) + for (j = 0; j < 2; j++) + STORE_RT_REG(p_hwfn, + QM_REG_PTRTBLOTHER_RT_OFFSET + + (pq_id * 2) + j, 0); + mem_addr_4kb += pq_mem_4kb; } } @@ -576,7 +595,6 @@ static void ecore_other_pq_map_rt_init(struct ecore_hwfn *p_hwfn, * Return -1 on error. */ static int ecore_pf_wfq_rt_init(struct ecore_hwfn *p_hwfn, - u8 port_id, u8 pf_id, u16 pf_wfq, u8 max_phys_tcs_per_port, @@ -595,7 +613,8 @@ static int ecore_pf_wfq_rt_init(struct ecore_hwfn *p_hwfn, } for (i = 0; i < num_tx_pqs; i++) { - ext_voq = ecore_get_ext_voq(p_hwfn, port_id, pq_params[i].tc_id, + ext_voq = ecore_get_ext_voq(p_hwfn, pq_params[i].port_id, + pq_params[i].tc_id, max_phys_tcs_per_port); crd_reg_offset = (pf_id < MAX_NUM_PFS_BB ? QM_REG_WFQPFCRD_RT_OFFSET : @@ -604,12 +623,12 @@ static int ecore_pf_wfq_rt_init(struct ecore_hwfn *p_hwfn, (pf_id % MAX_NUM_PFS_BB); OVERWRITE_RT_REG(p_hwfn, crd_reg_offset, (u32)QM_WFQ_CRD_REG_SIGN_BIT); - STORE_RT_REG(p_hwfn, QM_REG_WFQPFUPPERBOUND_RT_OFFSET + pf_id, - QM_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT); - STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + pf_id, - inc_val); } + STORE_RT_REG(p_hwfn, QM_REG_WFQPFUPPERBOUND_RT_OFFSET + + pf_id, QM_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT); + STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + pf_id, inc_val); + return 0; } @@ -820,9 +839,9 @@ int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn, int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, - u8 port_id, u8 pf_id, u8 max_phys_tcs_per_port, + bool is_pf_loading, u32 num_pf_cids, u32 num_vf_cids, u32 num_tids, @@ -850,20 +869,21 @@ int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn, /* Map Other PQs (if any) */ #if QM_OTHER_PQS_PER_PF > 0 - ecore_other_pq_map_rt_init(p_hwfn, pf_id, num_pf_cids, num_tids, 0); + ecore_other_pq_map_rt_init(p_hwfn, pf_id, is_pf_loading, num_pf_cids, + num_tids, 0); #endif /* Map Tx PQs */ - ecore_tx_pq_map_rt_init(p_hwfn, p_ptt, port_id, pf_id, - max_phys_tcs_per_port, num_pf_cids, num_vf_cids, + ecore_tx_pq_map_rt_init(p_hwfn, p_ptt, pf_id, max_phys_tcs_per_port, + is_pf_loading, num_pf_cids, num_vf_cids, start_pq, num_pf_pqs, num_vf_pqs, start_vport, other_mem_size_4kb, pq_params, vport_params); /* Init PF WFQ */ if (pf_wfq) - if (ecore_pf_wfq_rt_init - (p_hwfn, port_id, pf_id, pf_wfq, max_phys_tcs_per_port, - num_pf_pqs + num_vf_pqs, pq_params)) + if (ecore_pf_wfq_rt_init(p_hwfn, pf_id, pf_wfq, + max_phys_tcs_per_port, + num_pf_pqs + num_vf_pqs, pq_params)) return -1; /* Init PF RL */ @@ -1419,7 +1439,9 @@ void 
ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn, u32 ethType) #define SET_TUNNEL_TYPE_ENABLE_BIT(var, offset, enable) \ (var = ((var) & ~(1 << (offset))) | ((enable) ? (1 << (offset)) : 0)) -#define PRS_ETH_TUNN_FIC_FORMAT -188897008 +#define PRS_ETH_TUNN_OUTPUT_FORMAT -188897008 +#define PRS_ETH_OUTPUT_FORMAT -46832 + void ecore_set_vxlan_dest_port(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u16 dest_port) { @@ -1444,9 +1466,14 @@ void ecore_set_vxlan_enable(struct ecore_hwfn *p_hwfn, PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT, vxlan_enable); ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val); - if (reg_val) { - ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2, - (u32)PRS_ETH_TUNN_FIC_FORMAT); + if (reg_val) { /* TODO: handle E5 init */ + reg_val = ecore_rd(p_hwfn, p_ptt, + PRS_REG_OUTPUT_FORMAT_4_0_BB_K2); + + /* Update output only if tunnel blocks not included. */ + if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT) + ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2, + (u32)PRS_ETH_TUNN_OUTPUT_FORMAT); } /* Update NIG register */ @@ -1476,9 +1503,14 @@ void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn, PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT, ip_gre_enable); ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val); - if (reg_val) { - ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2, - (u32)PRS_ETH_TUNN_FIC_FORMAT); + if (reg_val) { /* TODO: handle E5 init */ + reg_val = ecore_rd(p_hwfn, p_ptt, + PRS_REG_OUTPUT_FORMAT_4_0_BB_K2); + + /* Update output only if tunnel blocks not included. */ + if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT) + ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2, + (u32)PRS_ETH_TUNN_OUTPUT_FORMAT); } /* Update NIG register */ @@ -1526,9 +1558,14 @@ void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn, PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT, ip_geneve_enable); ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val); - if (reg_val) { - ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2, - (u32)PRS_ETH_TUNN_FIC_FORMAT); + if (reg_val) { /* TODO: handle E5 init */ + reg_val = ecore_rd(p_hwfn, p_ptt, + PRS_REG_OUTPUT_FORMAT_4_0_BB_K2); + + /* Update output only if tunnel blocks not included. */ + if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT) + ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2, + (u32)PRS_ETH_TUNN_OUTPUT_FORMAT); } /* Update NIG register */ @@ -1548,6 +1585,36 @@ void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn, ip_geneve_enable ? 
1 : 0); } +#define PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET 4 +#define PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT -927094512 + +void ecore_set_vxlan_no_l2_enable(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + bool enable) +{ + u32 reg_val, cfg_mask; + + /* read PRS config register */ + reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_MSG_INFO); + + /* set VXLAN_NO_L2_ENABLE mask */ + cfg_mask = (1 << PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET); + + if (enable) { + /* set VXLAN_NO_L2_ENABLE flag */ + reg_val |= cfg_mask; + + /* update PRS FIC register */ + ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2, + (u32)PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT); + } else { + /* clear VXLAN_NO_L2_ENABLE flag */ + reg_val &= ~cfg_mask; + } + + /* write PRS config register */ + ecore_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, reg_val); +} #define T_ETH_PACKET_ACTION_GFT_EVENTID 23 #define PARSER_ETH_CONN_GFT_ACTION_CM_HDR 272 @@ -1664,6 +1731,10 @@ void ecore_gft_config(struct ecore_hwfn *p_hwfn, ram_line_lo = 0; ram_line_hi = 0; + /* Tunnel type */ + SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_DST_PORT, 1); + SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL, 1); + if (profile_type == GFT_PROFILE_TYPE_4_TUPLE) { SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1); SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1); @@ -1675,9 +1746,14 @@ void ecore_gft_config(struct ecore_hwfn *p_hwfn, SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1); SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1); SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1); - } else if (profile_type == GFT_PROFILE_TYPE_IP_DST_PORT) { + } else if (profile_type == GFT_PROFILE_TYPE_IP_DST_ADDR) { SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1); SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1); + } else if (profile_type == GFT_PROFILE_TYPE_IP_SRC_ADDR) { + SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1); + SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1); + } else if (profile_type == GFT_PROFILE_TYPE_TUNNEL_TYPE) { + SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_ETHERTYPE, 1); } ecore_wr(p_hwfn, p_ptt, @@ -1921,3 +1997,53 @@ void ecore_enable_context_validation(struct ecore_hwfn *p_hwfn, ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8; ecore_wr(p_hwfn, p_ptt, CDU_REG_TCFC_CTX_VALID0, ctx_validation); } + +#define RSS_IND_TABLE_BASE_ADDR 4112 +#define RSS_IND_TABLE_VPORT_SIZE 16 +#define RSS_IND_TABLE_ENTRY_PER_LINE 8 + +/* Update RSS indirection table entry. 
*/ +void ecore_update_eth_rss_ind_table_entry(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u8 rss_id, + u8 ind_table_index, + u16 ind_table_value) +{ + u32 cnt, rss_addr; + u32 *reg_val; + u16 rss_ind_entry[RSS_IND_TABLE_ENTRY_PER_LINE]; + u16 rss_ind_mask[RSS_IND_TABLE_ENTRY_PER_LINE]; + + /* get entry address */ + rss_addr = RSS_IND_TABLE_BASE_ADDR + + RSS_IND_TABLE_VPORT_SIZE * rss_id + + ind_table_index / RSS_IND_TABLE_ENTRY_PER_LINE; + + /* prepare update command */ + ind_table_index %= RSS_IND_TABLE_ENTRY_PER_LINE; + + for (cnt = 0; cnt < RSS_IND_TABLE_ENTRY_PER_LINE; cnt++) { + if (cnt == ind_table_index) { + rss_ind_entry[cnt] = ind_table_value; + rss_ind_mask[cnt] = 0xFFFF; + } else { + rss_ind_entry[cnt] = 0; + rss_ind_mask[cnt] = 0; + } + } + + /* Update entry in HW*/ + ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr); + + reg_val = (u32 *)rss_ind_mask; + ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_MASK, reg_val[0]); + ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_MASK + 4, reg_val[1]); + ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_MASK + 8, reg_val[2]); + ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_MASK + 12, reg_val[3]); + + reg_val = (u32 *)rss_ind_entry; + ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_DATA, reg_val[0]); + ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_DATA + 4, reg_val[1]); + ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_DATA + 8, reg_val[2]); + ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_DATA + 12, reg_val[3]); +} diff --git a/drivers/net/qede/base/ecore_init_fw_funcs.h b/drivers/net/qede/base/ecore_init_fw_funcs.h index ab560e59..310c9ed7 100644 --- a/drivers/net/qede/base/ecore_init_fw_funcs.h +++ b/drivers/net/qede/base/ecore_init_fw_funcs.h @@ -1,7 +1,7 @@ /* - * Copyright (c) 2016 QLogic Corporation. + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.qede_pmd for copyright and licensing details. */ @@ -61,9 +61,10 @@ int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn, * * @param p_hwfn * @param p_ptt - ptt window used for writing the registers - * @param port_id - port ID * @param pf_id - PF ID * @param max_phys_tcs_per_port - max number of physical TCs per port in HW + * @param is_pf_loading - indicates if the PF is currently loading, + * i.e. it has no allocated QM resources. * @param num_pf_cids - number of connections used by this PF * @param num_vf_cids - number of connections used by VFs of this PF * @param num_tids - number of tasks used by this PF @@ -87,9 +88,9 @@ int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn, */ int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, - u8 port_id, u8 pf_id, u8 max_phys_tcs_per_port, + bool is_pf_loading, u32 num_pf_cids, u32 num_vf_cids, u32 num_tids, @@ -259,6 +260,16 @@ void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn, struct init_brb_ram_req *req); #endif /* UNUSED_HSI_FUNC */ +/** + * @brief ecore_set_vxlan_no_l2_enable - enable or disable VXLAN no L2 parsing + * + * @param p_ptt - ptt window used for writing the registers. + * @param enable - VXLAN no L2 enable flag. + */ +void ecore_set_vxlan_no_l2_enable(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + bool enable); + #ifndef UNUSED_HSI_FUNC /** * @brief ecore_set_port_mf_ovlan_eth_type - initializes DORQ ethType Regs to @@ -462,4 +473,22 @@ void ecore_memset_session_ctx(void *p_ctx_mem, void ecore_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type); + +/** + * @brief ecore_update_eth_rss_ind_table_entry - Update RSS indirection table + * entry. 
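/* Illustrative sketch (not part of the patch): seeding the indirection table
 * of RSS engine 0 with the helper above, spreading entries over four queues
 * round-robin. The table holds RSS_IND_TABLE_VPORT_SIZE (16) RAM lines of
 * RSS_IND_TABLE_ENTRY_PER_LINE (8) entries, i.e. 128 entries per engine;
 * using a queue index as the entry value is an assumption for the example.
 * Per the header comment below, the caller must hold exclusive access. */
static void example_seed_rss(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
        u16 i;

        for (i = 0; i < 128; i++)
                ecore_update_eth_rss_ind_table_entry(p_hwfn, p_ptt,
                                                     0 /* rss_id */,
                                                     (u8)i /* table index */,
                                                     i % 4 /* entry value */);
}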
+ * The function must run in exclusive mode to prevent wrong RSS configuration. + * + * @param p_hwfn - HW device data + * @param p_ptt - ptt window used for writing the registers. + * @param rss_id - RSS engine ID. + * @param ind_table_index - RSS indirect table index. + * @param ind_table_value - RSS indirect table new value. + */ +void ecore_update_eth_rss_ind_table_entry(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u8 rss_id, + u8 ind_table_index, + u16 ind_table_value); + #endif diff --git a/drivers/net/qede/base/ecore_init_ops.c b/drivers/net/qede/base/ecore_init_ops.c index 91633c11..eadccf40 100644 --- a/drivers/net/qede/base/ecore_init_ops.c +++ b/drivers/net/qede/base/ecore_init_ops.c @@ -1,7 +1,7 @@ /* - * Copyright (c) 2016 QLogic Corporation. + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.qede_pmd for copyright and licensing details. */ @@ -389,23 +389,29 @@ static void ecore_init_cmd_rd(struct ecore_hwfn *p_hwfn, } if (i == ECORE_INIT_MAX_POLL_COUNT) - DP_ERR(p_hwfn, - "Timeout when polling reg: 0x%08x [ Waiting-for: %08x" - " Got: %08x (comparsion %08x)]\n", + DP_ERR(p_hwfn, "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparison %08x)]\n", addr, OSAL_LE32_TO_CPU(cmd->expected_val), val, OSAL_LE32_TO_CPU(cmd->op_data)); } -/* init_ops callbacks entry point. - * OSAL_UNUSED is temporary used to avoid unused-parameter compilation warnings. - * Should be removed when the function is actually used. - */ -static void ecore_init_cmd_cb(struct ecore_hwfn *p_hwfn, - struct ecore_ptt OSAL_UNUSED * p_ptt, - struct init_callback_op OSAL_UNUSED * p_cmd) +/* init_ops callbacks entry point */ +static enum _ecore_status_t ecore_init_cmd_cb(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct init_callback_op *p_cmd) { - DP_NOTICE(p_hwfn, true, - "Currently init values have no need of callbacks\n"); + enum _ecore_status_t rc; + + switch (p_cmd->callback_id) { + case DMAE_READY_CB: + rc = ecore_dmae_sanity(p_hwfn, p_ptt, "engine_phase"); + break; + default: + DP_NOTICE(p_hwfn, false, "Unexpected init op callback ID %d\n", + p_cmd->callback_id); + return ECORE_INVAL; + } + + return rc; } static u8 ecore_init_cmd_mode_match(struct ecore_hwfn *p_hwfn, @@ -513,7 +519,7 @@ enum _ecore_status_t ecore_init_run(struct ecore_hwfn *p_hwfn, break; case INIT_OP_CALLBACK: - ecore_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback); + rc = ecore_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback); break; } diff --git a/drivers/net/qede/base/ecore_init_ops.h b/drivers/net/qede/base/ecore_init_ops.h index e293a4a3..7ca00e4e 100644 --- a/drivers/net/qede/base/ecore_init_ops.h +++ b/drivers/net/qede/base/ecore_init_ops.h @@ -1,7 +1,7 @@ /* - * Copyright (c) 2016 QLogic Corporation. + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.qede_pmd for copyright and licensing details. */ diff --git a/drivers/net/qede/base/ecore_int.c b/drivers/net/qede/base/ecore_int.c index e6cef85b..7272f059 100644 --- a/drivers/net/qede/base/ecore_int.c +++ b/drivers/net/qede/base/ecore_int.c @@ -1,11 +1,13 @@ /* - * Copyright (c) 2016 QLogic Corporation. + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.qede_pmd for copyright and licensing details. 
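/* Illustrative sketch (not part of the patch): the DMAE_READY_CB case above
 * runs ecore_dmae_sanity(), which fills the bottom half of a buffer with each
 * dword's own address, DMA-copies it over the zeroed top half, and verifies
 * the copy. The same check condensed to self-contained C, with memcpy
 * standing in for the DMAE transfer: */
#include <stdint.h>
#include <string.h>

static int dmae_pattern_check(void *buf, size_t half /* bytes, dword multiple */)
{
        uint32_t *p;

        /* bottom half: every dword holds its own (truncated) address */
        for (p = (uint32_t *)buf;
             p < (uint32_t *)((uint8_t *)buf + half); p++)
                *p = (uint32_t)(uintptr_t)p;

        memset((uint8_t *)buf + half, 0, half);
        memcpy((uint8_t *)buf + half, buf, half); /* the DMAE step */

        /* top half: each dword must match its bottom-half counterpart */
        for (p = (uint32_t *)((uint8_t *)buf + half);
             p < (uint32_t *)((uint8_t *)buf + 2 * half); p++)
                if (*p != (uint32_t)(uintptr_t)p - (uint32_t)half)
                        return -1; /* transfer corrupted or incomplete */
        return 0;
}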
*/ +#include + #include "bcm_osal.h" #include "ecore.h" #include "ecore_spq.h" @@ -285,9 +287,11 @@ out: #define ECORE_PGLUE_ATTENTION_ILT_VALID (1 << 23) enum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn, - struct ecore_ptt *p_ptt) + struct ecore_ptt *p_ptt, + bool is_hw_init) { u32 tmp; + char str[512] = {0}; tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS2); if (tmp & ECORE_PGLUE_ATTENTION_VALID) { @@ -299,9 +303,8 @@ enum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn, PGLUE_B_REG_TX_ERR_WR_ADD_63_32); details = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS); - - DP_NOTICE(p_hwfn, false, - "Illegal write by chip to [%08x:%08x] blocked. Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x] Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n", + OSAL_SNPRINTF(str, 512, + "Illegal write by chip to [%08x:%08x] blocked. Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x] Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n", addr_hi, addr_lo, details, (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >> @@ -318,6 +321,10 @@ enum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn, 1 : 0), (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0)); + if (is_hw_init) + DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "%s", str); + else + DP_NOTICE(p_hwfn, false, "%s", str); } tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_RD_DETAILS2); @@ -393,7 +400,7 @@ enum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn, static enum _ecore_status_t ecore_pglueb_rbc_attn_cb(struct ecore_hwfn *p_hwfn) { - return ecore_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt); + return ecore_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt, false); } static enum _ecore_status_t ecore_fw_assertion(struct ecore_hwfn *p_hwfn) @@ -1104,9 +1111,9 @@ static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn, p_aeu->bit_name, num); else - OSAL_STRNCPY(bit_name, - p_aeu->bit_name, - 30); + strlcpy(bit_name, + p_aeu->bit_name, + sizeof(bit_name)); /* We now need to pass bitmask in its * correct position. 
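/* Illustrative sketch (not part of the patch): why the deassertion path above
 * moves from OSAL_STRNCPY to strlcpy for the attention bit name. strlcpy is
 * bounded by the full destination size and always writes a terminating NUL,
 * while strncpy can leave the destination unterminated when the source fills
 * it. (In DPDK, strlcpy is provided via rte_string_fns.h on platforms that
 * lack it -- an assumption about the include added above.) */
static void example_copy_bit_name(char *dst, size_t dst_sz, const char *src)
{
        strlcpy(dst, src, dst_sz); /* copies at most dst_sz - 1 chars + NUL */
}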
@@ -1406,8 +1413,7 @@ static enum _ecore_status_t ecore_int_sb_attn_alloc(struct ecore_hwfn *p_hwfn, /* SB struct */ p_sb = OSAL_ALLOC(p_dev, GFP_KERNEL, sizeof(*p_sb)); if (!p_sb) { - DP_NOTICE(p_dev, true, - "Failed to allocate `struct ecore_sb_attn_info'\n"); + DP_NOTICE(p_dev, false, "Failed to allocate `struct ecore_sb_attn_info'\n"); return ECORE_NOMEM; } @@ -1415,8 +1421,7 @@ static enum _ecore_status_t ecore_int_sb_attn_alloc(struct ecore_hwfn *p_hwfn, p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys, SB_ATTN_ALIGNED_SIZE(p_hwfn)); if (!p_virt) { - DP_NOTICE(p_dev, true, - "Failed to allocate status block (attentions)\n"); + DP_NOTICE(p_dev, false, "Failed to allocate status block (attentions)\n"); OSAL_FREE(p_dev, p_sb); return ECORE_NOMEM; } @@ -1795,8 +1800,7 @@ static enum _ecore_status_t ecore_int_sp_sb_alloc(struct ecore_hwfn *p_hwfn, OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_sb)); if (!p_sb) { - DP_NOTICE(p_hwfn, true, - "Failed to allocate `struct ecore_sb_info'\n"); + DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_sb_info'\n"); return ECORE_NOMEM; } @@ -1804,7 +1808,7 @@ static enum _ecore_status_t ecore_int_sp_sb_alloc(struct ecore_hwfn *p_hwfn, p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &p_phys, SB_ALIGNED_SIZE(p_hwfn)); if (!p_virt) { - DP_NOTICE(p_hwfn, true, "Failed to allocate status block\n"); + DP_NOTICE(p_hwfn, false, "Failed to allocate status block\n"); OSAL_FREE(p_hwfn->p_dev, p_sb); return ECORE_NOMEM; } diff --git a/drivers/net/qede/base/ecore_int.h b/drivers/net/qede/base/ecore_int.h index 563051c3..bb22fdd8 100644 --- a/drivers/net/qede/base/ecore_int.h +++ b/drivers/net/qede/base/ecore_int.h @@ -1,7 +1,7 @@ /* - * Copyright (c) 2016 QLogic Corporation. + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.qede_pmd for copyright and licensing details. */ @@ -256,6 +256,7 @@ enum _ecore_status_t ecore_int_set_timer_res(struct ecore_hwfn *p_hwfn, #endif enum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn, - struct ecore_ptt *p_ptt); + struct ecore_ptt *p_ptt, + bool is_hw_init); #endif /* __ECORE_INT_H__ */ diff --git a/drivers/net/qede/base/ecore_int_api.h b/drivers/net/qede/base/ecore_int_api.h index 24cdf5ed..dff53776 100644 --- a/drivers/net/qede/base/ecore_int_api.h +++ b/drivers/net/qede/base/ecore_int_api.h @@ -1,7 +1,7 @@ /* - * Copyright (c) 2016 QLogic Corporation. + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.qede_pmd for copyright and licensing details. */ diff --git a/drivers/net/qede/base/ecore_iov_api.h b/drivers/net/qede/base/ecore_iov_api.h index 218ef50b..ee7cad74 100644 --- a/drivers/net/qede/base/ecore_iov_api.h +++ b/drivers/net/qede/base/ecore_iov_api.h @@ -1,7 +1,7 @@ /* - * Copyright (c) 2016 QLogic Corporation. + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.qede_pmd for copyright and licensing details. */ @@ -539,6 +539,17 @@ bool ecore_iov_is_valid_vfpf_msg_length(u32 length); */ u32 ecore_iov_pfvf_msg_length(void); +/** + * @brief Returns MAC address if one is configured + * + * @param p_hwfn + * @param rel_vf_id + * + * @return OSAL_NULL if mac isn't set; otherwise, returns MAC. 
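/* Illustrative sketch (not part of the patch): caller-side use of the new
 * bulletin query declared just below -- the OSAL_NULL return must be checked
 * before the 6-byte MAC is consumed. The copy-out policy is illustrative. */
static void example_read_vf_mac(struct ecore_hwfn *p_hwfn, u16 rel_vf_id,
                                u8 *p_out /* 6 bytes */)
{
        u8 *p_mac = ecore_iov_bulletin_get_mac(p_hwfn, rel_vf_id);

        if (p_mac == OSAL_NULL)
                return; /* no MAC posted in this VF's bulletin */

        OSAL_MEMCPY(p_out, p_mac, 6);
}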
+ */ +u8 *ecore_iov_bulletin_get_mac(struct ecore_hwfn *p_hwfn, + u16 rel_vf_id); + /** * @brief Returns forced MAC address if one is configured * diff --git a/drivers/net/qede/base/ecore_iro.h b/drivers/net/qede/base/ecore_iro.h index 360d7f88..e4dc1c92 100644 --- a/drivers/net/qede/base/ecore_iro.h +++ b/drivers/net/qede/base/ecore_iro.h @@ -1,7 +1,7 @@ /* - * Copyright (c) 2016 QLogic Corporation. + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.qede_pmd for copyright and licensing details. */ diff --git a/drivers/net/qede/base/ecore_iro_values.h b/drivers/net/qede/base/ecore_iro_values.h index 41532eeb..b47abeb9 100644 --- a/drivers/net/qede/base/ecore_iro_values.h +++ b/drivers/net/qede/base/ecore_iro_values.h @@ -1,7 +1,7 @@ /* - * Copyright (c) 2016 QLogic Corporation. + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.qede_pmd for copyright and licensing details. */ @@ -13,9 +13,9 @@ static const struct iro iro_arr[51] = { /* YSTORM_FLOW_CONTROL_MODE_OFFSET */ { 0x0, 0x0, 0x0, 0x0, 0x8}, /* TSTORM_PORT_STAT_OFFSET(port_id) */ - { 0x4cb0, 0x80, 0x0, 0x0, 0x80}, + { 0x4cb8, 0x88, 0x0, 0x0, 0x88}, /* TSTORM_LL2_PORT_STAT_OFFSET(port_id) */ - { 0x6508, 0x20, 0x0, 0x0, 0x20}, + { 0x6530, 0x20, 0x0, 0x0, 0x20}, /* USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) */ { 0xb00, 0x8, 0x0, 0x0, 0x4}, /* USTORM_FLR_FINAL_ACK_OFFSET(pf_id) */ @@ -27,49 +27,49 @@ static const struct iro iro_arr[51] = { /* USTORM_COMMON_QUEUE_CONS_OFFSET(queue_zone_id) */ { 0x84, 0x8, 0x0, 0x0, 0x2}, /* XSTORM_INTEG_TEST_DATA_OFFSET */ - { 0x4c40, 0x0, 0x0, 0x0, 0x78}, + { 0x4c48, 0x0, 0x0, 0x0, 0x78}, /* YSTORM_INTEG_TEST_DATA_OFFSET */ - { 0x3e10, 0x0, 0x0, 0x0, 0x78}, + { 0x3e38, 0x0, 0x0, 0x0, 0x78}, /* PSTORM_INTEG_TEST_DATA_OFFSET */ - { 0x2b50, 0x0, 0x0, 0x0, 0x78}, + { 0x2b78, 0x0, 0x0, 0x0, 0x78}, /* TSTORM_INTEG_TEST_DATA_OFFSET */ - { 0x4c38, 0x0, 0x0, 0x0, 0x78}, + { 0x4c40, 0x0, 0x0, 0x0, 0x78}, /* MSTORM_INTEG_TEST_DATA_OFFSET */ - { 0x4990, 0x0, 0x0, 0x0, 0x78}, + { 0x4998, 0x0, 0x0, 0x0, 0x78}, /* USTORM_INTEG_TEST_DATA_OFFSET */ - { 0x7f48, 0x0, 0x0, 0x0, 0x78}, + { 0x7f50, 0x0, 0x0, 0x0, 0x78}, /* TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) */ { 0xa28, 0x8, 0x0, 0x0, 0x8}, /* CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) */ - { 0x61e8, 0x10, 0x0, 0x0, 0x10}, + { 0x6210, 0x10, 0x0, 0x0, 0x10}, /* CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) */ { 0xb820, 0x30, 0x0, 0x0, 0x30}, /* CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) */ - { 0x96b8, 0x30, 0x0, 0x0, 0x30}, + { 0x96c0, 0x30, 0x0, 0x0, 0x30}, /* MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) */ - { 0x4b60, 0x80, 0x0, 0x0, 0x40}, + { 0x4b68, 0x80, 0x0, 0x0, 0x40}, /* MSTORM_ETH_PF_PRODS_OFFSET(queue_id) */ { 0x1f8, 0x4, 0x0, 0x0, 0x4}, /* MSTORM_ETH_VF_PRODS_OFFSET(vf_id,vf_queue_id) */ - { 0x53a0, 0x80, 0x4, 0x0, 0x4}, + { 0x53a8, 0x80, 0x4, 0x0, 0x4}, /* MSTORM_TPA_TIMEOUT_US_OFFSET */ - { 0xc7c8, 0x0, 0x0, 0x0, 0x4}, + { 0xc7d0, 0x0, 0x0, 0x0, 0x4}, /* MSTORM_ETH_PF_STAT_OFFSET(pf_id) */ - { 0x4ba0, 0x80, 0x0, 0x0, 0x20}, + { 0x4ba8, 0x80, 0x0, 0x0, 0x20}, /* USTORM_QUEUE_STAT_OFFSET(stat_counter_id) */ - { 0x8150, 0x40, 0x0, 0x0, 0x30}, + { 0x8158, 0x40, 0x0, 0x0, 0x30}, /* USTORM_ETH_PF_STAT_OFFSET(pf_id) */ { 0xe770, 0x60, 0x0, 0x0, 0x60}, /* PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) */ - { 0x2ce8, 0x80, 0x0, 0x0, 0x38}, + { 0x2d10, 0x80, 0x0, 0x0, 0x38}, /* 
PSTORM_ETH_PF_STAT_OFFSET(pf_id) */ - { 0xf2b0, 0x78, 0x0, 0x0, 0x78}, + { 0xf2b8, 0x78, 0x0, 0x0, 0x78}, /* PSTORM_CTL_FRAME_ETHTYPE_OFFSET(ethType_id) */ { 0x1f8, 0x4, 0x0, 0x0, 0x4}, /* TSTORM_ETH_PRS_INPUT_OFFSET */ - { 0xaef8, 0x0, 0x0, 0x0, 0xf0}, + { 0xaf20, 0x0, 0x0, 0x0, 0xf0}, /* ETH_RX_RATE_LIMIT_OFFSET(pf_id) */ - { 0xafe8, 0x8, 0x0, 0x0, 0x8}, + { 0xb010, 0x8, 0x0, 0x0, 0x8}, /* XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) */ { 0x1f8, 0x8, 0x0, 0x0, 0x8}, /* YSTORM_TOE_CQ_PROD_OFFSET(rss_id) */ @@ -81,37 +81,37 @@ static const struct iro iro_arr[51] = { /* TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) */ { 0x0, 0x8, 0x0, 0x0, 0x8}, /* TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id,bdq_id) */ - { 0x200, 0x18, 0x8, 0x0, 0x8}, + { 0x400, 0x18, 0x8, 0x0, 0x8}, /* MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id,bdq_id) */ { 0xb78, 0x18, 0x8, 0x0, 0x2}, /* TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) */ - { 0xd878, 0x50, 0x0, 0x0, 0x3c}, + { 0xd898, 0x50, 0x0, 0x0, 0x3c}, /* MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) */ { 0x12908, 0x18, 0x0, 0x0, 0x10}, /* USTORM_ISCSI_RX_STATS_OFFSET(pf_id) */ { 0x11aa8, 0x40, 0x0, 0x0, 0x18}, /* XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) */ - { 0xa580, 0x50, 0x0, 0x0, 0x20}, + { 0xa588, 0x50, 0x0, 0x0, 0x20}, /* YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) */ - { 0x86f8, 0x40, 0x0, 0x0, 0x28}, + { 0x8700, 0x40, 0x0, 0x0, 0x28}, /* PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) */ - { 0x102f8, 0x18, 0x0, 0x0, 0x10}, + { 0x10300, 0x18, 0x0, 0x0, 0x10}, /* TSTORM_FCOE_RX_STATS_OFFSET(pf_id) */ - { 0xde28, 0x48, 0x0, 0x0, 0x38}, + { 0xde48, 0x48, 0x0, 0x0, 0x38}, /* PSTORM_FCOE_TX_STATS_OFFSET(pf_id) */ - { 0x10760, 0x20, 0x0, 0x0, 0x20}, + { 0x10768, 0x20, 0x0, 0x0, 0x20}, /* PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) */ - { 0x2d20, 0x80, 0x0, 0x0, 0x10}, + { 0x2d48, 0x80, 0x0, 0x0, 0x10}, /* TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) */ - { 0x5020, 0x10, 0x0, 0x0, 0x10}, + { 0x5048, 0x10, 0x0, 0x0, 0x10}, /* XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) */ - { 0xc9b0, 0x30, 0x0, 0x0, 0x10}, + { 0xc9b8, 0x30, 0x0, 0x0, 0x10}, /* TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) */ - { 0xeec0, 0x10, 0x0, 0x0, 0x10}, + { 0xed90, 0x10, 0x0, 0x0, 0x10}, /* YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id) */ - { 0xa398, 0x10, 0x0, 0x0, 0x10}, + { 0xa520, 0x10, 0x0, 0x0, 0x10}, /* PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) */ - { 0x13100, 0x8, 0x0, 0x0, 0x8}, + { 0x13108, 0x8, 0x0, 0x0, 0x8}, }; #endif /* __IRO_VALUES_H__ */ diff --git a/drivers/net/qede/base/ecore_l2.c b/drivers/net/qede/base/ecore_l2.c index e3afc8a3..91d89e56 100644 --- a/drivers/net/qede/base/ecore_l2.c +++ b/drivers/net/qede/base/ecore_l2.c @@ -1,7 +1,7 @@ /* - * Copyright (c) 2016 QLogic Corporation. + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.qede_pmd for copyright and licensing details. 
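/* Illustrative sketch (not part of the patch): each iro_arr entry above is a
 * struct iro {base, m1, m2, m3, size} (see ecore_hsi_init_tool.h earlier in
 * this patch). The ecore_iro.h accessor macros resolve a Storm RAM offset
 * from it; the usual convention -- assumed here, not shown in this hunk -- is
 * the base plus one multiplier per parameter: */
static u32 example_iro_offset(const struct iro *p_iro, u32 p1, u32 p2, u32 p3)
{
        return p_iro->base + p_iro->m1 * p1 + p_iro->m2 * p2 + p_iro->m3 * p3;
}
/* e.g. TSTORM_PORT_STAT_OFFSET(port_id) with the updated entry
 * { 0x4cb8, 0x88, 0x0, 0x0, 0x88 } would resolve to 0x4cb8 + 0x88 * port_id:
 * one 0x88-byte stats block per port. */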
 */
@@ -77,7 +77,8 @@ enum _ecore_status_t ecore_l2_alloc(struct ecore_hwfn *p_hwfn)
 	}
 
 #ifdef CONFIG_ECORE_LOCK_ALLOC
-	OSAL_MUTEX_ALLOC(p_hwfn, &p_l2_info->lock);
+	if (OSAL_MUTEX_ALLOC(p_hwfn, &p_l2_info->lock))
+		return ECORE_NOMEM;
 #endif
 
 	return ECORE_SUCCESS;
@@ -110,6 +111,7 @@ void ecore_l2_free(struct ecore_hwfn *p_hwfn)
 			break;
 		OSAL_VFREE(p_hwfn->p_dev,
 			   p_hwfn->p_l2_info->pp_qid_usage[i]);
+		p_hwfn->p_l2_info->pp_qid_usage[i] = OSAL_NULL;
 	}
 
 #ifdef CONFIG_ECORE_LOCK_ALLOC
@@ -119,6 +121,7 @@ void ecore_l2_free(struct ecore_hwfn *p_hwfn)
 #endif
 
 	OSAL_VFREE(p_hwfn->p_dev, p_hwfn->p_l2_info->pp_qid_usage);
+	p_hwfn->p_l2_info->pp_qid_usage = OSAL_NULL;
 
 out_l2_info:
 	OSAL_VFREE(p_hwfn->p_dev, p_hwfn->p_l2_info);
@@ -687,7 +690,7 @@ ecore_sp_update_mcast_bin(struct vport_update_ramrod_data *p_ramrod,
 
 	p_ramrod->common.update_approx_mcast_flg = 1;
 	for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
-		u32 *p_bins = (u32 *)p_params->bins;
+		u32 *p_bins = p_params->bins;
 
 		p_ramrod->approx_mcast.bins[i] = OSAL_CPU_TO_LE32(p_bins[i]);
 	}
@@ -1185,11 +1188,20 @@ ecore_eth_pf_tx_queue_start(struct ecore_hwfn *p_hwfn,
 			    void OSAL_IOMEM * *pp_doorbell)
 {
 	enum _ecore_status_t rc;
+	u16 pq_id;
+
+	/* TODO - set tc in the pq_params for multi-cos.
+	 * If pacing is enabled, select the queue according to
+	 * rate-limiter availability; otherwise select the queue
+	 * based on multi-cos.
+	 */
+	if (IS_ECORE_PACING(p_hwfn))
+		pq_id = ecore_get_cm_pq_idx_rl(p_hwfn, p_cid->rel.queue_id);
+	else
+		pq_id = ecore_get_cm_pq_idx_mcos(p_hwfn, tc);
 
-	/* TODO - set tc in the pq_params for multi-cos */
-	rc = ecore_eth_txq_start_ramrod(p_hwfn, p_cid,
-					pbl_addr, pbl_size,
-					ecore_get_cm_pq_idx_mcos(p_hwfn, tc));
+	rc = ecore_eth_txq_start_ramrod(p_hwfn, p_cid, pbl_addr,
+					pbl_size, pq_id);
 	if (rc != ECORE_SUCCESS)
 		return rc;
 
@@ -1556,8 +1568,8 @@ ecore_sp_eth_filter_mcast(struct ecore_hwfn *p_hwfn,
 			  enum spq_mode comp_mode,
 			  struct ecore_spq_comp_cb *p_comp_data)
 {
-	unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
 	struct vport_update_ramrod_data *p_ramrod = OSAL_NULL;
+	u32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
 	struct ecore_spq_entry *p_ent = OSAL_NULL;
 	struct ecore_sp_init_data init_data;
 	u8 abs_vport_id = 0;
@@ -1596,8 +1608,7 @@ ecore_sp_eth_filter_mcast(struct ecore_hwfn *p_hwfn,
 		/* explicitly clear out the entire vector */
 		OSAL_MEMSET(&p_ramrod->approx_mcast.bins, 0,
 			    sizeof(p_ramrod->approx_mcast.bins));
-		OSAL_MEMSET(bins, 0, sizeof(unsigned long) *
-			    ETH_MULTICAST_MAC_BINS_IN_REGS);
+		OSAL_MEMSET(bins, 0, sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
 		/* filter ADD op is explicit set op and it removes
 		 * any existing filters for the vport.
*/ @@ -1606,16 +1617,15 @@ ecore_sp_eth_filter_mcast(struct ecore_hwfn *p_hwfn, u32 bit; bit = ecore_mcast_bin_from_mac(p_filter_cmd->mac[i]); - OSAL_SET_BIT(bit, bins); + bins[bit / 32] |= 1 << (bit % 32); } /* Convert to correct endianity */ for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) { struct vport_update_ramrod_mcast *p_ramrod_bins; - u32 *p_bins = (u32 *)bins; p_ramrod_bins = &p_ramrod->approx_mcast; - p_ramrod_bins->bins[i] = OSAL_CPU_TO_LE32(p_bins[i]); + p_ramrod_bins->bins[i] = OSAL_CPU_TO_LE32(bins[i]); } } @@ -1945,6 +1955,11 @@ static void __ecore_get_vport_port_stats(struct ecore_hwfn *p_hwfn, p_ah->tx_1519_to_max_byte_packets = port_stats.eth.u1.ah1.t1519_to_max; } + + p_common->link_change_count = ecore_rd(p_hwfn, p_ptt, + p_hwfn->mcp_info->port_addr + + OFFSETOF(struct public_port, + link_change_count)); } void __ecore_get_vport_stats(struct ecore_hwfn *p_hwfn, @@ -2061,11 +2076,14 @@ void ecore_reset_vport_stats(struct ecore_dev *p_dev) /* PORT statistics are not necessarily reset, so we need to * read and create a baseline for future statistics. + * Link change stat is maintained by MFW, return its value as is. */ if (!p_dev->reset_stats) DP_INFO(p_dev, "Reset stats not allocated\n"); - else + else { _ecore_get_vport_stats(p_dev, p_dev->reset_stats); + p_dev->reset_stats->common.link_change_count = 0; + } } void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn, @@ -2150,7 +2168,7 @@ ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn, p_ramrod->flow_id_valid = 0; p_ramrod->flow_id = 0; - p_ramrod->vport_id = abs_vport_id; + p_ramrod->vport_id = OSAL_CPU_TO_LE16((u16)abs_vport_id); p_ramrod->filter_action = b_is_add ? GFT_ADD_FILTER : GFT_DELETE_FILTER; @@ -2267,3 +2285,22 @@ out: return rc; } + +enum _ecore_status_t +ecore_eth_tx_queue_maxrate(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_queue_cid *p_cid, u32 rate) +{ + struct ecore_mcp_link_state *p_link; + u8 vport; + + vport = (u8)ecore_get_qm_vport_idx_rl(p_hwfn, p_cid->rel.queue_id); + p_link = &ECORE_LEADING_HWFN(p_hwfn->p_dev)->mcp_info->link_output; + + DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, + "About to rate limit qm vport %d for queue %d with rate %d\n", + vport, p_cid->rel.queue_id, rate); + + return ecore_init_vport_rl(p_hwfn, p_ptt, vport, rate, + p_link->speed); +} diff --git a/drivers/net/qede/base/ecore_l2.h b/drivers/net/qede/base/ecore_l2.h index f4212cf2..bea6a6df 100644 --- a/drivers/net/qede/base/ecore_l2.h +++ b/drivers/net/qede/base/ecore_l2.h @@ -1,7 +1,7 @@ /* - * Copyright (c) 2016 QLogic Corporation. + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.qede_pmd for copyright and licensing details. */ diff --git a/drivers/net/qede/base/ecore_l2_api.h b/drivers/net/qede/base/ecore_l2_api.h index ed9837bf..43ebbd12 100644 --- a/drivers/net/qede/base/ecore_l2_api.h +++ b/drivers/net/qede/base/ecore_l2_api.h @@ -1,7 +1,7 @@ /* - * Copyright (c) 2016 QLogic Corporation. + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.qede_pmd for copyright and licensing details. 
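
The move from unsigned long to u32 bins in the hunks above is apparently a width/endianness fix: with a 64-bit unsigned long bitmap, reinterpreting the array as u32 words for OSAL_CPU_TO_LE32 puts bits in the wrong half-word on big-endian hosts. A minimal stand-alone sketch of the fixed-width form now used (everything except ETH_MULTICAST_MAC_BINS_IN_REGS is local to this example; the patch itself shifts a plain 1 rather than 1U):

#include <stdint.h>

#define ETH_MULTICAST_MAC_BINS_IN_REGS 8

/* Set one multicast bin in fixed-width 32-bit words, independent of
 * the host's unsigned long width and byte order. */
static inline void
mcast_bin_set(uint32_t bins[ETH_MULTICAST_MAC_BINS_IN_REGS], uint32_t bit)
{
	bins[bit / 32] |= 1U << (bit % 32);
}
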
*/ @@ -332,7 +332,7 @@ struct ecore_sp_vport_update_params { u8 anti_spoofing_en; u8 update_accept_any_vlan_flg; u8 accept_any_vlan; - unsigned long bins[8]; + u32 bins[8]; struct ecore_rss_params *rss_params; struct ecore_filter_accept_flags accept_flags; struct ecore_sge_tpa_params *sge_tpa_params; diff --git a/drivers/net/qede/base/ecore_mcp.c b/drivers/net/qede/base/ecore_mcp.c index 8edd2e96..784d28c5 100644 --- a/drivers/net/qede/base/ecore_mcp.c +++ b/drivers/net/qede/base/ecore_mcp.c @@ -1,7 +1,7 @@ /* - * Copyright (c) 2016 QLogic Corporation. + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.qede_pmd for copyright and licensing details. */ @@ -9,6 +9,7 @@ #include "bcm_osal.h" #include "ecore.h" #include "ecore_status.h" +#include "nvm_cfg.h" #include "ecore_mcp.h" #include "mcp_public.h" #include "reg_addr.h" @@ -240,15 +241,24 @@ enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn, /* Allocate mcp_info structure */ p_hwfn->mcp_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, - sizeof(*p_hwfn->mcp_info)); - if (!p_hwfn->mcp_info) - goto err; + sizeof(*p_hwfn->mcp_info)); + if (!p_hwfn->mcp_info) { + DP_NOTICE(p_hwfn, false, "Failed to allocate mcp_info\n"); + return ECORE_NOMEM; + } p_info = p_hwfn->mcp_info; /* Initialize the MFW spinlocks */ #ifdef CONFIG_ECORE_LOCK_ALLOC - OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->cmd_lock); - OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->link_lock); + if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->cmd_lock)) { + OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info); + return ECORE_NOMEM; + } + if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->link_lock)) { + OSAL_SPIN_LOCK_DEALLOC(&p_info->cmd_lock); + OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info); + return ECORE_NOMEM; + } #endif OSAL_SPIN_LOCK_INIT(&p_info->cmd_lock); OSAL_SPIN_LOCK_INIT(&p_info->link_lock); @@ -272,7 +282,7 @@ enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn, return ECORE_SUCCESS; err: - DP_NOTICE(p_hwfn, true, "Failed to allocate mcp memory\n"); + DP_NOTICE(p_hwfn, false, "Failed to allocate mcp memory\n"); ecore_mcp_free(p_hwfn); return ECORE_NOMEM; } @@ -593,7 +603,7 @@ ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn, /* MCP not initialized */ if (!ecore_mcp_is_init(p_hwfn)) { - DP_NOTICE(p_hwfn, true, "MFW is not initialized !\n"); + DP_NOTICE(p_hwfn, true, "MFW is not initialized!\n"); return ECORE_BUSY; } @@ -2121,19 +2131,20 @@ enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 *p_media_type) { + enum _ecore_status_t rc = ECORE_SUCCESS; /* TODO - Add support for VFs */ if (IS_VF(p_hwfn->p_dev)) return ECORE_INVAL; if (!ecore_mcp_is_init(p_hwfn)) { - DP_NOTICE(p_hwfn, true, "MFW is not initialized !\n"); + DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n"); return ECORE_BUSY; } if (!p_ptt) { *p_media_type = MEDIA_UNSPECIFIED; - return ECORE_INVAL; + rc = ECORE_INVAL; } else { *p_media_type = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr + @@ -2144,6 +2155,197 @@ enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_hwfn *p_hwfn, return ECORE_SUCCESS; } +enum _ecore_status_t ecore_mcp_get_transceiver_data(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 *p_tranceiver_type) +{ + enum _ecore_status_t rc = ECORE_SUCCESS; + + /* TODO - Add support for VFs */ + if (IS_VF(p_hwfn->p_dev)) + return ECORE_INVAL; + + if (!ecore_mcp_is_init(p_hwfn)) { + DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n"); + return ECORE_BUSY; + } + if 
(!p_ptt) { + *p_tranceiver_type = ETH_TRANSCEIVER_TYPE_NONE; + rc = ECORE_INVAL; + } else { + *p_tranceiver_type = ecore_rd(p_hwfn, p_ptt, + p_hwfn->mcp_info->port_addr + + offsetof(struct public_port, + transceiver_data)); + } + + return rc; +} + +static int is_transceiver_ready(u32 transceiver_state, u32 transceiver_type) +{ + if ((transceiver_state & ETH_TRANSCEIVER_STATE_PRESENT) && + ((transceiver_state & ETH_TRANSCEIVER_STATE_UPDATING) == 0x0) && + (transceiver_type != ETH_TRANSCEIVER_TYPE_NONE)) + return 1; + + return 0; +} + +enum _ecore_status_t ecore_mcp_trans_speed_mask(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 *p_speed_mask) +{ + u32 transceiver_data, transceiver_type, transceiver_state; + + ecore_mcp_get_transceiver_data(p_hwfn, p_ptt, &transceiver_data); + + transceiver_state = GET_MFW_FIELD(transceiver_data, + ETH_TRANSCEIVER_STATE); + + transceiver_type = GET_MFW_FIELD(transceiver_data, + ETH_TRANSCEIVER_TYPE); + + if (is_transceiver_ready(transceiver_state, transceiver_type) == 0) + return ECORE_INVAL; + + switch (transceiver_type) { + case ETH_TRANSCEIVER_TYPE_1G_LX: + case ETH_TRANSCEIVER_TYPE_1G_SX: + case ETH_TRANSCEIVER_TYPE_1G_PCC: + case ETH_TRANSCEIVER_TYPE_1G_ACC: + case ETH_TRANSCEIVER_TYPE_1000BASET: + *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; + break; + + case ETH_TRANSCEIVER_TYPE_10G_SR: + case ETH_TRANSCEIVER_TYPE_10G_LR: + case ETH_TRANSCEIVER_TYPE_10G_LRM: + case ETH_TRANSCEIVER_TYPE_10G_ER: + case ETH_TRANSCEIVER_TYPE_10G_PCC: + case ETH_TRANSCEIVER_TYPE_10G_ACC: + case ETH_TRANSCEIVER_TYPE_4x10G: + *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G; + break; + + case ETH_TRANSCEIVER_TYPE_40G_LR4: + case ETH_TRANSCEIVER_TYPE_40G_SR4: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR: + *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G; + break; + + case ETH_TRANSCEIVER_TYPE_100G_AOC: + case ETH_TRANSCEIVER_TYPE_100G_SR4: + case ETH_TRANSCEIVER_TYPE_100G_LR4: + case ETH_TRANSCEIVER_TYPE_100G_ER4: + case ETH_TRANSCEIVER_TYPE_100G_ACC: + *p_speed_mask = + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G; + break; + + case ETH_TRANSCEIVER_TYPE_25G_SR: + case ETH_TRANSCEIVER_TYPE_25G_LR: + case ETH_TRANSCEIVER_TYPE_25G_AOC: + case ETH_TRANSCEIVER_TYPE_25G_ACC_S: + case ETH_TRANSCEIVER_TYPE_25G_ACC_M: + case ETH_TRANSCEIVER_TYPE_25G_ACC_L: + *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G; + break; + + case ETH_TRANSCEIVER_TYPE_25G_CA_N: + case ETH_TRANSCEIVER_TYPE_25G_CA_S: + case ETH_TRANSCEIVER_TYPE_25G_CA_L: + case ETH_TRANSCEIVER_TYPE_4x25G_CR: + *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; + break; + + case ETH_TRANSCEIVER_TYPE_40G_CR4: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR: + *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; + break; + + case ETH_TRANSCEIVER_TYPE_100G_CR4: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR: + *p_speed_mask = + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G | + 
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
+			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+		break;
+
+	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR:
+	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR:
+	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC:
+		*p_speed_mask =
+			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
+			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
+			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
+			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
+		break;
+
+	case ETH_TRANSCEIVER_TYPE_XLPPI:
+		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
+		break;
+
+	case ETH_TRANSCEIVER_TYPE_10G_BASET:
+		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
+			NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+		break;
+
+	default:
+		DP_INFO(p_hwfn, "Unknown transceiver type 0x%x\n",
+			transceiver_type);
+		*p_speed_mask = 0xff;
+		break;
+	}
+
+	return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_mcp_get_board_config(struct ecore_hwfn *p_hwfn,
+						struct ecore_ptt *p_ptt,
+						u32 *p_board_config)
+{
+	u32 nvm_cfg_addr, nvm_cfg1_offset, port_cfg_addr;
+	enum _ecore_status_t rc = ECORE_SUCCESS;
+
+	/* TODO - Add support for VFs */
+	if (IS_VF(p_hwfn->p_dev))
+		return ECORE_INVAL;
+
+	if (!ecore_mcp_is_init(p_hwfn)) {
+		DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n");
+		return ECORE_BUSY;
+	}
+	if (!p_ptt) {
+		*p_board_config = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED;
+		rc = ECORE_INVAL;
+	} else {
+		nvm_cfg_addr = ecore_rd(p_hwfn, p_ptt,
+					MISC_REG_GEN_PURP_CR0);
+		nvm_cfg1_offset = ecore_rd(p_hwfn, p_ptt,
+					   nvm_cfg_addr + 4);
+		port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+			offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
+		*p_board_config = ecore_rd(p_hwfn, p_ptt,
+					   port_cfg_addr +
+					   offsetof(struct nvm_cfg1_port,
+						    board_cfg));
+	}
+
+	return rc;
+}
+
 /* @DPDK */
 /* Old MFW has a global configuration for all PFs regarding RDMA support */
 static void
diff --git a/drivers/net/qede/base/ecore_mcp.h b/drivers/net/qede/base/ecore_mcp.h
index 6afaf7de..c422736f 100644
--- a/drivers/net/qede/base/ecore_mcp.h
+++ b/drivers/net/qede/base/ecore_mcp.h
@@ -1,7 +1,7 @@
 /*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
  * All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
  *
  * See LICENSE.qede_pmd for copyright and licensing details.
  */
diff --git a/drivers/net/qede/base/ecore_mcp_api.h b/drivers/net/qede/base/ecore_mcp_api.h
index 225890e2..06b33bb8 100644
--- a/drivers/net/qede/base/ecore_mcp_api.h
+++ b/drivers/net/qede/base/ecore_mcp_api.h
@@ -1,7 +1,7 @@
 /*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
  * All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
  *
  * See LICENSE.qede_pmd for copyright and licensing details.
  */
@@ -594,6 +594,52 @@ enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt,
					      u32 *media_type);
 
+/**
+ * @brief Get transceiver data of the port.
+ *
+ * @param p_hwfn - hw function pointer
+ * @param p_ptt
+ * @param p_tranceiver_type - transceiver data value
+ *
+ * @return enum _ecore_status_t -
+ *      ECORE_SUCCESS - Operation was successful.
+ *      ECORE_BUSY - Operation failed
+ */
+enum _ecore_status_t ecore_mcp_get_transceiver_data(struct ecore_hwfn *p_hwfn,
+						    struct ecore_ptt *p_ptt,
+						    u32 *p_tranceiver_type);
+
+/**
+ * @brief Get transceiver supported speed mask.
+ *
+ * @param p_hwfn - hw function pointer
+ * @param p_ptt
+ * @param p_speed_mask - Bit mask of all supported speeds.
+ *
+ * @return enum _ecore_status_t -
+ *      ECORE_SUCCESS - Operation was successful.
+ *      ECORE_BUSY - Operation failed
+ */
+
+enum _ecore_status_t ecore_mcp_trans_speed_mask(struct ecore_hwfn *p_hwfn,
+						struct ecore_ptt *p_ptt,
+						u32 *p_speed_mask);
+
+/**
+ * @brief Get board configuration.
+ *
+ * @param p_hwfn - hw function pointer
+ * @param p_ptt
+ * @param p_board_config - Board config.
+ *
+ * @return enum _ecore_status_t -
+ *      ECORE_SUCCESS - Operation was successful.
+ *      ECORE_BUSY - Operation failed
+ */
+enum _ecore_status_t ecore_mcp_get_board_config(struct ecore_hwfn *p_hwfn,
+						struct ecore_ptt *p_ptt,
+						u32 *p_board_config);
+
 /**
  * @brief - Sends a command to the MCP mailbox.
  *
diff --git a/drivers/net/qede/base/ecore_mng_tlv.c b/drivers/net/qede/base/ecore_mng_tlv.c
index 3a1de094..b48076a3 100644
--- a/drivers/net/qede/base/ecore_mng_tlv.c
+++ b/drivers/net/qede/base/ecore_mng_tlv.c
@@ -1,3 +1,11 @@
+/*
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
 #include "bcm_osal.h"
 #include "ecore.h"
 #include "ecore_status.h"
diff --git a/drivers/net/qede/base/ecore_proto_if.h b/drivers/net/qede/base/ecore_proto_if.h
index abca7408..f049d821 100644
--- a/drivers/net/qede/base/ecore_proto_if.h
+++ b/drivers/net/qede/base/ecore_proto_if.h
@@ -1,7 +1,7 @@
 /*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
  * All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
  *
  * See LICENSE.qede_pmd for copyright and licensing details.
  */
@@ -31,6 +31,9 @@ struct ecore_eth_pf_params {
	 * This will set the maximal number of configured steering-filters.
	 */
	u32 num_arfs_filters;
+
+	/* Allow the VF to change its MAC despite the PF having set a forced MAC. */
+	bool allow_vf_mac_change;
 };
 
 /* Most of the parameters below are described in the FW iSCSI / TCP HSI */
diff --git a/drivers/net/qede/base/ecore_rt_defs.h b/drivers/net/qede/base/ecore_rt_defs.h
index 1d085815..7dec2dd5 100644
--- a/drivers/net/qede/base/ecore_rt_defs.h
+++ b/drivers/net/qede/base/ecore_rt_defs.h
@@ -1,7 +1,7 @@
 /*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
  * All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
  *
  * See LICENSE.qede_pmd for copyright and licensing details.
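
A hedged usage sketch for the MCP getters declared above; the caller below and the way the mask is combined with NVM-advertised speeds are assumptions for illustration, not code from this patch:

/* Hypothetical caller: narrow the advertised speeds to what the plugged
 * module supports; on failure (no module, or module updating) the
 * NVM-derived mask is left untouched. */
static void clamp_speeds_to_module(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt,
				   u32 *adv_speeds)
{
	u32 module_speeds = 0;

	if (ecore_mcp_trans_speed_mask(p_hwfn, p_ptt,
				       &module_speeds) == ECORE_SUCCESS)
		*adv_speeds &= module_speeds;
}
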
*/ @@ -205,329 +205,336 @@ #define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 34082 #define QM_REG_BASEADDROTHERPQ_RT_OFFSET 34083 #define QM_REG_BASEADDROTHERPQ_RT_SIZE 128 -#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 34211 -#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 34212 -#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 34213 -#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 34214 -#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 34215 -#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 34216 -#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 34217 -#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 34218 -#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 34219 -#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 34220 -#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 34221 -#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 34222 -#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 34223 -#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 34224 -#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 34225 -#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 34226 -#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 34227 -#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 34228 -#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 34229 -#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 34230 -#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 34231 -#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 34232 -#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 34233 -#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 34234 -#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 34235 -#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 34236 -#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 34237 -#define QM_REG_PQTX2PF_0_RT_OFFSET 34238 -#define QM_REG_PQTX2PF_1_RT_OFFSET 34239 -#define QM_REG_PQTX2PF_2_RT_OFFSET 34240 -#define QM_REG_PQTX2PF_3_RT_OFFSET 34241 -#define QM_REG_PQTX2PF_4_RT_OFFSET 34242 -#define QM_REG_PQTX2PF_5_RT_OFFSET 34243 -#define QM_REG_PQTX2PF_6_RT_OFFSET 34244 -#define QM_REG_PQTX2PF_7_RT_OFFSET 34245 -#define QM_REG_PQTX2PF_8_RT_OFFSET 34246 -#define QM_REG_PQTX2PF_9_RT_OFFSET 34247 -#define QM_REG_PQTX2PF_10_RT_OFFSET 34248 -#define QM_REG_PQTX2PF_11_RT_OFFSET 34249 -#define QM_REG_PQTX2PF_12_RT_OFFSET 34250 -#define QM_REG_PQTX2PF_13_RT_OFFSET 34251 -#define QM_REG_PQTX2PF_14_RT_OFFSET 34252 -#define QM_REG_PQTX2PF_15_RT_OFFSET 34253 -#define QM_REG_PQTX2PF_16_RT_OFFSET 34254 -#define QM_REG_PQTX2PF_17_RT_OFFSET 34255 -#define QM_REG_PQTX2PF_18_RT_OFFSET 34256 -#define QM_REG_PQTX2PF_19_RT_OFFSET 34257 -#define QM_REG_PQTX2PF_20_RT_OFFSET 34258 -#define QM_REG_PQTX2PF_21_RT_OFFSET 34259 -#define QM_REG_PQTX2PF_22_RT_OFFSET 34260 -#define QM_REG_PQTX2PF_23_RT_OFFSET 34261 -#define QM_REG_PQTX2PF_24_RT_OFFSET 34262 -#define QM_REG_PQTX2PF_25_RT_OFFSET 34263 -#define QM_REG_PQTX2PF_26_RT_OFFSET 34264 -#define QM_REG_PQTX2PF_27_RT_OFFSET 34265 -#define QM_REG_PQTX2PF_28_RT_OFFSET 34266 -#define QM_REG_PQTX2PF_29_RT_OFFSET 34267 -#define QM_REG_PQTX2PF_30_RT_OFFSET 34268 -#define QM_REG_PQTX2PF_31_RT_OFFSET 34269 -#define QM_REG_PQTX2PF_32_RT_OFFSET 34270 -#define QM_REG_PQTX2PF_33_RT_OFFSET 34271 -#define QM_REG_PQTX2PF_34_RT_OFFSET 34272 -#define QM_REG_PQTX2PF_35_RT_OFFSET 34273 -#define QM_REG_PQTX2PF_36_RT_OFFSET 34274 -#define QM_REG_PQTX2PF_37_RT_OFFSET 34275 -#define QM_REG_PQTX2PF_38_RT_OFFSET 34276 -#define QM_REG_PQTX2PF_39_RT_OFFSET 34277 -#define QM_REG_PQTX2PF_40_RT_OFFSET 34278 -#define QM_REG_PQTX2PF_41_RT_OFFSET 34279 -#define QM_REG_PQTX2PF_42_RT_OFFSET 34280 -#define QM_REG_PQTX2PF_43_RT_OFFSET 34281 -#define QM_REG_PQTX2PF_44_RT_OFFSET 34282 -#define QM_REG_PQTX2PF_45_RT_OFFSET 34283 -#define QM_REG_PQTX2PF_46_RT_OFFSET 34284 -#define QM_REG_PQTX2PF_47_RT_OFFSET 
34285 -#define QM_REG_PQTX2PF_48_RT_OFFSET 34286 -#define QM_REG_PQTX2PF_49_RT_OFFSET 34287 -#define QM_REG_PQTX2PF_50_RT_OFFSET 34288 -#define QM_REG_PQTX2PF_51_RT_OFFSET 34289 -#define QM_REG_PQTX2PF_52_RT_OFFSET 34290 -#define QM_REG_PQTX2PF_53_RT_OFFSET 34291 -#define QM_REG_PQTX2PF_54_RT_OFFSET 34292 -#define QM_REG_PQTX2PF_55_RT_OFFSET 34293 -#define QM_REG_PQTX2PF_56_RT_OFFSET 34294 -#define QM_REG_PQTX2PF_57_RT_OFFSET 34295 -#define QM_REG_PQTX2PF_58_RT_OFFSET 34296 -#define QM_REG_PQTX2PF_59_RT_OFFSET 34297 -#define QM_REG_PQTX2PF_60_RT_OFFSET 34298 -#define QM_REG_PQTX2PF_61_RT_OFFSET 34299 -#define QM_REG_PQTX2PF_62_RT_OFFSET 34300 -#define QM_REG_PQTX2PF_63_RT_OFFSET 34301 -#define QM_REG_PQOTHER2PF_0_RT_OFFSET 34302 -#define QM_REG_PQOTHER2PF_1_RT_OFFSET 34303 -#define QM_REG_PQOTHER2PF_2_RT_OFFSET 34304 -#define QM_REG_PQOTHER2PF_3_RT_OFFSET 34305 -#define QM_REG_PQOTHER2PF_4_RT_OFFSET 34306 -#define QM_REG_PQOTHER2PF_5_RT_OFFSET 34307 -#define QM_REG_PQOTHER2PF_6_RT_OFFSET 34308 -#define QM_REG_PQOTHER2PF_7_RT_OFFSET 34309 -#define QM_REG_PQOTHER2PF_8_RT_OFFSET 34310 -#define QM_REG_PQOTHER2PF_9_RT_OFFSET 34311 -#define QM_REG_PQOTHER2PF_10_RT_OFFSET 34312 -#define QM_REG_PQOTHER2PF_11_RT_OFFSET 34313 -#define QM_REG_PQOTHER2PF_12_RT_OFFSET 34314 -#define QM_REG_PQOTHER2PF_13_RT_OFFSET 34315 -#define QM_REG_PQOTHER2PF_14_RT_OFFSET 34316 -#define QM_REG_PQOTHER2PF_15_RT_OFFSET 34317 -#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 34318 -#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 34319 -#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 34320 -#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 34321 -#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 34322 -#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 34323 -#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 34324 -#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 34325 -#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 34326 -#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 34327 -#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 34328 -#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 34329 -#define QM_REG_RLGLBLINCVAL_RT_OFFSET 34330 +#define QM_REG_PTRTBLOTHER_RT_OFFSET 34211 +#define QM_REG_PTRTBLOTHER_RT_SIZE 256 +#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 34467 +#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 34468 +#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 34469 +#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 34470 +#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 34471 +#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 34472 +#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 34473 +#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 34474 +#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 34475 +#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 34476 +#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 34477 +#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 34478 +#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 34479 +#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 34480 +#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 34481 +#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 34482 +#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 34483 +#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 34484 +#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 34485 +#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 34486 +#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 34487 +#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 34488 +#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 34489 +#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 34490 +#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 34491 +#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 34492 +#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 34493 +#define QM_REG_PQTX2PF_0_RT_OFFSET 34494 +#define 
QM_REG_PQTX2PF_1_RT_OFFSET 34495 +#define QM_REG_PQTX2PF_2_RT_OFFSET 34496 +#define QM_REG_PQTX2PF_3_RT_OFFSET 34497 +#define QM_REG_PQTX2PF_4_RT_OFFSET 34498 +#define QM_REG_PQTX2PF_5_RT_OFFSET 34499 +#define QM_REG_PQTX2PF_6_RT_OFFSET 34500 +#define QM_REG_PQTX2PF_7_RT_OFFSET 34501 +#define QM_REG_PQTX2PF_8_RT_OFFSET 34502 +#define QM_REG_PQTX2PF_9_RT_OFFSET 34503 +#define QM_REG_PQTX2PF_10_RT_OFFSET 34504 +#define QM_REG_PQTX2PF_11_RT_OFFSET 34505 +#define QM_REG_PQTX2PF_12_RT_OFFSET 34506 +#define QM_REG_PQTX2PF_13_RT_OFFSET 34507 +#define QM_REG_PQTX2PF_14_RT_OFFSET 34508 +#define QM_REG_PQTX2PF_15_RT_OFFSET 34509 +#define QM_REG_PQTX2PF_16_RT_OFFSET 34510 +#define QM_REG_PQTX2PF_17_RT_OFFSET 34511 +#define QM_REG_PQTX2PF_18_RT_OFFSET 34512 +#define QM_REG_PQTX2PF_19_RT_OFFSET 34513 +#define QM_REG_PQTX2PF_20_RT_OFFSET 34514 +#define QM_REG_PQTX2PF_21_RT_OFFSET 34515 +#define QM_REG_PQTX2PF_22_RT_OFFSET 34516 +#define QM_REG_PQTX2PF_23_RT_OFFSET 34517 +#define QM_REG_PQTX2PF_24_RT_OFFSET 34518 +#define QM_REG_PQTX2PF_25_RT_OFFSET 34519 +#define QM_REG_PQTX2PF_26_RT_OFFSET 34520 +#define QM_REG_PQTX2PF_27_RT_OFFSET 34521 +#define QM_REG_PQTX2PF_28_RT_OFFSET 34522 +#define QM_REG_PQTX2PF_29_RT_OFFSET 34523 +#define QM_REG_PQTX2PF_30_RT_OFFSET 34524 +#define QM_REG_PQTX2PF_31_RT_OFFSET 34525 +#define QM_REG_PQTX2PF_32_RT_OFFSET 34526 +#define QM_REG_PQTX2PF_33_RT_OFFSET 34527 +#define QM_REG_PQTX2PF_34_RT_OFFSET 34528 +#define QM_REG_PQTX2PF_35_RT_OFFSET 34529 +#define QM_REG_PQTX2PF_36_RT_OFFSET 34530 +#define QM_REG_PQTX2PF_37_RT_OFFSET 34531 +#define QM_REG_PQTX2PF_38_RT_OFFSET 34532 +#define QM_REG_PQTX2PF_39_RT_OFFSET 34533 +#define QM_REG_PQTX2PF_40_RT_OFFSET 34534 +#define QM_REG_PQTX2PF_41_RT_OFFSET 34535 +#define QM_REG_PQTX2PF_42_RT_OFFSET 34536 +#define QM_REG_PQTX2PF_43_RT_OFFSET 34537 +#define QM_REG_PQTX2PF_44_RT_OFFSET 34538 +#define QM_REG_PQTX2PF_45_RT_OFFSET 34539 +#define QM_REG_PQTX2PF_46_RT_OFFSET 34540 +#define QM_REG_PQTX2PF_47_RT_OFFSET 34541 +#define QM_REG_PQTX2PF_48_RT_OFFSET 34542 +#define QM_REG_PQTX2PF_49_RT_OFFSET 34543 +#define QM_REG_PQTX2PF_50_RT_OFFSET 34544 +#define QM_REG_PQTX2PF_51_RT_OFFSET 34545 +#define QM_REG_PQTX2PF_52_RT_OFFSET 34546 +#define QM_REG_PQTX2PF_53_RT_OFFSET 34547 +#define QM_REG_PQTX2PF_54_RT_OFFSET 34548 +#define QM_REG_PQTX2PF_55_RT_OFFSET 34549 +#define QM_REG_PQTX2PF_56_RT_OFFSET 34550 +#define QM_REG_PQTX2PF_57_RT_OFFSET 34551 +#define QM_REG_PQTX2PF_58_RT_OFFSET 34552 +#define QM_REG_PQTX2PF_59_RT_OFFSET 34553 +#define QM_REG_PQTX2PF_60_RT_OFFSET 34554 +#define QM_REG_PQTX2PF_61_RT_OFFSET 34555 +#define QM_REG_PQTX2PF_62_RT_OFFSET 34556 +#define QM_REG_PQTX2PF_63_RT_OFFSET 34557 +#define QM_REG_PQOTHER2PF_0_RT_OFFSET 34558 +#define QM_REG_PQOTHER2PF_1_RT_OFFSET 34559 +#define QM_REG_PQOTHER2PF_2_RT_OFFSET 34560 +#define QM_REG_PQOTHER2PF_3_RT_OFFSET 34561 +#define QM_REG_PQOTHER2PF_4_RT_OFFSET 34562 +#define QM_REG_PQOTHER2PF_5_RT_OFFSET 34563 +#define QM_REG_PQOTHER2PF_6_RT_OFFSET 34564 +#define QM_REG_PQOTHER2PF_7_RT_OFFSET 34565 +#define QM_REG_PQOTHER2PF_8_RT_OFFSET 34566 +#define QM_REG_PQOTHER2PF_9_RT_OFFSET 34567 +#define QM_REG_PQOTHER2PF_10_RT_OFFSET 34568 +#define QM_REG_PQOTHER2PF_11_RT_OFFSET 34569 +#define QM_REG_PQOTHER2PF_12_RT_OFFSET 34570 +#define QM_REG_PQOTHER2PF_13_RT_OFFSET 34571 +#define QM_REG_PQOTHER2PF_14_RT_OFFSET 34572 +#define QM_REG_PQOTHER2PF_15_RT_OFFSET 34573 +#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 34574 +#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 34575 +#define 
QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 34576 +#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 34577 +#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 34578 +#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 34579 +#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 34580 +#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 34581 +#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 34582 +#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 34583 +#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 34584 +#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 34585 +#define QM_REG_RLGLBLINCVAL_RT_OFFSET 34586 #define QM_REG_RLGLBLINCVAL_RT_SIZE 256 -#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 34586 +#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 34842 #define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256 -#define QM_REG_RLGLBLCRD_RT_OFFSET 34842 +#define QM_REG_RLGLBLCRD_RT_OFFSET 35098 #define QM_REG_RLGLBLCRD_RT_SIZE 256 -#define QM_REG_RLGLBLENABLE_RT_OFFSET 35098 -#define QM_REG_RLPFPERIOD_RT_OFFSET 35099 -#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 35100 -#define QM_REG_RLPFINCVAL_RT_OFFSET 35101 +#define QM_REG_RLGLBLENABLE_RT_OFFSET 35354 +#define QM_REG_RLPFPERIOD_RT_OFFSET 35355 +#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 35356 +#define QM_REG_RLPFINCVAL_RT_OFFSET 35357 #define QM_REG_RLPFINCVAL_RT_SIZE 16 -#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 35117 +#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 35373 #define QM_REG_RLPFUPPERBOUND_RT_SIZE 16 -#define QM_REG_RLPFCRD_RT_OFFSET 35133 +#define QM_REG_RLPFCRD_RT_OFFSET 35389 #define QM_REG_RLPFCRD_RT_SIZE 16 -#define QM_REG_RLPFENABLE_RT_OFFSET 35149 -#define QM_REG_RLPFVOQENABLE_RT_OFFSET 35150 -#define QM_REG_WFQPFWEIGHT_RT_OFFSET 35151 +#define QM_REG_RLPFENABLE_RT_OFFSET 35405 +#define QM_REG_RLPFVOQENABLE_RT_OFFSET 35406 +#define QM_REG_WFQPFWEIGHT_RT_OFFSET 35407 #define QM_REG_WFQPFWEIGHT_RT_SIZE 16 -#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 35167 +#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 35423 #define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16 -#define QM_REG_WFQPFCRD_RT_OFFSET 35183 +#define QM_REG_WFQPFCRD_RT_OFFSET 35439 #define QM_REG_WFQPFCRD_RT_SIZE 256 -#define QM_REG_WFQPFENABLE_RT_OFFSET 35439 -#define QM_REG_WFQVPENABLE_RT_OFFSET 35440 -#define QM_REG_BASEADDRTXPQ_RT_OFFSET 35441 +#define QM_REG_WFQPFENABLE_RT_OFFSET 35695 +#define QM_REG_WFQVPENABLE_RT_OFFSET 35696 +#define QM_REG_BASEADDRTXPQ_RT_OFFSET 35697 #define QM_REG_BASEADDRTXPQ_RT_SIZE 512 -#define QM_REG_TXPQMAP_RT_OFFSET 35953 +#define QM_REG_TXPQMAP_RT_OFFSET 36209 #define QM_REG_TXPQMAP_RT_SIZE 512 -#define QM_REG_WFQVPWEIGHT_RT_OFFSET 36465 +#define QM_REG_WFQVPWEIGHT_RT_OFFSET 36721 #define QM_REG_WFQVPWEIGHT_RT_SIZE 512 -#define QM_REG_WFQVPCRD_RT_OFFSET 36977 +#define QM_REG_WFQVPCRD_RT_OFFSET 37233 #define QM_REG_WFQVPCRD_RT_SIZE 512 -#define QM_REG_WFQVPMAP_RT_OFFSET 37489 +#define QM_REG_WFQVPMAP_RT_OFFSET 37745 #define QM_REG_WFQVPMAP_RT_SIZE 512 -#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 38001 +#define QM_REG_PTRTBLTX_RT_OFFSET 38257 +#define QM_REG_PTRTBLTX_RT_SIZE 1024 +#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 39281 #define QM_REG_WFQPFCRD_MSB_RT_SIZE 320 -#define QM_REG_VOQCRDLINE_RT_OFFSET 38321 +#define QM_REG_VOQCRDLINE_RT_OFFSET 39601 #define QM_REG_VOQCRDLINE_RT_SIZE 36 -#define QM_REG_VOQINITCRDLINE_RT_OFFSET 38357 +#define QM_REG_VOQINITCRDLINE_RT_OFFSET 39637 #define QM_REG_VOQINITCRDLINE_RT_SIZE 36 -#define QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET 38393 -#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 38394 -#define NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET 38395 -#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 38396 -#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 
38397 -#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 38398 -#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 38399 -#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 38400 -#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 38401 +#define QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET 39673 +#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 39674 +#define NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET 39675 +#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 39676 +#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 39677 +#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 39678 +#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 39679 +#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 39680 +#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 39681 #define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4 -#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 38405 +#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 39685 #define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4 -#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 38409 +#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 39689 #define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32 -#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 38441 +#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 39721 #define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16 -#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 38457 +#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 39737 #define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16 -#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 38473 +#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 39753 #define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16 -#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 38489 +#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 39769 #define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16 -#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 38505 -#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 38506 -#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET 38507 +#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 39785 +#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 39786 +#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET 39787 #define NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE 8 -#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_OFFSET 38515 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_OFFSET 39795 #define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_SIZE 1024 -#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_OFFSET 39539 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_OFFSET 40819 #define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_SIZE 512 -#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_OFFSET 40051 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_OFFSET 41331 #define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_SIZE 512 -#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 40563 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 41843 #define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 512 -#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_OFFSET 41075 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_OFFSET 42355 #define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_SIZE 512 -#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_OFFSET 41587 +#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_OFFSET 42867 #define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_SIZE 32 -#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 41619 -#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 41620 -#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 41621 -#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 41622 -#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 41623 -#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 41624 -#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 41625 -#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 
41626 -#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 41627 -#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 41628 -#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 41629 -#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 41630 -#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 41631 -#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 41632 -#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 41633 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 41634 -#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 41635 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 41636 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 41637 -#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 41638 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 41639 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 41640 -#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 41641 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 41642 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 41643 -#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 41644 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 41645 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 41646 -#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 41647 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 41648 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 41649 -#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 41650 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 41651 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 41652 -#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 41653 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 41654 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 41655 -#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 41656 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 41657 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 41658 -#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 41659 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 41660 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 41661 -#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 41662 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 41663 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 41664 -#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 41665 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 41666 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 41667 -#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 41668 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 41669 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 41670 -#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 41671 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 41672 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 41673 -#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 41674 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 41675 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 41676 -#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 41677 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 41678 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 41679 -#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 41680 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 41681 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 41682 -#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 41683 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 41684 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 41685 -#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 41686 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 41687 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 41688 -#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 
41689 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 41690 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 41691 -#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 41692 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 41693 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ20_RT_OFFSET 41694 -#define PBF_REG_BTB_GUARANTEED_VOQ20_RT_OFFSET 41695 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ20_RT_OFFSET 41696 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ21_RT_OFFSET 41697 -#define PBF_REG_BTB_GUARANTEED_VOQ21_RT_OFFSET 41698 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ21_RT_OFFSET 41699 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ22_RT_OFFSET 41700 -#define PBF_REG_BTB_GUARANTEED_VOQ22_RT_OFFSET 41701 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ22_RT_OFFSET 41702 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ23_RT_OFFSET 41703 -#define PBF_REG_BTB_GUARANTEED_VOQ23_RT_OFFSET 41704 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ23_RT_OFFSET 41705 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ24_RT_OFFSET 41706 -#define PBF_REG_BTB_GUARANTEED_VOQ24_RT_OFFSET 41707 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ24_RT_OFFSET 41708 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ25_RT_OFFSET 41709 -#define PBF_REG_BTB_GUARANTEED_VOQ25_RT_OFFSET 41710 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ25_RT_OFFSET 41711 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ26_RT_OFFSET 41712 -#define PBF_REG_BTB_GUARANTEED_VOQ26_RT_OFFSET 41713 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ26_RT_OFFSET 41714 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ27_RT_OFFSET 41715 -#define PBF_REG_BTB_GUARANTEED_VOQ27_RT_OFFSET 41716 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ27_RT_OFFSET 41717 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ28_RT_OFFSET 41718 -#define PBF_REG_BTB_GUARANTEED_VOQ28_RT_OFFSET 41719 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ28_RT_OFFSET 41720 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ29_RT_OFFSET 41721 -#define PBF_REG_BTB_GUARANTEED_VOQ29_RT_OFFSET 41722 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ29_RT_OFFSET 41723 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ30_RT_OFFSET 41724 -#define PBF_REG_BTB_GUARANTEED_VOQ30_RT_OFFSET 41725 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ30_RT_OFFSET 41726 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ31_RT_OFFSET 41727 -#define PBF_REG_BTB_GUARANTEED_VOQ31_RT_OFFSET 41728 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ31_RT_OFFSET 41729 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ32_RT_OFFSET 41730 -#define PBF_REG_BTB_GUARANTEED_VOQ32_RT_OFFSET 41731 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ32_RT_OFFSET 41732 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ33_RT_OFFSET 41733 -#define PBF_REG_BTB_GUARANTEED_VOQ33_RT_OFFSET 41734 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ33_RT_OFFSET 41735 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ34_RT_OFFSET 41736 -#define PBF_REG_BTB_GUARANTEED_VOQ34_RT_OFFSET 41737 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ34_RT_OFFSET 41738 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ35_RT_OFFSET 41739 -#define PBF_REG_BTB_GUARANTEED_VOQ35_RT_OFFSET 41740 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ35_RT_OFFSET 41741 -#define XCM_REG_CON_PHY_Q3_RT_OFFSET 41742 +#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 42899 +#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 42900 +#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 42901 +#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 42902 +#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 42903 +#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 42904 +#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 42905 +#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 42906 +#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 42907 +#define 
CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 42908 +#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 42909 +#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 42910 +#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 42911 +#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 42912 +#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 42913 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 42914 +#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 42915 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 42916 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 42917 +#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 42918 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 42919 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 42920 +#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 42921 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 42922 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 42923 +#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 42924 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 42925 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 42926 +#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 42927 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 42928 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 42929 +#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 42930 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 42931 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 42932 +#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 42933 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 42934 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 42935 +#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 42936 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 42937 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 42938 +#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 42939 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 42940 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 42941 +#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 42942 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 42943 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 42944 +#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 42945 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 42946 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 42947 +#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 42948 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 42949 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 42950 +#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 42951 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 42952 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 42953 +#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 42954 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 42955 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 42956 +#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 42957 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 42958 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 42959 +#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 42960 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 42961 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 42962 +#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 42963 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 42964 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 42965 +#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 42966 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 42967 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 42968 +#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 42969 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 42970 
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 42971 +#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 42972 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 42973 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ20_RT_OFFSET 42974 +#define PBF_REG_BTB_GUARANTEED_VOQ20_RT_OFFSET 42975 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ20_RT_OFFSET 42976 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ21_RT_OFFSET 42977 +#define PBF_REG_BTB_GUARANTEED_VOQ21_RT_OFFSET 42978 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ21_RT_OFFSET 42979 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ22_RT_OFFSET 42980 +#define PBF_REG_BTB_GUARANTEED_VOQ22_RT_OFFSET 42981 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ22_RT_OFFSET 42982 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ23_RT_OFFSET 42983 +#define PBF_REG_BTB_GUARANTEED_VOQ23_RT_OFFSET 42984 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ23_RT_OFFSET 42985 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ24_RT_OFFSET 42986 +#define PBF_REG_BTB_GUARANTEED_VOQ24_RT_OFFSET 42987 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ24_RT_OFFSET 42988 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ25_RT_OFFSET 42989 +#define PBF_REG_BTB_GUARANTEED_VOQ25_RT_OFFSET 42990 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ25_RT_OFFSET 42991 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ26_RT_OFFSET 42992 +#define PBF_REG_BTB_GUARANTEED_VOQ26_RT_OFFSET 42993 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ26_RT_OFFSET 42994 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ27_RT_OFFSET 42995 +#define PBF_REG_BTB_GUARANTEED_VOQ27_RT_OFFSET 42996 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ27_RT_OFFSET 42997 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ28_RT_OFFSET 42998 +#define PBF_REG_BTB_GUARANTEED_VOQ28_RT_OFFSET 42999 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ28_RT_OFFSET 43000 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ29_RT_OFFSET 43001 +#define PBF_REG_BTB_GUARANTEED_VOQ29_RT_OFFSET 43002 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ29_RT_OFFSET 43003 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ30_RT_OFFSET 43004 +#define PBF_REG_BTB_GUARANTEED_VOQ30_RT_OFFSET 43005 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ30_RT_OFFSET 43006 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ31_RT_OFFSET 43007 +#define PBF_REG_BTB_GUARANTEED_VOQ31_RT_OFFSET 43008 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ31_RT_OFFSET 43009 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ32_RT_OFFSET 43010 +#define PBF_REG_BTB_GUARANTEED_VOQ32_RT_OFFSET 43011 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ32_RT_OFFSET 43012 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ33_RT_OFFSET 43013 +#define PBF_REG_BTB_GUARANTEED_VOQ33_RT_OFFSET 43014 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ33_RT_OFFSET 43015 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ34_RT_OFFSET 43016 +#define PBF_REG_BTB_GUARANTEED_VOQ34_RT_OFFSET 43017 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ34_RT_OFFSET 43018 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ35_RT_OFFSET 43019 +#define PBF_REG_BTB_GUARANTEED_VOQ35_RT_OFFSET 43020 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ35_RT_OFFSET 43021 +#define XCM_REG_CON_PHY_Q3_RT_OFFSET 43022 -#define RUNTIME_ARRAY_SIZE 41743 +#define RUNTIME_ARRAY_SIZE 43023 + +/* Init Callbacks */ +#define DMAE_READY_CB 0 #endif /* __RT_DEFS_H__ */ diff --git a/drivers/net/qede/base/ecore_sp_api.h b/drivers/net/qede/base/ecore_sp_api.h index 86e84964..469bf1d6 100644 --- a/drivers/net/qede/base/ecore_sp_api.h +++ b/drivers/net/qede/base/ecore_sp_api.h @@ -1,7 +1,7 @@ /* - * Copyright (c) 2016 QLogic Corporation. + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.qede_pmd for copyright and licensing details. 
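
For orientation, the wholesale renumbering in ecore_rt_defs.h above is mechanical: the new QM_REG_PTRTBLOTHER block (256 entries) and QM_REG_PTRTBLTX block (1024 entries) shift every later runtime offset, which is exactly why RUNTIME_ARRAY_SIZE grows from 41743 to 43023 (41743 + 256 + 1024 = 43023). A stand-alone arithmetic check, using only sizes taken from this diff:

#include <assert.h>

int main(void)
{
	const int old_size = 41743;		/* previous RUNTIME_ARRAY_SIZE */
	const int ptrtblother_size = 256;	/* QM_REG_PTRTBLOTHER_RT_SIZE */
	const int ptrtbltx_size = 1024;		/* QM_REG_PTRTBLTX_RT_SIZE */

	assert(old_size + ptrtblother_size + ptrtbltx_size == 43023);
	return 0;
}
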
 */
diff --git a/drivers/net/qede/base/ecore_sp_commands.c b/drivers/net/qede/base/ecore_sp_commands.c
index 7598e7a6..47c1febf 100644
--- a/drivers/net/qede/base/ecore_sp_commands.c
+++ b/drivers/net/qede/base/ecore_sp_commands.c
@@ -1,7 +1,7 @@
 /*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
  * All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
  *
  * See LICENSE.qede_pmd for copyright and licensing details.
  */
@@ -295,6 +295,7 @@ ecore_tunn_set_pf_start_params(struct ecore_hwfn *p_hwfn,
 }
 
 #define ETH_P_8021Q 0x8100
+#define ETH_P_8021AD 0x88A8 /* 802.1ad Service VLAN */
 
 enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt,
@@ -308,7 +309,7 @@ enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc = ECORE_NOTIMPL;
	u8 page_cnt;
-	int i;
+	u8 i;
 
	/* update initial eq producer */
	ecore_eq_prod_update(p_hwfn,
@@ -343,18 +344,27 @@ enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
	p_ramrod->outer_tag_config.outer_tag.tci =
		OSAL_CPU_TO_LE16(p_hwfn->hw_info.ovlan);
+	if (OSAL_TEST_BIT(ECORE_MF_8021Q_TAGGING, &p_hwfn->p_dev->mf_bits)) {
+		p_ramrod->outer_tag_config.outer_tag.tpid = ETH_P_8021Q;
+	} else if (OSAL_TEST_BIT(ECORE_MF_8021AD_TAGGING,
+		   &p_hwfn->p_dev->mf_bits)) {
+		p_ramrod->outer_tag_config.outer_tag.tpid = ETH_P_8021AD;
+		p_ramrod->outer_tag_config.enable_stag_pri_change = 1;
+	}
+
+	p_ramrod->outer_tag_config.pri_map_valid = 1;
+	for (i = 0; i < ECORE_MAX_PFC_PRIORITIES; i++)
+		p_ramrod->outer_tag_config.inner_to_outer_pri_map[i] = i;
+
+	/* enable_stag_pri_change should be set if the port is in BD mode,
+	 * UFP with Host Control mode, or UFP with DCB over the base
+	 * interface.
+	 */
	if (OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits)) {
-		p_ramrod->outer_tag_config.outer_tag.tpid =
-			OSAL_CPU_TO_LE16(ETH_P_8021Q);
-		if (p_hwfn->ufp_info.pri_type == ECORE_UFP_PRI_OS)
+		if ((p_hwfn->ufp_info.pri_type == ECORE_UFP_PRI_OS) ||
+		    (p_hwfn->p_dcbx_info->results.dcbx_enabled))
			p_ramrod->outer_tag_config.enable_stag_pri_change = 1;
		else
			p_ramrod->outer_tag_config.enable_stag_pri_change = 0;
-		p_ramrod->outer_tag_config.pri_map_valid = 1;
-		for (i = 0; i < 8; i++)
-			p_ramrod->outer_tag_config.inner_to_outer_pri_map[i] =
-				(u8)i;
	}
 
	/* Place EQ address in RAMROD */
@@ -451,7 +461,8 @@ enum _ecore_status_t ecore_sp_pf_update_ufp(struct ecore_hwfn *p_hwfn)
		return rc;
 
	p_ent->ramrod.pf_update.update_enable_stag_pri_change = true;
-	if (p_hwfn->ufp_info.pri_type == ECORE_UFP_PRI_OS)
+	if ((p_hwfn->ufp_info.pri_type == ECORE_UFP_PRI_OS) ||
+	    (p_hwfn->p_dcbx_info->results.dcbx_enabled))
		p_ent->ramrod.pf_update.enable_stag_pri_change = 1;
	else
		p_ent->ramrod.pf_update.enable_stag_pri_change = 0;
diff --git a/drivers/net/qede/base/ecore_sp_commands.h b/drivers/net/qede/base/ecore_sp_commands.h
index 98009c65..d160a76e 100644
--- a/drivers/net/qede/base/ecore_sp_commands.h
+++ b/drivers/net/qede/base/ecore_sp_commands.h
@@ -1,7 +1,7 @@
 /*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc.
  * All rights reserved.
- * www.qlogic.com
+ * www.cavium.com
  *
  * See LICENSE.qede_pmd for copyright and licensing details.
  */
diff --git a/drivers/net/qede/base/ecore_spq.c b/drivers/net/qede/base/ecore_spq.c
index 70ffa8cd..db169e6e 100644
--- a/drivers/net/qede/base/ecore_spq.c
+++ b/drivers/net/qede/base/ecore_spq.c
@@ -1,7 +1,7 @@
 /*
- * Copyright (c) 2016 QLogic Corporation.
+ * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.qede_pmd for copyright and licensing details. */ @@ -30,7 +30,7 @@ #define SPQ_BLOCK_DELAY_MAX_ITER (10) #define SPQ_BLOCK_DELAY_US (10) -#define SPQ_BLOCK_SLEEP_MAX_ITER (1000) +#define SPQ_BLOCK_SLEEP_MAX_ITER (200) #define SPQ_BLOCK_SLEEP_MS (5) /*************************************************************************** @@ -60,8 +60,12 @@ static enum _ecore_status_t __ecore_spq_block(struct ecore_hwfn *p_hwfn, u32 iter_cnt; comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie; - iter_cnt = sleep_between_iter ? SPQ_BLOCK_SLEEP_MAX_ITER + iter_cnt = sleep_between_iter ? p_hwfn->p_spq->block_sleep_max_iter : SPQ_BLOCK_DELAY_MAX_ITER; +#ifndef ASIC_ONLY + if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && sleep_between_iter) + iter_cnt *= 5; +#endif while (iter_cnt--) { OSAL_POLL_MODE_DPC(p_hwfn); @@ -138,6 +142,14 @@ err: return ECORE_BUSY; } +void ecore_set_spq_block_timeout(struct ecore_hwfn *p_hwfn, + u32 spq_timeout_ms) +{ + p_hwfn->p_spq->block_sleep_max_iter = spq_timeout_ms ? + spq_timeout_ms / SPQ_BLOCK_SLEEP_MS : + SPQ_BLOCK_SLEEP_MAX_ITER; +} + /*************************************************************************** * SPQ entries inner API ***************************************************************************/ @@ -389,7 +401,7 @@ enum _ecore_status_t ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem) /* Allocate EQ struct */ p_eq = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_eq)); if (!p_eq) { - DP_NOTICE(p_hwfn, true, + DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_eq'\n"); return ECORE_NOMEM; } @@ -402,7 +414,7 @@ enum _ecore_status_t ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem) num_elem, sizeof(union event_ring_element), &p_eq->chain, OSAL_NULL) != ECORE_SUCCESS) { - DP_NOTICE(p_hwfn, true, "Failed to allocate eq chain\n"); + DP_NOTICE(p_hwfn, false, "Failed to allocate eq chain\n"); goto eq_allocate_fail; } @@ -547,8 +559,7 @@ enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn) p_spq = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(struct ecore_spq)); if (!p_spq) { - DP_NOTICE(p_hwfn, true, - "Failed to allocate `struct ecore_spq'\n"); + DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_spq'\n"); return ECORE_NOMEM; } @@ -560,7 +571,7 @@ enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn) 0, /* N/A when the mode is SINGLE */ sizeof(struct slow_path_element), &p_spq->chain, OSAL_NULL)) { - DP_NOTICE(p_hwfn, true, "Failed to allocate spq chain\n"); + DP_NOTICE(p_hwfn, false, "Failed to allocate spq chain\n"); goto spq_allocate_fail; } @@ -576,7 +587,8 @@ enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn) p_spq->p_phys = p_phys; #ifdef CONFIG_ECORE_LOCK_ALLOC - OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_spq->lock); + if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_spq->lock)) + goto spq_allocate_fail; #endif p_hwfn->p_spq = p_spq; @@ -630,9 +642,7 @@ ecore_spq_get_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry **pp_ent) if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) { p_ent = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC, sizeof(*p_ent)); if (!p_ent) { - DP_NOTICE(p_hwfn, true, - "Failed to allocate an SPQ entry for a pending" - " ramrod\n"); + DP_NOTICE(p_hwfn, false, "Failed to allocate an SPQ entry for a pending ramrod\n"); rc = ECORE_NOMEM; goto out_unlock; } @@ -1013,7 +1023,7 @@ enum _ecore_status_t ecore_consq_alloc(struct ecore_hwfn *p_hwfn) p_consq = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, 
sizeof(*p_consq)); if (!p_consq) { - DP_NOTICE(p_hwfn, true, + DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_consq'\n"); return ECORE_NOMEM; } @@ -1026,7 +1036,7 @@ enum _ecore_status_t ecore_consq_alloc(struct ecore_hwfn *p_hwfn) ECORE_CHAIN_PAGE_SIZE / 0x80, 0x80, &p_consq->chain, OSAL_NULL) != ECORE_SUCCESS) { - DP_NOTICE(p_hwfn, true, "Failed to allocate consq chain"); + DP_NOTICE(p_hwfn, false, "Failed to allocate consq chain"); goto consq_allocate_fail; } diff --git a/drivers/net/qede/base/ecore_spq.h b/drivers/net/qede/base/ecore_spq.h index 526cff08..7d9be3e9 100644 --- a/drivers/net/qede/base/ecore_spq.h +++ b/drivers/net/qede/base/ecore_spq.h @@ -1,7 +1,7 @@ /* - * Copyright (c) 2016 QLogic Corporation. + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.qede_pmd for copyright and licensing details. */ @@ -116,6 +116,9 @@ struct ecore_spq { dma_addr_t p_phys; struct ecore_spq_entry *p_virt; + /* SPQ max sleep iterations used in __ecore_spq_block() */ + u32 block_sleep_max_iter; + /* Bitmap for handling out-of-order completions */ #define SPQ_RING_SIZE \ (CORE_SPQE_PAGE_SIZE_BYTES / sizeof(struct slow_path_element)) @@ -149,6 +152,16 @@ struct ecore_spq { struct ecore_port; struct ecore_hwfn; +/** + * @brief ecore_set_spq_block_timeout - calculates the maximum sleep + * iterations used in __ecore_spq_block(); + * + * @param p_hwfn + * @param spq_timeout_ms + */ +void ecore_set_spq_block_timeout(struct ecore_hwfn *p_hwfn, + u32 spq_timeout_ms); + /** * @brief ecore_spq_post - Posts a Slow hwfn request to FW, or lacking that * Pends it to the future list. diff --git a/drivers/net/qede/base/ecore_sriov.c b/drivers/net/qede/base/ecore_sriov.c index b1e26d6f..451aabb1 100644 --- a/drivers/net/qede/base/ecore_sriov.c +++ b/drivers/net/qede/base/ecore_sriov.c @@ -1,7 +1,7 @@ /* - * Copyright (c) 2016 QLogic Corporation. + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.qede_pmd for copyright and licensing details. */ @@ -590,8 +590,7 @@ enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn *p_hwfn) p_sriov = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_sriov)); if (!p_sriov) { - DP_NOTICE(p_hwfn, true, - "Failed to allocate `struct ecore_sriov'\n"); + DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_sriov'\n"); return ECORE_NOMEM; } @@ -648,7 +647,7 @@ enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn) p_dev->p_iov_info = OSAL_ZALLOC(p_dev, GFP_KERNEL, sizeof(*p_dev->p_iov_info)); if (!p_dev->p_iov_info) { - DP_NOTICE(p_hwfn, true, + DP_NOTICE(p_hwfn, false, "Can't support IOV due to lack of memory\n"); return ECORE_NOMEM; } @@ -1968,7 +1967,8 @@ ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn, if (!p_vf->vport_instance) return ECORE_INVAL; - if (events & (1 << MAC_ADDR_FORCED)) { + if ((events & (1 << MAC_ADDR_FORCED)) || + p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change) { /* Since there's no way [currently] of removing the MAC, * we can always assume this means we need to force it. 
*/ @@ -1989,7 +1989,11 @@ ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn, return rc; } - p_vf->configured_features |= 1 << MAC_ADDR_FORCED; + if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change) + p_vf->configured_features |= + 1 << VFPF_BULLETIN_MAC_ADDR; + else + p_vf->configured_features |= 1 << MAC_ADDR_FORCED; } if (events & (1 << VLAN_ADDR_FORCED)) { @@ -2975,8 +2979,7 @@ ecore_iov_vp_update_mcast_bin_param(struct ecore_hwfn *p_hwfn, p_data->update_approx_mcast_flg = 1; OSAL_MEMCPY(p_data->bins, p_mcast_tlv->bins, - sizeof(unsigned long) * - ETH_MULTICAST_MAC_BINS_IN_REGS); + sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS); *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_MCAST; } @@ -4370,7 +4373,11 @@ void ecore_iov_bulletin_set_forced_mac(struct ecore_hwfn *p_hwfn, return; } - feature = 1 << MAC_ADDR_FORCED; + if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change) + feature = 1 << VFPF_BULLETIN_MAC_ADDR; + else + feature = 1 << MAC_ADDR_FORCED; + OSAL_MEMCPY(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN); vf_info->bulletin.p_virt->valid_bitmap |= feature; @@ -4411,9 +4418,13 @@ enum _ecore_status_t ecore_iov_bulletin_set_mac(struct ecore_hwfn *p_hwfn, vf_info->bulletin.p_virt->valid_bitmap |= feature; + if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change) + ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature); + return ECORE_SUCCESS; } +#ifndef LINUX_REMOVE enum _ecore_status_t ecore_iov_bulletin_set_forced_untagged_default(struct ecore_hwfn *p_hwfn, bool b_untagged_only, int vfid) @@ -4470,6 +4481,7 @@ void ecore_iov_get_vfs_opaque_fid(struct ecore_hwfn *p_hwfn, int vfid, *opaque_fid = vf_info->opaque_fid; } +#endif void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn, u16 pvid, int vfid) @@ -4657,6 +4669,22 @@ u32 ecore_iov_pfvf_msg_length(void) return sizeof(union pfvf_tlvs); } +u8 *ecore_iov_bulletin_get_mac(struct ecore_hwfn *p_hwfn, + u16 rel_vf_id) +{ + struct ecore_vf_info *p_vf; + + p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true); + if (!p_vf || !p_vf->bulletin.p_virt) + return OSAL_NULL; + + if (!(p_vf->bulletin.p_virt->valid_bitmap & + (1 << VFPF_BULLETIN_MAC_ADDR))) + return OSAL_NULL; + + return p_vf->bulletin.p_virt->mac; +} + u8 *ecore_iov_bulletin_get_forced_mac(struct ecore_hwfn *p_hwfn, u16 rel_vf_id) { struct ecore_vf_info *p_vf; diff --git a/drivers/net/qede/base/ecore_sriov.h b/drivers/net/qede/base/ecore_sriov.h index 850b1052..8d846d3e 100644 --- a/drivers/net/qede/base/ecore_sriov.h +++ b/drivers/net/qede/base/ecore_sriov.h @@ -1,7 +1,7 @@ /* - * Copyright (c) 2016 QLogic Corporation. + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.qede_pmd for copyright and licensing details. */ diff --git a/drivers/net/qede/base/ecore_status.h b/drivers/net/qede/base/ecore_status.h index c77ec260..3af2b57d 100644 --- a/drivers/net/qede/base/ecore_status.h +++ b/drivers/net/qede/base/ecore_status.h @@ -1,7 +1,7 @@ /* - * Copyright (c) 2016 QLogic Corporation. + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.qede_pmd for copyright and licensing details. */ diff --git a/drivers/net/qede/base/ecore_utils.h b/drivers/net/qede/base/ecore_utils.h index 034cf1eb..f6459c35 100644 --- a/drivers/net/qede/base/ecore_utils.h +++ b/drivers/net/qede/base/ecore_utils.h @@ -1,7 +1,7 @@ /* - * Copyright (c) 2016 QLogic Corporation. + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. 
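With allow_vf_mac_change set, the PF publishes MAC updates through the ordinary VFPF_BULLETIN_MAC_ADDR bulletin bit instead of MAC_ADDR_FORCED, so the VF may still override the address later. A hedged sketch of the bit selection; the numeric bit indices below are placeholders, not the real ecore_vfpf_if.h values:

#include <stdint.h>

/* assumed stand-ins for the real bulletin bit indices */
enum { MAC_ADDR_FORCED = 0, VFPF_BULLETIN_MAC_ADDR = 4 };

static uint64_t bulletin_mac_feature(int allow_vf_mac_change)
{
	return allow_vf_mac_change ? (1ULL << VFPF_BULLETIN_MAC_ADDR)
				   : (1ULL << MAC_ADDR_FORCED);
}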
- * www.qlogic.com + * www.cavium.com * * See LICENSE.qede_pmd for copyright and licensing details. */ diff --git a/drivers/net/qede/base/ecore_vf.c b/drivers/net/qede/base/ecore_vf.c index e0f2dd5a..b7b3b872 100644 --- a/drivers/net/qede/base/ecore_vf.c +++ b/drivers/net/qede/base/ecore_vf.c @@ -1,7 +1,7 @@ /* - * Copyright (c) 2016 QLogic Corporation. + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.qede_pmd for copyright and licensing details. */ @@ -1275,8 +1275,7 @@ ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn, resp_size += sizeof(struct pfvf_def_resp_tlv); OSAL_MEMCPY(p_mcast_tlv->bins, p_params->bins, - sizeof(unsigned long) * - ETH_MULTICAST_MAC_BINS_IN_REGS); + sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS); } update_rx = p_params->accept_flags.update_rx_mode_config; @@ -1473,7 +1472,7 @@ void ecore_vf_pf_filter_mcast(struct ecore_hwfn *p_hwfn, u32 bit; bit = ecore_mcast_bin_from_mac(p_filter_cmd->mac[i]); - OSAL_SET_BIT(bit, sp_params.bins); + sp_params.bins[bit / 32] |= 1 << (bit % 32); } } diff --git a/drivers/net/qede/base/ecore_vf.h b/drivers/net/qede/base/ecore_vf.h index de2758cb..e26b30bf 100644 --- a/drivers/net/qede/base/ecore_vf.h +++ b/drivers/net/qede/base/ecore_vf.h @@ -1,7 +1,7 @@ /* - * Copyright (c) 2016 QLogic Corporation. + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.qede_pmd for copyright and licensing details. */ diff --git a/drivers/net/qede/base/ecore_vf_api.h b/drivers/net/qede/base/ecore_vf_api.h index 9815cf8a..af7bc36b 100644 --- a/drivers/net/qede/base/ecore_vf_api.h +++ b/drivers/net/qede/base/ecore_vf_api.h @@ -1,7 +1,7 @@ /* - * Copyright (c) 2016 QLogic Corporation. + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.qede_pmd for copyright and licensing details. */ diff --git a/drivers/net/qede/base/ecore_vfpf_if.h b/drivers/net/qede/base/ecore_vfpf_if.h index ecb00649..dce937e0 100644 --- a/drivers/net/qede/base/ecore_vfpf_if.h +++ b/drivers/net/qede/base/ecore_vfpf_if.h @@ -1,7 +1,7 @@ /* - * Copyright (c) 2016 QLogic Corporation. + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.qede_pmd for copyright and licensing details. */ @@ -396,7 +396,13 @@ struct vfpf_vport_update_mcast_bin_tlv { struct channel_tlv tl; u8 padding[4]; - u64 bins[8]; + /* This was a mistake; There are only 256 approx bins, + * and in HSI they're divided into 32-bit values. + * As old VFs used to set-bit to the values on its side, + * the upper half of the array is never expected to contain any data. + */ + u64 bins[4]; + u64 obsolete_bins[4]; }; struct vfpf_vport_update_accept_param_tlv { diff --git a/drivers/net/qede/base/eth_common.h b/drivers/net/qede/base/eth_common.h index 45a0356d..9de49b64 100644 --- a/drivers/net/qede/base/eth_common.h +++ b/drivers/net/qede/base/eth_common.h @@ -1,7 +1,7 @@ /* - * Copyright (c) 2016 QLogic Corporation. + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.qede_pmd for copyright and licensing details. 
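The approximate multicast filter has 256 hash bins split across 32-bit registers, which is why the vport-update paths above switch from OSAL_SET_BIT on an unsigned-long array to explicit u32 indexing, and why the TLV comment notes the upper half of the old u64[8] array was never populated. A standalone demonstration of the same arithmetic:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define ETH_MULTICAST_MAC_BINS		256
#define ETH_MULTICAST_MAC_BINS_IN_REGS	(ETH_MULTICAST_MAC_BINS / 32)

int main(void)
{
	uint32_t bins[ETH_MULTICAST_MAC_BINS_IN_REGS] = {0};
	uint32_t bit = 77; /* e.g. a value from ecore_mcast_bin_from_mac() */

	bins[bit / 32] |= 1u << (bit % 32);
	printf("bin %" PRIu32 " -> reg %" PRIu32 ", mask 0x%08" PRIx32 "\n",
	       bit, bit / 32, bins[bit / 32]);	/* reg 2, mask 0x00002000 */
	return 0;
}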
*/ @@ -119,6 +119,9 @@ /* Number of etherType values configured by driver for control frame check */ #define ETH_CTL_FRAME_ETH_TYPE_NUM 4 +/* GFS constants */ +#define ETH_GFT_TRASHCAN_VPORT 0x1FF /* GFT drop flow vport number */ + /* diff --git a/drivers/net/qede/base/mcp_public.h b/drivers/net/qede/base/mcp_public.h index 81ca6634..752473e1 100644 --- a/drivers/net/qede/base/mcp_public.h +++ b/drivers/net/qede/base/mcp_public.h @@ -1,7 +1,7 @@ /* - * Copyright (c) 2016 QLogic Corporation. + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.qede_pmd for copyright and licensing details. */ @@ -800,6 +800,7 @@ struct public_port { #define ETH_TRANSCEIVER_TYPE_4x10G 0x1f #define ETH_TRANSCEIVER_TYPE_4x25G_CR 0x20 #define ETH_TRANSCEIVER_TYPE_1000BASET 0x21 +#define ETH_TRANSCEIVER_TYPE_10G_BASET 0x22 #define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR 0x30 #define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR 0x31 #define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR 0x32 @@ -1777,6 +1778,8 @@ struct public_drv_mb { #define FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ 0x00000001 /* MFW supports EEE */ #define FW_MB_PARAM_FEATURE_SUPPORT_EEE 0x00000002 +/* MFW supports DRV_LOAD Timeout */ +#define FW_MB_PARAM_FEATURE_SUPPORT_DRV_LOAD_TO 0x00000004 /* MFW supports virtual link */ #define FW_MB_PARAM_FEATURE_SUPPORT_VLINK 0x00010000 diff --git a/drivers/net/qede/base/nvm_cfg.h b/drivers/net/qede/base/nvm_cfg.h index c99e805d..a20d0674 100644 --- a/drivers/net/qede/base/nvm_cfg.h +++ b/drivers/net/qede/base/nvm_cfg.h @@ -1,7 +1,7 @@ /* - * Copyright (c) 2016 QLogic Corporation. + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.qede_pmd for copyright and licensing details. */ diff --git a/drivers/net/qede/base/reg_addr.h b/drivers/net/qede/base/reg_addr.h index ad15d28a..71f0ca13 100644 --- a/drivers/net/qede/base/reg_addr.h +++ b/drivers/net/qede/base/reg_addr.h @@ -1,15 +1,7 @@ /* - * Copyright (c) 2016 QLogic Corporation. + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. - */ - -/* - * Copyright (c) 2016 QLogic Corporation. - * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.qede_pmd for copyright and licensing details. */ @@ -1222,3 +1214,5 @@ #define MCP_REG_CPU_STATE_SOFT_HALTED (0x1 << 10) #define PRS_REG_SEARCH_TENANT_ID 0x1f044cUL #define PGLUE_B_REG_VF_BAR1_SIZE 0x2aae68UL + +#define RSS_REG_RSS_RAM_MASK 0x238c10UL diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c index a91f4368..3206cc6c 100644 --- a/drivers/net/qede/qede_ethdev.c +++ b/drivers/net/qede/qede_ethdev.c @@ -1,7 +1,7 @@ /* - * Copyright (c) 2016 QLogic Corporation. + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.qede_pmd for copyright and licensing details. 
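The FW_MB_PARAM_FEATURE_SUPPORT_* values are single bits in the management-FW response word, so probing for the new driver-load timeout capability is a plain mask test. A small sketch reusing the constants defined above:

#include <stdint.h>
#include <stdbool.h>

#define FW_MB_PARAM_FEATURE_SUPPORT_EEE		0x00000002
#define FW_MB_PARAM_FEATURE_SUPPORT_DRV_LOAD_TO	0x00000004

static bool mfw_supports_drv_load_timeout(uint32_t fw_mb_param)
{
	return (fw_mb_param & FW_MB_PARAM_FEATURE_SUPPORT_DRV_LOAD_TO) != 0;
}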
*/ @@ -16,7 +16,7 @@ int qede_logtype_init; int qede_logtype_driver; static const struct qed_eth_ops *qed_ops; -static int64_t timer_period = 1; +#define QEDE_SP_TIMER_PERIOD 10000 /* 100ms */ /* VXLAN tunnel classification mapping */ const struct _qede_udp_tunn_types { @@ -499,6 +499,9 @@ qede_start_vport(struct qede_dev *qdev, uint16_t mtu) return 0; } +#define QEDE_NPAR_TX_SWITCHING "npar_tx_switching" +#define QEDE_VF_TX_SWITCHING "vf_tx_switching" + /* Activate or deactivate vport via vport-update */ int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg) { @@ -516,10 +519,12 @@ int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg) params.vport_active_rx_flg = flg; params.vport_active_tx_flg = flg; if (!qdev->enable_tx_switching) { - if (IS_VF(edev)) { + if ((QEDE_NPAR_TX_SWITCHING != NULL) || + ((QEDE_VF_TX_SWITCHING != NULL) && IS_VF(edev))) { params.update_tx_switching_flg = 1; params.tx_switching_flg = !flg; - DP_INFO(edev, "VF tx-switching is disabled\n"); + DP_INFO(edev, "%s tx-switching is disabled\n", + QEDE_NPAR_TX_SWITCHING ? "NPAR" : "VF"); } } for_each_hwfn(edev, i) { @@ -591,6 +596,8 @@ int qede_enable_tpa(struct rte_eth_dev *eth_dev, bool flg) } } qdev->enable_lro = flg; + eth_dev->data->lro = flg; + DP_INFO(edev, "LRO is %s\n", flg ? "enabled" : "disabled"); return 0; @@ -779,6 +786,36 @@ qede_geneve_enable(struct rte_eth_dev *eth_dev, uint8_t clss, return rc; } +static int +qede_ipgre_enable(struct rte_eth_dev *eth_dev, uint8_t clss, + bool enable) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + enum _ecore_status_t rc = ECORE_INVAL; + struct ecore_tunnel_info tunn; + + memset(&tunn, 0, sizeof(struct ecore_tunnel_info)); + tunn.ip_gre.b_update_mode = true; + tunn.ip_gre.b_mode_enabled = enable; + tunn.ip_gre.tun_cls = clss; + tunn.ip_gre.tun_cls = clss; + tunn.b_update_rx_cls = true; + tunn.b_update_tx_cls = true; + + rc = qede_tunnel_update(qdev, &tunn); + if (rc == ECORE_SUCCESS) { + qdev->ipgre.enable = enable; + DP_INFO(edev, "IPGRE is %s\n", + enable ? 
"enabled" : "disabled"); + } else { + DP_ERR(edev, "Failed to update tunn_clss %u\n", + clss); + } + + return rc; +} + static int qede_tunn_enable(struct rte_eth_dev *eth_dev, uint8_t clss, enum rte_eth_tunnel_type tunn_type, bool enable) @@ -792,6 +829,9 @@ qede_tunn_enable(struct rte_eth_dev *eth_dev, uint8_t clss, case RTE_TUNNEL_TYPE_GENEVE: rc = qede_geneve_enable(eth_dev, clss, enable); break; + case RTE_TUNNEL_TYPE_IP_IN_GRE: + rc = qede_ipgre_enable(eth_dev, clss, enable); + break; default: rc = -EINVAL; break; @@ -817,10 +857,10 @@ qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast, ETHER_ADDR_LEN) == 0) && ucast->vni == tmp->vni && ucast->vlan == tmp->vlan) { - DP_ERR(edev, "Unicast MAC is already added" - " with vlan = %u, vni = %u\n", - ucast->vlan, ucast->vni); - return -EEXIST; + DP_INFO(edev, "Unicast MAC is already added" + " with vlan = %u, vni = %u\n", + ucast->vlan, ucast->vni); + return 0; } } u = rte_malloc(NULL, sizeof(struct qede_ucast_entry), @@ -854,110 +894,95 @@ qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast, } static int -qede_mcast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *mcast, - bool add) +qede_add_mcast_filters(struct rte_eth_dev *eth_dev, struct ether_addr *mc_addrs, + uint32_t mc_addrs_num) { struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); - struct ether_addr *mac_addr; - struct qede_mcast_entry *tmp = NULL; - struct qede_mcast_entry *m; + struct ecore_filter_mcast mcast; + struct qede_mcast_entry *m = NULL; + uint8_t i; + int rc; - mac_addr = (struct ether_addr *)mcast->mac; - if (add) { - SLIST_FOREACH(tmp, &qdev->mc_list_head, list) { - if (memcmp(mac_addr, &tmp->mac, ETHER_ADDR_LEN) == 0) { - DP_ERR(edev, - "Multicast MAC is already added\n"); - return -EEXIST; - } - } + for (i = 0; i < mc_addrs_num; i++) { m = rte_malloc(NULL, sizeof(struct qede_mcast_entry), - RTE_CACHE_LINE_SIZE); + RTE_CACHE_LINE_SIZE); if (!m) { - DP_ERR(edev, - "Did not allocate memory for mcast\n"); + DP_ERR(edev, "Did not allocate memory for mcast\n"); return -ENOMEM; } - ether_addr_copy(mac_addr, &m->mac); + ether_addr_copy(&mc_addrs[i], &m->mac); SLIST_INSERT_HEAD(&qdev->mc_list_head, m, list); - qdev->num_mc_addr++; - } else { - SLIST_FOREACH(tmp, &qdev->mc_list_head, list) { - if (memcmp(mac_addr, &tmp->mac, ETHER_ADDR_LEN) == 0) - break; - } - if (tmp == NULL) { - DP_INFO(edev, "Multicast mac is not found\n"); - return -EINVAL; - } - SLIST_REMOVE(&qdev->mc_list_head, tmp, - qede_mcast_entry, list); - qdev->num_mc_addr--; + } + memset(&mcast, 0, sizeof(mcast)); + mcast.num_mc_addrs = mc_addrs_num; + mcast.opcode = ECORE_FILTER_ADD; + for (i = 0; i < mc_addrs_num; i++) + ether_addr_copy(&mc_addrs[i], (struct ether_addr *) + &mcast.mac[i]); + rc = ecore_filter_mcast_cmd(edev, &mcast, ECORE_SPQ_MODE_CB, NULL); + if (rc != ECORE_SUCCESS) { + DP_ERR(edev, "Failed to add multicast filter (rc = %d\n)", rc); + return -1; } return 0; } +static int qede_del_mcast_filters(struct rte_eth_dev *eth_dev) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + struct qede_mcast_entry *tmp = NULL; + struct ecore_filter_mcast mcast; + int j; + int rc; + + memset(&mcast, 0, sizeof(mcast)); + mcast.num_mc_addrs = qdev->num_mc_addr; + mcast.opcode = ECORE_FILTER_REMOVE; + j = 0; + SLIST_FOREACH(tmp, &qdev->mc_list_head, list) { + ether_addr_copy(&tmp->mac, (struct ether_addr *)&mcast.mac[j]); + j++; + } + rc = 
ecore_filter_mcast_cmd(edev, &mcast, ECORE_SPQ_MODE_CB, NULL); + if (rc != ECORE_SUCCESS) { + DP_ERR(edev, "Failed to delete multicast filter\n"); + return -1; + } + /* Init the list */ + while (!SLIST_EMPTY(&qdev->mc_list_head)) { + tmp = SLIST_FIRST(&qdev->mc_list_head); + SLIST_REMOVE_HEAD(&qdev->mc_list_head, list); + } + SLIST_INIT(&qdev->mc_list_head); + + return 0; +} + static enum _ecore_status_t qede_mac_int_ops(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast, bool add) { struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); - enum _ecore_status_t rc; - struct ecore_filter_mcast mcast; - struct qede_mcast_entry *tmp; - uint16_t j = 0; + enum _ecore_status_t rc = ECORE_INVAL; - /* Multicast */ - if (is_multicast_ether_addr((struct ether_addr *)ucast->mac)) { - if (add) { - if (qdev->num_mc_addr >= ECORE_MAX_MC_ADDRS) { - DP_ERR(edev, - "Mcast filter table limit exceeded, " - "Please enable mcast promisc mode\n"); - return -ECORE_INVAL; - } - } - rc = qede_mcast_filter(eth_dev, ucast, add); - if (rc == 0) { - DP_INFO(edev, "num_mc_addrs = %u\n", qdev->num_mc_addr); - memset(&mcast, 0, sizeof(mcast)); - mcast.num_mc_addrs = qdev->num_mc_addr; - mcast.opcode = ECORE_FILTER_ADD; - SLIST_FOREACH(tmp, &qdev->mc_list_head, list) { - ether_addr_copy(&tmp->mac, - (struct ether_addr *)&mcast.mac[j]); - j++; - } - rc = ecore_filter_mcast_cmd(edev, &mcast, - ECORE_SPQ_MODE_CB, NULL); - } - if (rc != ECORE_SUCCESS) { - DP_ERR(edev, "Failed to add multicast filter" - " rc = %d, op = %d\n", rc, add); - } - } else { /* Unicast */ - if (add) { - if (qdev->num_uc_addr >= - qdev->dev_info.num_mac_filters) { - DP_ERR(edev, - "Ucast filter table limit exceeded," - " Please enable promisc mode\n"); - return -ECORE_INVAL; - } - } - rc = qede_ucast_filter(eth_dev, ucast, add); - if (rc == 0) - rc = ecore_filter_ucast_cmd(edev, ucast, - ECORE_SPQ_MODE_CB, NULL); - if (rc != ECORE_SUCCESS) { - DP_ERR(edev, "MAC filter failed, rc = %d, op = %d\n", - rc, add); - } + if (add && (qdev->num_uc_addr >= qdev->dev_info.num_mac_filters)) { + DP_ERR(edev, "Ucast filter table limit exceeded," + " Please enable promisc mode\n"); + return ECORE_INVAL; } + rc = qede_ucast_filter(eth_dev, ucast, add); + if (rc == 0) + rc = ecore_filter_ucast_cmd(edev, ucast, + ECORE_SPQ_MODE_CB, NULL); + if (rc != ECORE_SUCCESS) + DP_ERR(edev, "MAC filter failed, rc = %d, op = %d\n", + rc, add); + return rc; } @@ -998,10 +1023,10 @@ qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index) ether_addr_copy(ð_dev->data->mac_addrs[index], (struct ether_addr *)&ucast.mac); - ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, NULL); + qede_mac_int_ops(eth_dev, &ucast, false); } -static void +static int qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr) { struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); @@ -1010,12 +1035,11 @@ qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr) if (IS_VF(edev) && !ecore_vf_check_mac(ECORE_LEADING_HWFN(edev), mac_addr->addr_bytes)) { DP_ERR(edev, "Setting MAC address is not allowed\n"); - ether_addr_copy(&qdev->primary_mac, - ð_dev->data->mac_addrs[0]); - return; + return -EPERM; } qede_mac_addr_add(eth_dev, mac_addr, 0, 0); + return 0; } static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool flg) @@ -1093,9 +1117,9 @@ static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev, SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) { if (tmp->vid == vlan_id) { - DP_ERR(edev, "VLAN %u 
already configured\n", - vlan_id); - return -EEXIST; + DP_INFO(edev, "VLAN %u already configured\n", + vlan_id); + return 0; } } @@ -1166,10 +1190,10 @@ static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask) { struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); - struct rte_eth_rxmode *rxmode = ð_dev->data->dev_conf.rxmode; + uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads; if (mask & ETH_VLAN_STRIP_MASK) { - if (rxmode->hw_vlan_strip) + if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP) (void)qede_vlan_stripping(eth_dev, 1); else (void)qede_vlan_stripping(eth_dev, 0); @@ -1177,7 +1201,7 @@ static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask) if (mask & ETH_VLAN_FILTER_MASK) { /* VLAN filtering kicks in when a VLAN is added */ - if (rxmode->hw_vlan_filter) { + if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) { qede_vlan_filter_set(eth_dev, 0, 1); } else { if (qdev->configured_vlans > 1) { /* Excluding VLAN0 */ @@ -1187,7 +1211,8 @@ static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask) /* Signal app that VLAN filtering is still * enabled */ - rxmode->hw_vlan_filter = true; + eth_dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_VLAN_FILTER; } else { qede_vlan_filter_set(eth_dev, 0, 0); } @@ -1195,13 +1220,11 @@ static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask) } if (mask & ETH_VLAN_EXTEND_MASK) - DP_INFO(edev, "No offloads are supported with VLAN Q-in-Q" - " and classification is based on outer tag only\n"); + DP_ERR(edev, "Extend VLAN not supported\n"); qdev->vlan_offload_mask = mask; - DP_INFO(edev, "vlan offload mask %d vlan-strip %d vlan-filter %d\n", - mask, rxmode->hw_vlan_strip, rxmode->hw_vlan_filter); + DP_INFO(edev, "VLAN offload mask %d\n", mask); return 0; } @@ -1267,19 +1290,19 @@ static void qede_fastpath_start(struct ecore_dev *edev) static int qede_dev_start(struct rte_eth_dev *eth_dev) { - struct rte_eth_rxmode *rxmode = ð_dev->data->dev_conf.rxmode; struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + struct rte_eth_rxmode *rxmode = ð_dev->data->dev_conf.rxmode; PMD_INIT_FUNC_TRACE(edev); /* Configure TPA parameters */ - if (rxmode->enable_lro) { + if (rxmode->offloads & DEV_RX_OFFLOAD_TCP_LRO) { if (qede_enable_tpa(eth_dev, true)) return -EINVAL; /* Enable scatter mode for LRO */ - if (!rxmode->enable_scatter) - eth_dev->data->scattered_rx = 1; + if (!eth_dev->data->scattered_rx) + rxmode->offloads |= DEV_RX_OFFLOAD_SCATTER; } /* Start queues */ @@ -1294,7 +1317,7 @@ static int qede_dev_start(struct rte_eth_dev *eth_dev) * Also, we would like to retain similar behavior in PF case, so we * don't do PF/VF specific check here. 
*/ - if (rxmode->mq_mode == ETH_MQ_RX_RSS) + if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) if (qede_config_rss(eth_dev)) goto err; @@ -1336,13 +1359,15 @@ static void qede_dev_stop(struct rte_eth_dev *eth_dev) /* Disable traffic */ ecore_hw_stop_fastpath(edev); /* TBD - loop */ + if (IS_PF(edev)) + qede_mac_addr_remove(eth_dev, 0); + DP_INFO(edev, "Device is stopped\n"); } -#define QEDE_TX_SWITCHING "vf_txswitch" - const char *valid_args[] = { - QEDE_TX_SWITCHING, + QEDE_NPAR_TX_SWITCHING, + QEDE_VF_TX_SWITCHING, NULL, }; @@ -1361,7 +1386,8 @@ static int qede_args_check(const char *key, const char *val, void *opaque) return errno; } - if (strcmp(QEDE_TX_SWITCHING, key) == 0) + if ((strcmp(QEDE_NPAR_TX_SWITCHING, key) == 0) || + (strcmp(QEDE_VF_TX_SWITCHING, key) == 0)) qdev->enable_tx_switching = !!tmp; return ret; @@ -1411,15 +1437,15 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev) /* Check requirements for 100G mode */ if (ECORE_IS_CMT(edev)) { if (eth_dev->data->nb_rx_queues < 2 || - eth_dev->data->nb_tx_queues < 2) { + eth_dev->data->nb_tx_queues < 2) { DP_ERR(edev, "100G mode needs min. 2 RX/TX queues\n"); return -EINVAL; } if ((eth_dev->data->nb_rx_queues % 2 != 0) || - (eth_dev->data->nb_tx_queues % 2 != 0)) { + (eth_dev->data->nb_tx_queues % 2 != 0)) { DP_ERR(edev, - "100G mode needs even no. of RX/TX queues\n"); + "100G mode needs even no. of RX/TX queues\n"); return -EINVAL; } } @@ -1439,20 +1465,8 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev) if (qede_args(eth_dev)) return -ENOTSUP; - /* Sanity checks and throw warnings */ - if (rxmode->enable_scatter) - eth_dev->data->scattered_rx = 1; - - if (!rxmode->hw_strip_crc) - DP_INFO(edev, "L2 CRC stripping is always enabled in hw\n"); - - if (!rxmode->hw_ip_checksum) - DP_INFO(edev, "IP/UDP/TCP checksum offload is always enabled " - "in hw\n"); - if (rxmode->header_split) - DP_INFO(edev, "Header split enable is not supported\n"); - if (!(rxmode->mq_mode == ETH_MQ_RX_NONE || rxmode->mq_mode == - ETH_MQ_RX_RSS)) { + if (!(rxmode->mq_mode == ETH_MQ_RX_NONE || + rxmode->mq_mode == ETH_MQ_RX_RSS)) { DP_ERR(edev, "Unsupported multi-queue mode\n"); return -ENOTSUP; } @@ -1467,19 +1481,23 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev) return -ENOMEM; /* If jumbo enabled adjust MTU */ - if (eth_dev->data->dev_conf.rxmode.jumbo_frame) + if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) eth_dev->data->mtu = - eth_dev->data->dev_conf.rxmode.max_rx_pkt_len - - ETHER_HDR_LEN - ETHER_CRC_LEN; + eth_dev->data->dev_conf.rxmode.max_rx_pkt_len - + ETHER_HDR_LEN - ETHER_CRC_LEN; + + if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER) + eth_dev->data->scattered_rx = 1; if (qede_start_vport(qdev, eth_dev->data->mtu)) return -1; + qdev->mtu = eth_dev->data->mtu; /* Enable VLAN offloads by default */ ret = qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK | - ETH_VLAN_FILTER_MASK | - ETH_VLAN_EXTEND_MASK); + ETH_VLAN_FILTER_MASK | + ETH_VLAN_EXTEND_MASK); if (ret) return ret; @@ -1515,7 +1533,6 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev, PMD_INIT_FUNC_TRACE(edev); - dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); dev_info->min_rx_bufsize = (uint32_t)QEDE_MIN_RX_BUFF_SIZE; dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN; dev_info->rx_desc_lim = qede_rx_desc_lim; @@ -1534,26 +1551,46 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev, dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE; dev_info->hash_key_size = ECORE_RSS_KEY_SIZE * sizeof(uint32_t); dev_info->flow_type_rss_offloads = 
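The configure and start paths above now read requested offloads as DEV_RX_OFFLOAD_* bits (VLAN_STRIP, TCP_LRO, SCATTER, JUMBO_FRAME) instead of the old rxmode bit-fields, so validity reduces to checking that every requested bit is advertised in the capability word. A generic sketch of that check, with no real DPDK constants assumed:

#include <stdint.h>
#include <stdbool.h>

static bool offloads_ok(uint64_t requested, uint64_t capa)
{
	/* any bit requested but not advertised makes configure fail */
	return (requested & ~capa) == 0;
}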
(uint64_t)QEDE_RSS_OFFLOAD_ALL; - - dev_info->default_txconf = (struct rte_eth_txconf) { - .txq_flags = QEDE_TXQ_FLAGS, - }; - - dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_VLAN_STRIP | - DEV_RX_OFFLOAD_IPV4_CKSUM | + dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | - DEV_RX_OFFLOAD_TCP_LRO); - + DEV_RX_OFFLOAD_TCP_LRO | + DEV_RX_OFFLOAD_CRC_STRIP | + DEV_RX_OFFLOAD_SCATTER | + DEV_RX_OFFLOAD_JUMBO_FRAME | + DEV_RX_OFFLOAD_VLAN_FILTER | + DEV_RX_OFFLOAD_VLAN_STRIP); + dev_info->rx_queue_offload_capa = 0; + + /* TX offloads are on a per-packet basis, so it is applicable + * to both at port and queue levels. + */ dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT | DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM | DEV_TX_OFFLOAD_TCP_CKSUM | DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | - DEV_TX_OFFLOAD_TCP_TSO | + DEV_TX_OFFLOAD_QINQ_INSERT | + DEV_TX_OFFLOAD_MULTI_SEGS | + DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO | DEV_TX_OFFLOAD_GENEVE_TNL_TSO); + dev_info->tx_queue_offload_capa = dev_info->tx_offload_capa; + + dev_info->default_txconf = (struct rte_eth_txconf) { + .offloads = DEV_TX_OFFLOAD_MULTI_SEGS, + }; + + dev_info->default_rxconf = (struct rte_eth_rxconf) { + /* Packets are always dropped if no descriptors are available */ + .rx_drop_en = 1, + /* The below RX offloads are always enabled */ + .offloads = (DEV_RX_OFFLOAD_CRC_STRIP | + DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM), + }; memset(&link, 0, sizeof(struct qed_link_output)); qdev->ops->common->get_link(edev, &link); @@ -1661,7 +1698,7 @@ static void qede_poll_sp_sb_cb(void *param) qede_interrupt_action(ECORE_LEADING_HWFN(edev)); qede_interrupt_action(&edev->hwfns[1]); - rc = rte_eal_alarm_set(timer_period * US_PER_S, + rc = rte_eal_alarm_set(QEDE_SP_TIMER_PERIOD, qede_poll_sp_sb_cb, (void *)eth_dev); if (rc != 0) { @@ -1990,6 +2027,35 @@ static void qede_allmulticast_disable(struct rte_eth_dev *eth_dev) QED_FILTER_RX_MODE_TYPE_REGULAR); } +static int +qede_set_mc_addr_list(struct rte_eth_dev *eth_dev, struct ether_addr *mc_addrs, + uint32_t mc_addrs_num) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + uint8_t i; + + if (mc_addrs_num > ECORE_MAX_MC_ADDRS) { + DP_ERR(edev, "Reached max multicast filters limit," + "Please enable multicast promisc mode\n"); + return -ENOSPC; + } + + for (i = 0; i < mc_addrs_num; i++) { + if (!is_multicast_ether_addr(&mc_addrs[i])) { + DP_ERR(edev, "Not a valid multicast MAC\n"); + return -EINVAL; + } + } + + /* Flush all existing entries */ + if (qede_del_mcast_filters(eth_dev)) + return -1; + + /* Set new mcast list */ + return qede_add_mcast_filters(eth_dev, mc_addrs, mc_addrs_num); +} + static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev, struct rte_eth_fc_conf *fc_conf) { @@ -2065,6 +2131,7 @@ qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev) RTE_PTYPE_TUNNEL_VXLAN, RTE_PTYPE_L4_FRAG, RTE_PTYPE_TUNNEL_GENEVE, + RTE_PTYPE_TUNNEL_GRE, /* Inner */ RTE_PTYPE_INNER_L2_ETHER, RTE_PTYPE_INNER_L2_ETHER_VLAN, @@ -2391,6 +2458,9 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) dev->data->dev_started = 0; qede_dev_stop(dev); restart = true; + } else { + if (IS_PF(edev)) + qede_mac_addr_remove(dev, 0); } rte_delay_ms(1000); qede_start_vport(qdev, mtu); /* Recreate vport */ @@ -2418,7 +2488,9 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) 
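qede_set_mc_addr_list() above rejects any entry whose I/G bit is clear before flushing and reprogramming the list; the is_multicast_ether_addr() helper it calls reduces to a test of bit 0 in the first octet. A self-contained equivalent:

#include <stdint.h>
#include <stdbool.h>

/* the I/G bit distinguishes multicast from unicast MAC addresses */
static bool mac_is_multicast(const uint8_t mac[6])
{
	return (mac[0] & 0x01) != 0;
}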
dev->data->dev_conf.rxmode.jumbo_frame = 0; /* Restore config lost due to vport stop */ - qede_mac_addr_set(dev, &qdev->primary_mac); + if (IS_PF(edev)) + qede_mac_addr_set(dev, &qdev->primary_mac); + if (dev->data->promiscuous) qede_promiscuous_enable(dev); else @@ -2488,7 +2560,6 @@ qede_udp_dst_port_del(struct rte_eth_dev *eth_dev, ECORE_TUNN_CLSS_MAC_VLAN, false); break; - case RTE_TUNNEL_TYPE_GENEVE: if (qdev->geneve.udp_port != tunnel_udp->udp_port) { DP_ERR(edev, "UDP port %u doesn't exist\n", @@ -2578,7 +2649,6 @@ qede_udp_dst_port_add(struct rte_eth_dev *eth_dev, qdev->vxlan.udp_port = udp_port; break; - case RTE_TUNNEL_TYPE_GENEVE: if (qdev->geneve.udp_port == tunnel_udp->udp_port) { DP_INFO(edev, @@ -2616,7 +2686,6 @@ qede_udp_dst_port_add(struct rte_eth_dev *eth_dev, qdev->geneve.udp_port = udp_port; break; - default: return ECORE_INVAL; } @@ -2782,7 +2851,8 @@ qede_tunn_filter_config(struct rte_eth_dev *eth_dev, qdev->geneve.filter_type = conf->filter_type; } - if (!qdev->vxlan.enable || !qdev->geneve.enable) + if (!qdev->vxlan.enable || !qdev->geneve.enable || + !qdev->ipgre.enable) return qede_tunn_enable(eth_dev, clss, conf->tunnel_type, true); @@ -2818,15 +2888,14 @@ int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev, switch (filter_conf->tunnel_type) { case RTE_TUNNEL_TYPE_VXLAN: case RTE_TUNNEL_TYPE_GENEVE: + case RTE_TUNNEL_TYPE_IP_IN_GRE: DP_INFO(edev, "Packet steering to the specified Rx queue" " is not supported with UDP tunneling"); return(qede_tunn_filter_config(eth_dev, filter_op, filter_conf)); - /* Place holders for future tunneling support */ case RTE_TUNNEL_TYPE_TEREDO: case RTE_TUNNEL_TYPE_NVGRE: - case RTE_TUNNEL_TYPE_IP_IN_GRE: case RTE_L2_TUNNEL_TYPE_E_TAG: DP_ERR(edev, "Unsupported tunnel type %d\n", filter_conf->tunnel_type); @@ -2871,6 +2940,7 @@ static const struct eth_dev_ops qede_eth_dev_ops = { .promiscuous_disable = qede_promiscuous_disable, .allmulticast_enable = qede_allmulticast_enable, .allmulticast_disable = qede_allmulticast_disable, + .set_mc_addr_list = qede_set_mc_addr_list, .dev_stop = qede_dev_stop, .dev_close = qede_dev_close, .stats_get = qede_get_stats, @@ -2911,6 +2981,7 @@ static const struct eth_dev_ops qede_eth_vf_dev_ops = { .promiscuous_disable = qede_promiscuous_disable, .allmulticast_enable = qede_allmulticast_enable, .allmulticast_disable = qede_allmulticast_disable, + .set_mc_addr_list = qede_set_mc_addr_list, .dev_stop = qede_dev_stop, .dev_close = qede_dev_close, .stats_get = qede_get_stats, @@ -3022,7 +3093,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf) * interrupt vector but we need one for each engine. 
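As the comment above notes, CMT devices get a single interrupt vector, so the second engine's slow-path status block is serviced by a polled callback. EAL alarms are one-shot, so the poll keeps itself alive by re-arming from inside its own callback at the QEDE_SP_TIMER_PERIOD microsecond interval. A minimal sketch of the pattern:

#include <rte_alarm.h>

#define SP_POLL_PERIOD_US 10000	/* matches QEDE_SP_TIMER_PERIOD */

static void sp_poll_cb(void *arg)
{
	/* ... service slow-path status blocks here ... */

	/* rte_eal_alarm_set() is one-shot: re-arm on every invocation */
	rte_eal_alarm_set(SP_POLL_PERIOD_US, sp_poll_cb, arg);
}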
*/ if (ECORE_IS_CMT(edev) && IS_PF(edev)) { - rc = rte_eal_alarm_set(timer_period * US_PER_S, + rc = rte_eal_alarm_set(QEDE_SP_TIMER_PERIOD, qede_poll_sp_sb_cb, (void *)eth_dev); if (rc != 0) { @@ -3119,25 +3190,30 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf) SLIST_INIT(&adapter->fdir_info.fdir_list_head); SLIST_INIT(&adapter->vlan_list_head); SLIST_INIT(&adapter->uc_list_head); + SLIST_INIT(&adapter->mc_list_head); adapter->mtu = ETHER_MTU; adapter->vport_started = false; /* VF tunnel offloads is enabled by default in PF driver */ adapter->vxlan.num_filters = 0; adapter->geneve.num_filters = 0; + adapter->ipgre.num_filters = 0; if (is_vf) { adapter->vxlan.enable = true; adapter->vxlan.filter_type = ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN; adapter->vxlan.udp_port = QEDE_VXLAN_DEF_PORT; adapter->geneve.enable = true; - adapter->geneve.filter_type = ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN; adapter->geneve.udp_port = QEDE_GENEVE_DEF_PORT; + adapter->ipgre.enable = true; + adapter->ipgre.filter_type = ETH_TUNNEL_FILTER_IMAC | + ETH_TUNNEL_FILTER_IVLAN; } else { adapter->vxlan.enable = false; adapter->geneve.enable = false; + adapter->ipgre.enable = false; } DP_INFO(edev, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n", diff --git a/drivers/net/qede/qede_ethdev.h b/drivers/net/qede/qede_ethdev.h index 23f7e0e2..a335d4da 100644 --- a/drivers/net/qede/qede_ethdev.h +++ b/drivers/net/qede/qede_ethdev.h @@ -1,7 +1,7 @@ /* - * Copyright (c) 2016 QLogic Corporation. + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.qede_pmd for copyright and licensing details. */ @@ -45,7 +45,7 @@ /* Driver versions */ #define QEDE_PMD_VER_PREFIX "QEDE PMD" #define QEDE_PMD_VERSION_MAJOR 2 -#define QEDE_PMD_VERSION_MINOR 7 +#define QEDE_PMD_VERSION_MINOR 8 #define QEDE_PMD_VERSION_REVISION 0 #define QEDE_PMD_VERSION_PATCH 1 @@ -170,7 +170,7 @@ struct qede_fdir_info { #define QEDE_VXLAN_DEF_PORT (4789) #define QEDE_GENEVE_DEF_PORT (6081) -struct qede_udp_tunn { +struct qede_tunn_params { bool enable; uint16_t num_filters; uint16_t filter_type; @@ -205,8 +205,9 @@ struct qede_dev { SLIST_HEAD(uc_list_head, qede_ucast_entry) uc_list_head; uint16_t num_uc_addr; bool handle_hw_err; - struct qede_udp_tunn vxlan; - struct qede_udp_tunn geneve; + struct qede_tunn_params vxlan; + struct qede_tunn_params geneve; + struct qede_tunn_params ipgre; struct qede_fdir_info fdir_info; bool vlan_strip_flg; char drv_ver[QEDE_PMD_DRV_VER_STR_SIZE]; diff --git a/drivers/net/qede/qede_fdir.c b/drivers/net/qede/qede_fdir.c index da6364ee..9d0b0526 100644 --- a/drivers/net/qede/qede_fdir.c +++ b/drivers/net/qede/qede_fdir.c @@ -1,7 +1,7 @@ /* - * Copyright (c) 2017 QLogic Corporation. + * Copyright (c) 2017 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.qede_pmd for copyright and licensing details. */ @@ -141,8 +141,8 @@ qede_config_cmn_fdir_filter(struct rte_eth_dev *eth_dev, if (add) { SLIST_FOREACH(tmp, &qdev->fdir_info.fdir_list_head, list) { if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0) { - DP_ERR(edev, "flowdir filter exist\n"); - rc = -EEXIST; + DP_INFO(edev, "flowdir filter exist\n"); + rc = 0; goto err2; } } diff --git a/drivers/net/qede/qede_if.h b/drivers/net/qede/qede_if.h index 246f0fd3..01f17c9a 100644 --- a/drivers/net/qede/qede_if.h +++ b/drivers/net/qede/qede_if.h @@ -1,7 +1,7 @@ /* - * Copyright (c) 2016 QLogic Corporation. + * Copyright (c) 2016 - 2018 Cavium Inc. 
* All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.qede_pmd for copyright and licensing details. */ diff --git a/drivers/net/qede/qede_logs.h b/drivers/net/qede/qede_logs.h index 159315e7..e7f714f2 100644 --- a/drivers/net/qede/qede_logs.h +++ b/drivers/net/qede/qede_logs.h @@ -1,7 +1,7 @@ /* - * Copyright (c) 2016 QLogic Corporation. + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.qede_pmd for copyright and licensing details. */ diff --git a/drivers/net/qede/qede_main.c b/drivers/net/qede/qede_main.c index ae187321..c3407fe9 100644 --- a/drivers/net/qede/qede_main.c +++ b/drivers/net/qede/qede_main.c @@ -1,7 +1,7 @@ /* - * Copyright (c) 2016 QLogic Corporation. + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.qede_pmd for copyright and licensing details. */ @@ -9,6 +9,7 @@ #include #include #include +#include #include "qede_ethdev.h" @@ -19,7 +20,7 @@ char fw_file[PATH_MAX]; const char *QEDE_DEFAULT_FIRMWARE = - "/lib/firmware/qed/qed_init_values-8.30.12.0.bin"; + "/lib/firmware/qed/qed_init_values-8.33.12.0.bin"; static void qed_update_pf_params(struct ecore_dev *edev, struct ecore_pf_params *params) @@ -62,6 +63,7 @@ qed_probe(struct ecore_dev *edev, struct rte_pci_device *pci_dev, hw_prepare_params.chk_reg_fifo = false; hw_prepare_params.initiate_pf_flr = true; hw_prepare_params.allow_mdump = false; + hw_prepare_params.b_en_pacing = false; hw_prepare_params.epoch = (u32)time(NULL); rc = ecore_hw_prepare(edev, &hw_prepare_params); if (rc) { @@ -302,9 +304,8 @@ static int qed_slowpath_start(struct ecore_dev *edev, drv_version.version = (params->drv_major << 24) | (params->drv_minor << 16) | (params->drv_rev << 8) | (params->drv_eng); - /* TBD: strlcpy() */ - strncpy((char *)drv_version.name, (const char *)params->name, - MCP_DRV_VER_STR_SIZE - 4); + strlcpy((char *)drv_version.name, (const char *)params->name, + sizeof(drv_version.name)); rc = ecore_mcp_send_drv_version(hwfn, hwfn->p_main_ptt, &drv_version); if (rc) { diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c index 0de7c6b8..bdb5d6f1 100644 --- a/drivers/net/qede/qede_rxtx.c +++ b/drivers/net/qede/qede_rxtx.c @@ -1,7 +1,7 @@ /* - * Copyright (c) 2016 QLogic Corporation. + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.qede_pmd for copyright and licensing details. 
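Replacing strncpy() with strlcpy() for the driver-version string closes the classic missing-NUL case: strlcpy() always terminates within the destination size and returns the full source length, so truncation is detectable. A minimal implementation of that contract, for illustration only:

#include <stdio.h>
#include <string.h>

/* minimal strlcpy per the BSD contract */
static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
	size_t srclen = strlen(src);

	if (size) {
		size_t n = srclen < size - 1 ? srclen : size - 1;
		memcpy(dst, src, n);
		dst[n] = '\0';		/* always NUL-terminated */
	}
	return srclen;			/* lets callers detect truncation */
}

int main(void)
{
	char buf[8];
	size_t n = my_strlcpy(buf, "qede-driver", sizeof(buf));
	printf("%s (truncated: %s)\n", buf, n >= sizeof(buf) ? "yes" : "no");
	return 0;
}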
*/ @@ -87,7 +87,7 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, /* Fix up RX buffer size */ bufsz = (uint16_t)rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM; - if ((rxmode->enable_scatter) || + if ((rxmode->offloads & DEV_RX_OFFLOAD_SCATTER) || (max_rx_pkt_len + QEDE_ETH_OVERHEAD) > bufsz) { if (!dev->data->scattered_rx) { DP_INFO(edev, "Forcing scatter-gather mode\n"); @@ -1466,6 +1466,8 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) */ rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM; packet_type |= qede_rx_cqe_to_pkt_type_outer(rx_mb); + } else { + packet_type |= qede_rx_cqe_to_pkt_type(parse_flag); } /* Common handling for non-tunnel packets and for inner @@ -1487,7 +1489,6 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) ol_flags |= PKT_RX_IP_CKSUM_BAD; } else { ol_flags |= PKT_RX_IP_CKSUM_GOOD; - packet_type |= qede_rx_cqe_to_pkt_type(parse_flag); } if (CQE_HAS_VLAN(parse_flag) || @@ -1631,6 +1632,7 @@ qede_encode_sg_bd(struct qede_tx_queue *p_txq, struct rte_mbuf *m_seg, QEDE_BD_SET_ADDR_LEN(tx_bd, mapping, m_seg->data_len); PMD_TX_LOG(DEBUG, txq, "BD len %04x", m_seg->data_len); } + start_seg++; m_seg = m_seg->next; } @@ -1837,17 +1839,14 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) * offloads. Don't rely on pkt_type marked by Rx, instead use * tx_ol_flags to decide. */ - if (((tx_ol_flags & PKT_TX_TUNNEL_MASK) == - PKT_TX_TUNNEL_VXLAN) || - ((tx_ol_flags & PKT_TX_TUNNEL_MASK) == - PKT_TX_TUNNEL_MPLSINUDP) || - ((tx_ol_flags & PKT_TX_TUNNEL_MASK) == - PKT_TX_TUNNEL_GENEVE)) { + tunn_flg = !!(tx_ol_flags & PKT_TX_TUNNEL_MASK); + + if (tunn_flg) { /* Check against max which is Tunnel IPv6 + ext */ if (unlikely(txq->nb_tx_avail < ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT)) break; - tunn_flg = true; + /* First indicate its a tunnel pkt */ bd1_bf |= ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT; @@ -1986,7 +1985,8 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) * csum offload is requested then we need to force * recalculation of L4 tunnel header csum also. */ - if (tunn_flg) { + if (tunn_flg && ((tx_ol_flags & PKT_TX_TUNNEL_MASK) != + PKT_TX_TUNNEL_GRE)) { bd1_bd_flags_bf |= ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK << ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT; diff --git a/drivers/net/qede/qede_rxtx.h b/drivers/net/qede/qede_rxtx.h index f1d36661..84a834d2 100644 --- a/drivers/net/qede/qede_rxtx.h +++ b/drivers/net/qede/qede_rxtx.h @@ -1,7 +1,7 @@ /* - * Copyright (c) 2016 QLogic Corporation. + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com + * www.cavium.com * * See LICENSE.qede_pmd for copyright and licensing details. 
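The Tx path above now derives the tunnel flag from the whole PKT_TX_TUNNEL_MASK instead of enumerating VXLAN, MPLS-in-UDP and GENEVE, and then exempts GRE from the forced tunnel L4 checksum, plausibly because a GRE tunnel carries no outer UDP header to checksum. A hedged sketch of that decision; the flag values below are illustrative, not the real mbuf bits:

#include <stdint.h>
#include <stdbool.h>

#define TX_TUNNEL_MASK	0xF0ULL	/* illustrative */
#define TX_TUNNEL_GRE	0x30ULL	/* illustrative */

static bool needs_tunnel_l4_csum(uint64_t ol_flags)
{
	bool tunn = (ol_flags & TX_TUNNEL_MASK) != 0;

	/* GRE has no outer L4 header, so no tunnel L4 csum is forced */
	return tunn && (ol_flags & TX_TUNNEL_MASK) != TX_TUNNEL_GRE;
}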
*/ @@ -76,8 +76,6 @@ ETH_RSS_VXLAN |\ ETH_RSS_GENEVE) -#define QEDE_TXQ_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS) - #define for_each_rss(i) for (i = 0; i < qdev->num_rx_queues; i++) #define for_each_tss(i) for (i = 0; i < qdev->num_tx_queues; i++) #define QEDE_RXTX_MAX(qdev) \ @@ -153,7 +151,8 @@ PKT_TX_VLAN_PKT | \ PKT_TX_TUNNEL_VXLAN | \ PKT_TX_TUNNEL_GENEVE | \ - PKT_TX_TUNNEL_MPLSINUDP) + PKT_TX_TUNNEL_MPLSINUDP | \ + PKT_TX_TUNNEL_GRE) #define QEDE_TX_OFFLOAD_NOTSUP_MASK \ (PKT_TX_OFFLOAD_MASK ^ QEDE_TX_OFFLOAD_MASK) diff --git a/drivers/net/ring/meson.build b/drivers/net/ring/meson.build index e877a4b4..7659b04f 100644 --- a/drivers/net/ring/meson.build +++ b/drivers/net/ring/meson.build @@ -1,5 +1,6 @@ # SPDX-License-Identifier: BSD-3-Clause # Copyright(c) 2017 Intel Corporation +version = 2 sources = files('rte_eth_ring.c') install_headers('rte_eth_ring.h') diff --git a/drivers/net/ring/rte_eth_ring.c b/drivers/net/ring/rte_eth_ring.c index df13c44b..35b837c3 100644 --- a/drivers/net/ring/rte_eth_ring.c +++ b/drivers/net/ring/rte_eth_ring.c @@ -60,9 +60,15 @@ static struct rte_eth_link pmd_link = { .link_speed = ETH_SPEED_NUM_10G, .link_duplex = ETH_LINK_FULL_DUPLEX, .link_status = ETH_LINK_DOWN, - .link_autoneg = ETH_LINK_AUTONEG + .link_autoneg = ETH_LINK_FIXED, }; +static int eth_ring_logtype; + +#define PMD_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, eth_ring_logtype, \ + "%s(): " fmt "\n", __func__, ##args) + static uint16_t eth_ring_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs) { @@ -256,18 +262,9 @@ do_eth_dev_ring_create(const char *name, void **tx_queues_local = NULL; unsigned i; - RTE_LOG(INFO, PMD, "Creating rings-backed ethdev on numa socket %u\n", + PMD_LOG(INFO, "Creating rings-backed ethdev on numa socket %u", numa_node); - /* now do all data allocation - for eth_dev structure, dummy pci driver - * and internal (private) data - */ - data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node); - if (data == NULL) { - rte_errno = ENOMEM; - goto error; - } - rx_queues_local = rte_zmalloc_socket(name, sizeof(void *) * nb_rx_queues, 0, numa_node); if (rx_queues_local == NULL) { @@ -301,10 +298,8 @@ do_eth_dev_ring_create(const char *name, * - point eth_dev_data to internals * - and point eth_dev structure to new eth_dev_data structure */ - /* NOTE: we'll replace the data element, of originally allocated eth_dev - * so the rings are local per-process */ - rte_memcpy(data, eth_dev->data, sizeof(*data)); + data = eth_dev->data; data->rx_queues = rx_queues_local; data->tx_queues = tx_queues_local; @@ -326,7 +321,6 @@ do_eth_dev_ring_create(const char *name, data->dev_link = pmd_link; data->mac_addrs = &internals->address; - eth_dev->data = data; eth_dev->dev_ops = &ops; data->kdrv = RTE_KDRV_NONE; data->numa_node = numa_node; @@ -335,6 +329,7 @@ do_eth_dev_ring_create(const char *name, eth_dev->rx_pkt_burst = eth_ring_rx; eth_dev->tx_pkt_burst = eth_ring_tx; + rte_eth_dev_probing_finish(eth_dev); *eth_dev_p = eth_dev; return data->port_id; @@ -342,7 +337,6 @@ do_eth_dev_ring_create(const char *name, error: rte_free(rx_queues_local); rte_free(tx_queues_local); - rte_free(data); rte_free(internals); return -1; @@ -459,13 +453,13 @@ static int parse_kvlist (const char *key __rte_unused, const char *value, void * ret = -EINVAL; if (!name) { - RTE_LOG(WARNING, PMD, "command line parameter is empty for ring pmd!\n"); + PMD_LOG(WARNING, "command line parameter is empty for ring pmd!"); goto out; } node = strchr(name, ':'); if (!node) { - RTE_LOG(WARNING, PMD, 
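The tx_prepare rejection mask defined above is built by XOR-ing the supported set out of the full offload mask, so any packet flag landing in the remainder fails validation. In general form:

#include <stdint.h>
#include <stdbool.h>

static bool tx_flags_supported(uint64_t pkt_flags,
			       uint64_t all_offload_mask,
			       uint64_t supported_mask)
{
	uint64_t notsup = all_offload_mask ^ supported_mask;

	return (pkt_flags & notsup) == 0;
}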
"could not parse node value from %s\n", + PMD_LOG(WARNING, "could not parse node value from %s", name); goto out; } @@ -475,7 +469,7 @@ static int parse_kvlist (const char *key __rte_unused, const char *value, void * action = strchr(node, ':'); if (!action) { - RTE_LOG(WARNING, PMD, "could not parse action value from %s\n", + PMD_LOG(WARNING, "could not parse action value from %s", node); goto out; } @@ -498,7 +492,8 @@ static int parse_kvlist (const char *key __rte_unused, const char *value, void * info->list[info->count].node = strtol(node, &end, 10); if ((errno != 0) || (*end != '\0')) { - RTE_LOG(WARNING, PMD, "node value %s is unparseable as a number\n", node); + PMD_LOG(WARNING, + "node value %s is unparseable as a number", node); goto out; } @@ -542,14 +537,14 @@ rte_pmd_ring_probe(struct rte_vdev_device *dev) name = rte_vdev_device_name(dev); params = rte_vdev_device_args(dev); - RTE_LOG(INFO, PMD, "Initializing pmd_ring for %s\n", name); + PMD_LOG(INFO, "Initializing pmd_ring for %s", name); if (params == NULL || params[0] == '\0') { ret = eth_dev_ring_create(name, rte_socket_id(), DEV_CREATE, ð_dev); if (ret == -1) { - RTE_LOG(INFO, PMD, - "Attach to pmd_ring for %s\n", name); + PMD_LOG(INFO, + "Attach to pmd_ring for %s", name); ret = eth_dev_ring_create(name, rte_socket_id(), DEV_ATTACH, ð_dev); } @@ -557,13 +552,13 @@ rte_pmd_ring_probe(struct rte_vdev_device *dev) kvlist = rte_kvargs_parse(params, valid_arguments); if (!kvlist) { - RTE_LOG(INFO, PMD, "Ignoring unsupported parameters when creating" - " rings-backed ethernet device\n"); + PMD_LOG(INFO, "Ignoring unsupported parameters when creating" + " rings-backed ethernet device"); ret = eth_dev_ring_create(name, rte_socket_id(), DEV_CREATE, ð_dev); if (ret == -1) { - RTE_LOG(INFO, PMD, - "Attach to pmd_ring for %s\n", + PMD_LOG(INFO, + "Attach to pmd_ring for %s", name); ret = eth_dev_ring_create(name, rte_socket_id(), DEV_ATTACH, ð_dev); @@ -617,8 +612,8 @@ rte_pmd_ring_probe(struct rte_vdev_device *dev) ð_dev); if ((ret == -1) && (info->list[info->count].action == DEV_CREATE)) { - RTE_LOG(INFO, PMD, - "Attach to pmd_ring for %s\n", + PMD_LOG(INFO, + "Attach to pmd_ring for %s", name); ret = eth_dev_ring_create(name, info->list[info->count].node, @@ -647,7 +642,7 @@ rte_pmd_ring_remove(struct rte_vdev_device *dev) struct ring_queue *r = NULL; uint16_t i; - RTE_LOG(INFO, PMD, "Un-Initializing pmd_ring for %s\n", name); + PMD_LOG(INFO, "Un-Initializing pmd_ring for %s", name); if (name == NULL) return -EINVAL; @@ -675,8 +670,6 @@ rte_pmd_ring_remove(struct rte_vdev_device *dev) rte_free(eth_dev->data->tx_queues); rte_free(eth_dev->data->dev_private); - rte_free(eth_dev->data); - rte_eth_dev_release_port(eth_dev); return 0; } @@ -690,3 +683,12 @@ RTE_PMD_REGISTER_VDEV(net_ring, pmd_ring_drv); RTE_PMD_REGISTER_ALIAS(net_ring, eth_ring); RTE_PMD_REGISTER_PARAM_STRING(net_ring, ETH_RING_NUMA_NODE_ACTION_ARG "=name:node:action(ATTACH|CREATE)"); + +RTE_INIT(eth_ring_init_log); +static void +eth_ring_init_log(void) +{ + eth_ring_logtype = rte_log_register("pmd.net.ring"); + if (eth_ring_logtype >= 0) + rte_log_set_level(eth_ring_logtype, RTE_LOG_NOTICE); +} diff --git a/drivers/net/sfc/Makefile b/drivers/net/sfc/Makefile index 8a671dd2..3bb41a00 100644 --- a/drivers/net/sfc/Makefile +++ b/drivers/net/sfc/Makefile @@ -46,11 +46,11 @@ else ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y) CFLAGS_BASE_DRIVER += -Wno-unused-but-set-variable # Suppress ICC false positive warning on 'bulk' may be used before its # value is set -CFLAGS_sfc_ef10_tx.o 
+= -wd3656 +CFLAGS_sfc_ef10_tx.o += -diag-disable 3656 endif LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -LDLIBS += -lrte_bus_pci +LDLIBS += -lrte_bus_pci -lrte_pci # # List of base driver object files for which @@ -81,6 +81,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_filter.c SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_flow.c SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_dp.c SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_ef10_rx.c +SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_ef10_essb_rx.c SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_ef10_tx.c VPATH += $(SRCDIR)/base @@ -115,6 +116,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += siena_vpd.c SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_ev.c SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_filter.c SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_intr.c +SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_image.c SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_mac.c SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_mcdi.c SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_nic.c @@ -125,5 +127,6 @@ SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_tx.c SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_vpd.c SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += hunt_nic.c SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += medford_nic.c +SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += medford2_nic.c include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/net/sfc/base/ef10_ev.c b/drivers/net/sfc/base/ef10_ev.c index 05700c5c..7f89a7bf 100644 --- a/drivers/net/sfc/base/ef10_ev.c +++ b/drivers/net/sfc/base/ef10_ev.c @@ -10,7 +10,7 @@ #include "mcdi_mon.h" #endif -#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD +#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 #if EFSYS_OPT_QSTATS #define EFX_EV_QSTAT_INCR(_eep, _stat) \ @@ -549,7 +549,8 @@ ef10_ev_qdestroy( efx_nic_t *enp = eep->ee_enp; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || - enp->en_family == EFX_FAMILY_MEDFORD); + enp->en_family == EFX_FAMILY_MEDFORD || + enp->en_family == EFX_FAMILY_MEDFORD2); (void) efx_mcdi_fini_evq(enp, eep->ee_index); } @@ -576,7 +577,7 @@ ef10_ev_qprime( EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH, ERF_DD_EVQ_IND_RPTR, (rptr >> ERF_DD_EVQ_IND_RPTR_WIDTH)); - EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index, + EFX_BAR_VI_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index, &dword, B_FALSE); EFX_POPULATE_DWORD_2(dword, @@ -584,11 +585,11 @@ ef10_ev_qprime( EFE_DD_EVQ_IND_RPTR_FLAGS_LOW, ERF_DD_EVQ_IND_RPTR, rptr & ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1)); - EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index, + EFX_BAR_VI_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index, &dword, B_FALSE); } else { EFX_POPULATE_DWORD_1(dword, ERF_DZ_EVQ_RPTR, rptr); - EFX_BAR_TBL_WRITED(enp, ER_DZ_EVQ_RPTR_REG, eep->ee_index, + EFX_BAR_VI_WRITED(enp, ER_DZ_EVQ_RPTR_REG, eep->ee_index, &dword, B_FALSE); } @@ -701,13 +702,19 @@ ef10_ev_qmoderate( EFE_DD_EVQ_IND_TIMER_FLAGS, ERF_DD_EVQ_IND_TIMER_MODE, mode, ERF_DD_EVQ_IND_TIMER_VAL, ticks); - EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT, + EFX_BAR_VI_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index, &dword, 0); } else { - EFX_POPULATE_DWORD_2(dword, + /* + * NOTE: The TMR_REL field introduced in Medford2 is + * ignored on earlier EF10 controllers. See bug66418 + * comment 9 for details. 
+ */ + EFX_POPULATE_DWORD_3(dword, ERF_DZ_TC_TIMER_MODE, mode, - ERF_DZ_TC_TIMER_VAL, ticks); - EFX_BAR_TBL_WRITED(enp, ER_DZ_EVQ_TMR_REG, + ERF_DZ_TC_TIMER_VAL, ticks, + ERF_FZ_TC_TMR_REL_VAL, ticks); + EFX_BAR_VI_WRITED(enp, ER_DZ_EVQ_TMR_REG, eep->ee_index, &dword, 0); } } @@ -742,7 +749,7 @@ ef10_ev_qstats_update( } #endif /* EFSYS_OPT_QSTATS */ -#if EFSYS_OPT_RX_PACKED_STREAM +#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER static __checkReturn boolean_t ef10_ev_rx_packed_stream( @@ -781,14 +788,25 @@ ef10_ev_rx_packed_stream( if (new_buffer) { flags |= EFX_PKT_PACKED_STREAM_NEW_BUFFER; +#if EFSYS_OPT_RX_PACKED_STREAM + /* + * If both packed stream and equal stride super-buffer + * modes are compiled in, in theory credits should be + * be maintained for packed stream only, but right now + * these modes are not distinguished in the event queue + * Rx queue state and it is OK to increment the counter + * regardless (it might be event cheaper than branching + * since neighbour structure member are updated as well). + */ eersp->eers_rx_packed_stream_credits++; +#endif eersp->eers_rx_read_ptr++; } current_id = eersp->eers_rx_read_ptr & eersp->eers_rx_mask; /* Check for errors that invalidate checksum and L3/L4 fields */ - if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECC_ERR) != 0) { - /* RX frame truncated (error flag is misnamed) */ + if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TRUNC_ERR) != 0) { + /* RX frame truncated */ EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC); flags |= EFX_DISCARD; goto deliver; @@ -823,7 +841,7 @@ deliver: return (should_abort); } -#endif /* EFSYS_OPT_RX_PACKED_STREAM */ +#endif /* EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER */ static __checkReturn boolean_t ef10_ev_rx( @@ -857,7 +875,7 @@ ef10_ev_rx( label = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_QLABEL); eersp = &eep->ee_rxq_state[label]; -#if EFSYS_OPT_RX_PACKED_STREAM +#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER /* * Packed stream events are very different, * so handle them separately @@ -867,12 +885,23 @@ ef10_ev_rx( #endif size = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_BYTES); + cont = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_CONT); next_read_lbits = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DSC_PTR_LBITS); eth_tag_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ETH_TAG_CLASS); mac_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_MAC_CLASS); l3_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L3_CLASS); - l4_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L4_CLASS); - cont = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_CONT); + + /* + * RX_L4_CLASS is 3 bits wide on Huntington and Medford, but is only + * 2 bits wide on Medford2. Check it is safe to use the Medford2 field + * and values for all EF10 controllers. 
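Each reused event field above is guarded by EFX_STATIC_ASSERT so that any mismatch between the Medford2 (FZ/ESE_FZ) and legacy (DE/ESE_DE) bit definitions breaks the build rather than misparsing events at runtime. Such checks are commonly implemented with the negative-array-size trick; a generic, function-scope form (the actual efx macro may differ in detail):

/* a false condition yields char[-1], which is a compile error */
#define STATIC_ASSERT(cond) ((void)sizeof(char[(cond) ? 1 : -1]))

static void check_layout(void)
{
	STATIC_ASSERT(sizeof(int) >= 4);	/* example condition */
}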
+ */ + EFX_STATIC_ASSERT(ESF_FZ_RX_L4_CLASS_LBN == ESF_DE_RX_L4_CLASS_LBN); + EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_TCP == ESE_DE_L4_CLASS_TCP); + EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UDP == ESE_DE_L4_CLASS_UDP); + EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UNKNOWN == ESE_DE_L4_CLASS_UNKNOWN); + + l4_class = EFX_QWORD_FIELD(*eqp, ESF_FZ_RX_L4_CLASS); if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DROP_EVENT) != 0) { /* Drop this event */ @@ -914,8 +943,8 @@ ef10_ev_rx( last_used_id = (eersp->eers_rx_read_ptr - 1) & eersp->eers_rx_mask; /* Check for errors that invalidate checksum and L3/L4 fields */ - if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECC_ERR) != 0) { - /* RX frame truncated (error flag is misnamed) */ + if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TRUNC_ERR) != 0) { + /* RX frame truncated */ EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC); flags |= EFX_DISCARD; goto deliver; @@ -951,10 +980,22 @@ ef10_ev_rx( flags |= EFX_CKSUM_IPV4; } - if (l4_class == ESE_DZ_L4_CLASS_TCP) { + /* + * RX_L4_CLASS is 3 bits wide on Huntington and Medford, but is + * only 2 bits wide on Medford2. Check it is safe to use the + * Medford2 field and values for all EF10 controllers. + */ + EFX_STATIC_ASSERT(ESF_FZ_RX_L4_CLASS_LBN == + ESF_DE_RX_L4_CLASS_LBN); + EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_TCP == ESE_DE_L4_CLASS_TCP); + EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UDP == ESE_DE_L4_CLASS_UDP); + EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UNKNOWN == + ESE_DE_L4_CLASS_UNKNOWN); + + if (l4_class == ESE_FZ_L4_CLASS_TCP) { EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV4); flags |= EFX_PKT_TCP; - } else if (l4_class == ESE_DZ_L4_CLASS_UDP) { + } else if (l4_class == ESE_FZ_L4_CLASS_UDP) { EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV4); flags |= EFX_PKT_UDP; } else { @@ -966,10 +1007,22 @@ ef10_ev_rx( case ESE_DZ_L3_CLASS_IP6_FRAG: flags |= EFX_PKT_IPV6; - if (l4_class == ESE_DZ_L4_CLASS_TCP) { + /* + * RX_L4_CLASS is 3 bits wide on Huntington and Medford, but is + * only 2 bits wide on Medford2. Check it is safe to use the + * Medford2 field and values for all EF10 controllers. 
+ */ + EFX_STATIC_ASSERT(ESF_FZ_RX_L4_CLASS_LBN == + ESF_DE_RX_L4_CLASS_LBN); + EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_TCP == ESE_DE_L4_CLASS_TCP); + EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UDP == ESE_DE_L4_CLASS_UDP); + EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UNKNOWN == + ESE_DE_L4_CLASS_UNKNOWN); + + if (l4_class == ESE_FZ_L4_CLASS_TCP) { EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV6); flags |= EFX_PKT_TCP; - } else if (l4_class == ESE_DZ_L4_CLASS_UDP) { + } else if (l4_class == ESE_FZ_L4_CLASS_UDP) { EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV6); flags |= EFX_PKT_UDP; } else { @@ -1322,8 +1375,9 @@ ef10_ev_rxlabel_init( __in efx_rxq_type_t type) { efx_evq_rxq_state_t *eersp; -#if EFSYS_OPT_RX_PACKED_STREAM +#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER boolean_t packed_stream = (type == EFX_RXQ_TYPE_PACKED_STREAM); + boolean_t es_super_buffer = (type == EFX_RXQ_TYPE_ES_SUPER_BUFFER); #endif _NOTE(ARGUNUSED(type)) @@ -1345,9 +1399,11 @@ ef10_ev_rxlabel_init( eersp->eers_rx_read_ptr = 0; #endif eersp->eers_rx_mask = erp->er_mask; -#if EFSYS_OPT_RX_PACKED_STREAM +#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER eersp->eers_rx_stream_npackets = 0; - eersp->eers_rx_packed_stream = packed_stream; + eersp->eers_rx_packed_stream = packed_stream || es_super_buffer; +#endif +#if EFSYS_OPT_RX_PACKED_STREAM if (packed_stream) { eersp->eers_rx_packed_stream_credits = (eep->ee_mask + 1) / EFX_DIV_ROUND_UP(EFX_RX_PACKED_STREAM_MEM_PER_CREDIT, @@ -1381,11 +1437,13 @@ ef10_ev_rxlabel_fini( eersp->eers_rx_read_ptr = 0; eersp->eers_rx_mask = 0; -#if EFSYS_OPT_RX_PACKED_STREAM +#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER eersp->eers_rx_stream_npackets = 0; eersp->eers_rx_packed_stream = B_FALSE; +#endif +#if EFSYS_OPT_RX_PACKED_STREAM eersp->eers_rx_packed_stream_credits = 0; #endif } -#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ +#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */ diff --git a/drivers/net/sfc/base/ef10_filter.c b/drivers/net/sfc/base/ef10_filter.c index 27b59987..ae872853 100644 --- a/drivers/net/sfc/base/ef10_filter.c +++ b/drivers/net/sfc/base/ef10_filter.c @@ -7,7 +7,7 @@ #include "efx.h" #include "efx_impl.h" -#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD +#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 #if EFSYS_OPT_FILTER @@ -95,7 +95,8 @@ ef10_filter_init( ef10_filter_table_t *eftp; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || - enp->en_family == EFX_FAMILY_MEDFORD); + enp->en_family == EFX_FAMILY_MEDFORD || + enp->en_family == EFX_FAMILY_MEDFORD2); #define MATCH_MASK(match) (EFX_MASK32(match) << EFX_LOW_BIT(match)) EFX_STATIC_ASSERT(EFX_FILTER_MATCH_REM_HOST == @@ -118,6 +119,10 @@ ef10_filter_init( MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN)); EFX_STATIC_ASSERT(EFX_FILTER_MATCH_IP_PROTO == MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO)); + EFX_STATIC_ASSERT(EFX_FILTER_MATCH_VNI_OR_VSID == + MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID)); + EFX_STATIC_ASSERT(EFX_FILTER_MATCH_IFRM_LOC_MAC == + MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_MAC)); EFX_STATIC_ASSERT(EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST == MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST)); EFX_STATIC_ASSERT(EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST == @@ -150,7 +155,8 @@ ef10_filter_fini( __in efx_nic_t *enp) { EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || - enp->en_family == EFX_FAMILY_MEDFORD); + enp->en_family == EFX_FAMILY_MEDFORD || + enp->en_family == EFX_FAMILY_MEDFORD2); if 
(enp->en_filter.ef_ef10_filter_table != NULL) {
 		EFSYS_KMEM_FREE(enp->en_esip, sizeof (ef10_filter_table_t),
@@ -166,17 +172,24 @@ efx_mcdi_filter_op_add(
 	__inout		ef10_filter_handle_t *handle)
 {
 	efx_mcdi_req_t req;
-	uint8_t payload[MAX(MC_CMD_FILTER_OP_EXT_IN_LEN,
+	uint8_t payload[MAX(MC_CMD_FILTER_OP_V3_IN_LEN,
 			    MC_CMD_FILTER_OP_EXT_OUT_LEN)];
+	efx_filter_match_flags_t match_flags;
 	efx_rc_t rc;

 	memset(payload, 0, sizeof (payload));
 	req.emr_cmd = MC_CMD_FILTER_OP;
 	req.emr_in_buf = payload;
-	req.emr_in_length = MC_CMD_FILTER_OP_EXT_IN_LEN;
+	req.emr_in_length = MC_CMD_FILTER_OP_V3_IN_LEN;
 	req.emr_out_buf = payload;
 	req.emr_out_length = MC_CMD_FILTER_OP_EXT_OUT_LEN;

+	/*
+	 * Remove the match flag for encapsulated filters, since it does not
+	 * correspond to any MCDI match flag.
+	 */
+	match_flags = spec->efs_match_flags & ~EFX_FILTER_MATCH_ENCAP_TYPE;
+
 	switch (filter_op) {
 	case MC_CMD_FILTER_OP_IN_OP_REPLACE:
 		MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_HANDLE_LO,
@@ -197,11 +210,16 @@ efx_mcdi_filter_op_add(
 	MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_PORT_ID,
 	    EVB_PORT_ID_ASSIGNED);
 	MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_MATCH_FIELDS,
-	    spec->efs_match_flags);
-	MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_RX_DEST,
-	    MC_CMD_FILTER_OP_EXT_IN_RX_DEST_HOST);
-	MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_RX_QUEUE,
-	    spec->efs_dmaq_id);
+	    match_flags);
+	if (spec->efs_dmaq_id == EFX_FILTER_SPEC_RX_DMAQ_ID_DROP) {
+		MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_RX_DEST,
+		    MC_CMD_FILTER_OP_EXT_IN_RX_DEST_DROP);
+	} else {
+		MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_RX_DEST,
+		    MC_CMD_FILTER_OP_EXT_IN_RX_DEST_HOST);
+		MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_RX_QUEUE,
+		    spec->efs_dmaq_id);
+	}

 #if EFSYS_OPT_RX_SCALE
 	if (spec->efs_flags & EFX_FILTER_FLAG_RX_RSS) {
@@ -290,18 +308,45 @@ efx_mcdi_filter_op_add(
 			rc = EINVAL;
 			goto fail2;
 		}
+
+		memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_EXT_IN_VNI_OR_VSID),
+		    spec->efs_vni_or_vsid, EFX_VNI_OR_VSID_LEN);
+
+		memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_EXT_IN_IFRM_DST_MAC),
+		    spec->efs_ifrm_loc_mac, EFX_MAC_ADDR_LEN);
+	}
+
+	/*
+	 * Set the "MARK" or "FLAG" action for all packets matching this filter
+	 * if necessary (only useful with equal stride packed stream Rx mode,
+	 * which provides the information in the pseudo-header).
+	 * These actions require the MC_CMD_FILTER_OP_V3_IN msgrequest.
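+	 *
+	 * Hedged usage sketch (flag and field names as used in this file;
+	 * mark_value is illustrative): a caller requests marking with
+	 *
+	 *   spec->efs_flags |= EFX_FILTER_FLAG_ACTION_MARK;
+	 *   spec->efs_mark = mark_value;
+	 *
+	 * before inserting the filter. MARK and FLAG are mutually
+	 * exclusive, so a spec carrying both is rejected below.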
+ */ + if ((spec->efs_flags & EFX_FILTER_FLAG_ACTION_MARK) && + (spec->efs_flags & EFX_FILTER_FLAG_ACTION_FLAG)) { + rc = EINVAL; + goto fail3; + } + if (spec->efs_flags & EFX_FILTER_FLAG_ACTION_MARK) { + MCDI_IN_SET_DWORD(req, FILTER_OP_V3_IN_MATCH_ACTION, + MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_MARK); + MCDI_IN_SET_DWORD(req, FILTER_OP_V3_IN_MATCH_MARK_VALUE, + spec->efs_mark); + } else if (spec->efs_flags & EFX_FILTER_FLAG_ACTION_FLAG) { + MCDI_IN_SET_DWORD(req, FILTER_OP_V3_IN_MATCH_ACTION, + MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_FLAG); } efx_mcdi_execute(enp, &req); if (req.emr_rc != 0) { rc = req.emr_rc; - goto fail3; + goto fail4; } if (req.emr_out_length_used < MC_CMD_FILTER_OP_EXT_OUT_LEN) { rc = EMSGSIZE; - goto fail4; + goto fail5; } handle->efh_lo = MCDI_OUT_DWORD(req, FILTER_OP_EXT_OUT_HANDLE_LO); @@ -309,6 +354,8 @@ efx_mcdi_filter_op_add( return (0); +fail5: + EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); fail3: @@ -413,6 +460,12 @@ ef10_filter_equal( return (B_FALSE); if (left->efs_encap_type != right->efs_encap_type) return (B_FALSE); + if (memcmp(left->efs_vni_or_vsid, right->efs_vni_or_vsid, + EFX_VNI_OR_VSID_LEN)) + return (B_FALSE); + if (memcmp(left->efs_ifrm_loc_mac, right->efs_ifrm_loc_mac, + EFX_MAC_ADDR_LEN)) + return (B_FALSE); return (B_TRUE); @@ -495,7 +548,8 @@ ef10_filter_restore( efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || - enp->en_family == EFX_FAMILY_MEDFORD); + enp->en_family == EFX_FAMILY_MEDFORD || + enp->en_family == EFX_FAMILY_MEDFORD2); for (tbl_id = 0; tbl_id < EFX_EF10_FILTER_TBL_ROWS; tbl_id++) { @@ -570,7 +624,8 @@ ef10_filter_add_internal( boolean_t locked = B_FALSE; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || - enp->en_family == EFX_FAMILY_MEDFORD); + enp->en_family == EFX_FAMILY_MEDFORD || + enp->en_family == EFX_FAMILY_MEDFORD2); hash = ef10_filter_hash(spec); @@ -842,7 +897,8 @@ ef10_filter_delete( boolean_t locked = B_FALSE; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || - enp->en_family == EFX_FAMILY_MEDFORD); + enp->en_family == EFX_FAMILY_MEDFORD || + enp->en_family == EFX_FAMILY_MEDFORD2); hash = ef10_filter_hash(spec); @@ -890,6 +946,7 @@ efx_mcdi_get_parser_disp_info( __in efx_nic_t *enp, __out_ecount(buffer_length) uint32_t *buffer, __in size_t buffer_length, + __in boolean_t encap, __out size_t *list_lengthp) { efx_mcdi_req_t req; @@ -906,7 +963,8 @@ efx_mcdi_get_parser_disp_info( req.emr_out_buf = payload; req.emr_out_length = MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX; - MCDI_IN_SET_DWORD(req, GET_PARSER_DISP_INFO_OUT_OP, + MCDI_IN_SET_DWORD(req, GET_PARSER_DISP_INFO_OUT_OP, encap ? 
+ MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_ENCAP_RX_MATCHES : MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES); efx_mcdi_execute(enp, &req); @@ -966,28 +1024,76 @@ ef10_filter_supported_filters( __in size_t buffer_length, __out size_t *list_lengthp) { - + efx_nic_cfg_t *encp = &(enp->en_nic_cfg); size_t mcdi_list_length; + size_t mcdi_encap_list_length; size_t list_length; uint32_t i; + uint32_t next_buf_idx; + size_t next_buf_length; efx_rc_t rc; + boolean_t no_space = B_FALSE; efx_filter_match_flags_t all_filter_flags = (EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_MAC | EFX_FILTER_MATCH_REM_PORT | EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_PORT | EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_INNER_VID | EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_IP_PROTO | + EFX_FILTER_MATCH_VNI_OR_VSID | + EFX_FILTER_MATCH_IFRM_LOC_MAC | + EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST | + EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST | + EFX_FILTER_MATCH_ENCAP_TYPE | EFX_FILTER_MATCH_UNKNOWN_MCAST_DST | EFX_FILTER_MATCH_UNKNOWN_UCAST_DST); - rc = efx_mcdi_get_parser_disp_info(enp, buffer, buffer_length, - &mcdi_list_length); + /* + * Two calls to MC_CMD_GET_PARSER_DISP_INFO are needed: one to get the + * list of supported filters for ordinary packets, and then another to + * get the list of supported filters for encapsulated packets. To + * distinguish the second list from the first, the + * EFX_FILTER_MATCH_ENCAP_TYPE flag is added to each filter for + * encapsulated packets. + */ + rc = efx_mcdi_get_parser_disp_info(enp, buffer, buffer_length, B_FALSE, + &mcdi_list_length); if (rc != 0) { - if (rc == ENOSPC) { - /* Pass through mcdi_list_length for the list length */ - *list_lengthp = mcdi_list_length; + if (rc == ENOSPC) + no_space = B_TRUE; + else + goto fail1; + } + + if (no_space) { + next_buf_idx = 0; + next_buf_length = 0; + } else { + EFSYS_ASSERT(mcdi_list_length <= buffer_length); + next_buf_idx = mcdi_list_length; + next_buf_length = buffer_length - mcdi_list_length; + } + + if (encp->enc_tunnel_encapsulations_supported != 0) { + rc = efx_mcdi_get_parser_disp_info(enp, &buffer[next_buf_idx], + next_buf_length, B_TRUE, &mcdi_encap_list_length); + if (rc != 0) { + if (rc == ENOSPC) + no_space = B_TRUE; + else + goto fail2; + } else { + for (i = next_buf_idx; + i < next_buf_idx + mcdi_encap_list_length; i++) + buffer[i] |= EFX_FILTER_MATCH_ENCAP_TYPE; } - goto fail1; + } else { + mcdi_encap_list_length = 0; + } + + if (no_space) { + *list_lengthp = mcdi_list_length + mcdi_encap_list_length; + rc = ENOSPC; + goto fail3; } /* @@ -1000,9 +1106,10 @@ ef10_filter_supported_filters( * of the matches is preserved as they are ordered from highest to * lowest priority. 
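 *
 * Worked example (illustrative sizes only): if the first call reports
 * five plain matches and the second call three encapsulated ones, then
 *
 *   buffer[0..4]  plain Rx matches
 *   buffer[5..7]  encap matches, tagged with EFX_FILTER_MATCH_ENCAP_TYPE
 *
 * and the loop below compacts the combined list in place, dropping any
 * entry whose match bits this library does not model.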
*/ - EFSYS_ASSERT(mcdi_list_length <= buffer_length); + EFSYS_ASSERT(mcdi_list_length + mcdi_encap_list_length <= + buffer_length); list_length = 0; - for (i = 0; i < mcdi_list_length; i++) { + for (i = 0; i < mcdi_list_length + mcdi_encap_list_length; i++) { if ((buffer[i] & ~all_filter_flags) == 0) { buffer[list_length] = buffer[i]; list_length++; @@ -1013,6 +1120,10 @@ ef10_filter_supported_filters( return (0); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); @@ -1636,4 +1747,4 @@ ef10_filter_default_rxq_clear( #endif /* EFSYS_OPT_FILTER */ -#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ +#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */ diff --git a/drivers/net/sfc/base/ef10_image.c b/drivers/net/sfc/base/ef10_image.c new file mode 100644 index 00000000..6fb7e476 --- /dev/null +++ b/drivers/net/sfc/base/ef10_image.c @@ -0,0 +1,885 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright (c) 2017-2018 Solarflare Communications Inc. + * All rights reserved. + */ + +#include "efx.h" +#include "efx_impl.h" + +#if EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 + +#if EFSYS_OPT_IMAGE_LAYOUT + +/* + * Utility routines to support limited parsing of ASN.1 tags. This is not a + * general purpose ASN.1 parser, but is sufficient to locate the required + * objects in a signed image with CMS headers. + */ + +/* DER encodings for ASN.1 tags (see ITU-T X.690) */ +#define ASN1_TAG_INTEGER (0x02) +#define ASN1_TAG_OCTET_STRING (0x04) +#define ASN1_TAG_OBJ_ID (0x06) +#define ASN1_TAG_SEQUENCE (0x30) +#define ASN1_TAG_SET (0x31) + +#define ASN1_TAG_IS_PRIM(tag) ((tag & 0x20) == 0) + +#define ASN1_TAG_PRIM_CONTEXT(n) (0x80 + (n)) +#define ASN1_TAG_CONS_CONTEXT(n) (0xA0 + (n)) + +typedef struct efx_asn1_cursor_s { + uint8_t *buffer; + uint32_t length; + + uint8_t tag; + uint32_t hdr_size; + uint32_t val_size; +} efx_asn1_cursor_t; + + +/* Parse header of DER encoded ASN.1 TLV and match tag */ +static __checkReturn efx_rc_t +efx_asn1_parse_header_match_tag( + __inout efx_asn1_cursor_t *cursor, + __in uint8_t tag) +{ + efx_rc_t rc; + + if (cursor == NULL || cursor->buffer == NULL || cursor->length < 2) { + rc = EINVAL; + goto fail1; + } + + cursor->tag = cursor->buffer[0]; + if (cursor->tag != tag) { + /* Tag not matched */ + rc = ENOENT; + goto fail2; + } + + if ((cursor->tag & 0x1F) == 0x1F) { + /* Long tag format not used in CMS syntax */ + rc = EINVAL; + goto fail3; + } + + if ((cursor->buffer[1] & 0x80) == 0) { + /* Short form: length is 0..127 */ + cursor->hdr_size = 2; + cursor->val_size = cursor->buffer[1]; + } else { + /* Long form: length encoded as [0x80+nbytes][length bytes] */ + uint32_t nbytes = cursor->buffer[1] & 0x7F; + uint32_t offset; + + if (nbytes == 0) { + /* Indefinite length not allowed in DER encoding */ + rc = EINVAL; + goto fail4; + } + if (2 + nbytes > cursor->length) { + /* Header length overflows image buffer */ + rc = EINVAL; + goto fail6; + } + if (nbytes > sizeof (uint32_t)) { + /* Length encoding too big */ + rc = E2BIG; + goto fail5; + } + cursor->hdr_size = 2 + nbytes; + cursor->val_size = 0; + for (offset = 2; offset < cursor->hdr_size; offset++) { + cursor->val_size = + (cursor->val_size << 8) | cursor->buffer[offset]; + } + } + + if ((cursor->hdr_size + cursor->val_size) > cursor->length) { + /* Length overflows image buffer */ + rc = E2BIG; + goto fail7; + } + + return (0); + +fail7: + EFSYS_PROBE(fail7); +fail6: + EFSYS_PROBE(fail6); +fail5: + EFSYS_PROBE(fail5); +fail4: + 
EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +/* Enter nested ASN.1 TLV (contained in value of current TLV) */ +static __checkReturn efx_rc_t +efx_asn1_enter_tag( + __inout efx_asn1_cursor_t *cursor, + __in uint8_t tag) +{ + efx_rc_t rc; + + if (cursor == NULL) { + rc = EINVAL; + goto fail1; + } + + if (ASN1_TAG_IS_PRIM(tag)) { + /* Cannot enter a primitive tag */ + rc = ENOTSUP; + goto fail2; + } + rc = efx_asn1_parse_header_match_tag(cursor, tag); + if (rc != 0) { + /* Invalid TLV or wrong tag */ + goto fail3; + } + + /* Limit cursor range to nested TLV */ + cursor->buffer += cursor->hdr_size; + cursor->length = cursor->val_size; + + return (0); + +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +/* + * Check that the current ASN.1 TLV matches the given tag and value. + * Advance cursor to next TLV on a successful match. + */ +static __checkReturn efx_rc_t +efx_asn1_match_tag_value( + __inout efx_asn1_cursor_t *cursor, + __in uint8_t tag, + __in const void *valp, + __in uint32_t val_size) +{ + efx_rc_t rc; + + if (cursor == NULL) { + rc = EINVAL; + goto fail1; + } + rc = efx_asn1_parse_header_match_tag(cursor, tag); + if (rc != 0) { + /* Invalid TLV or wrong tag */ + goto fail2; + } + if (cursor->val_size != val_size) { + /* Value size is different */ + rc = EINVAL; + goto fail3; + } + if (memcmp(cursor->buffer + cursor->hdr_size, valp, val_size) != 0) { + /* Value content is different */ + rc = EINVAL; + goto fail4; + } + cursor->buffer += cursor->hdr_size + cursor->val_size; + cursor->length -= cursor->hdr_size + cursor->val_size; + + return (0); + +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +/* Advance cursor to next TLV */ +static __checkReturn efx_rc_t +efx_asn1_skip_tag( + __inout efx_asn1_cursor_t *cursor, + __in uint8_t tag) +{ + efx_rc_t rc; + + if (cursor == NULL) { + rc = EINVAL; + goto fail1; + } + + rc = efx_asn1_parse_header_match_tag(cursor, tag); + if (rc != 0) { + /* Invalid TLV or wrong tag */ + goto fail2; + } + cursor->buffer += cursor->hdr_size + cursor->val_size; + cursor->length -= cursor->hdr_size + cursor->val_size; + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +/* Return pointer to value octets and value size from current TLV */ +static __checkReturn efx_rc_t +efx_asn1_get_tag_value( + __inout efx_asn1_cursor_t *cursor, + __in uint8_t tag, + __out uint8_t **valp, + __out uint32_t *val_sizep) +{ + efx_rc_t rc; + + if (cursor == NULL || valp == NULL || val_sizep == NULL) { + rc = EINVAL; + goto fail1; + } + + rc = efx_asn1_parse_header_match_tag(cursor, tag); + if (rc != 0) { + /* Invalid TLV or wrong tag */ + goto fail2; + } + *valp = cursor->buffer + cursor->hdr_size; + *val_sizep = cursor->val_size; + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + +/* + * Utility routines for parsing CMS headers (see RFC2315, PKCS#7) + */ + +/* OID 1.2.840.113549.1.7.2 */ +static const uint8_t PKCS7_SignedData[] = +{ 0x2A, 0x86, 0x48, 0x86, 0xF7, 0x0D, 0x01, 0x07, 0x02 }; + +/* OID 1.2.840.113549.1.7.1 */ +static const uint8_t PKCS7_Data[] = +{ 0x2A, 0x86, 0x48, 0x86, 0xF7, 0x0D, 0x01, 0x07, 0x01 }; + +/* SignedData structure version */ +static const 
uint8_t SignedData_Version[] = +{ 0x03 }; + +/* + * Check for a valid image in signed image format. This uses CMS syntax + * (see RFC2315, PKCS#7) to provide signatures, and certificates required + * to validate the signatures. The encapsulated content is in unsigned image + * format (reflash header, image code, trailer checksum). + */ +static __checkReturn efx_rc_t +efx_check_signed_image_header( + __in void *bufferp, + __in uint32_t buffer_size, + __out uint32_t *content_offsetp, + __out uint32_t *content_lengthp) +{ + efx_asn1_cursor_t cursor; + uint8_t *valp; + uint32_t val_size; + efx_rc_t rc; + + if (content_offsetp == NULL || content_lengthp == NULL) { + rc = EINVAL; + goto fail1; + } + cursor.buffer = (uint8_t *)bufferp; + cursor.length = buffer_size; + + /* ContextInfo */ + rc = efx_asn1_enter_tag(&cursor, ASN1_TAG_SEQUENCE); + if (rc != 0) + goto fail2; + + /* ContextInfo.contentType */ + rc = efx_asn1_match_tag_value(&cursor, ASN1_TAG_OBJ_ID, + PKCS7_SignedData, sizeof (PKCS7_SignedData)); + if (rc != 0) + goto fail3; + + /* ContextInfo.content */ + rc = efx_asn1_enter_tag(&cursor, ASN1_TAG_CONS_CONTEXT(0)); + if (rc != 0) + goto fail4; + + /* SignedData */ + rc = efx_asn1_enter_tag(&cursor, ASN1_TAG_SEQUENCE); + if (rc != 0) + goto fail5; + + /* SignedData.version */ + rc = efx_asn1_match_tag_value(&cursor, ASN1_TAG_INTEGER, + SignedData_Version, sizeof (SignedData_Version)); + if (rc != 0) + goto fail6; + + /* SignedData.digestAlgorithms */ + rc = efx_asn1_skip_tag(&cursor, ASN1_TAG_SET); + if (rc != 0) + goto fail7; + + /* SignedData.encapContentInfo */ + rc = efx_asn1_enter_tag(&cursor, ASN1_TAG_SEQUENCE); + if (rc != 0) + goto fail8; + + /* SignedData.encapContentInfo.econtentType */ + rc = efx_asn1_match_tag_value(&cursor, ASN1_TAG_OBJ_ID, + PKCS7_Data, sizeof (PKCS7_Data)); + if (rc != 0) + goto fail9; + + /* SignedData.encapContentInfo.econtent */ + rc = efx_asn1_enter_tag(&cursor, ASN1_TAG_CONS_CONTEXT(0)); + if (rc != 0) + goto fail10; + + /* + * The octet string contains the image header, image code bytes and + * image trailer CRC (same as unsigned image layout). 
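+ *
+ * Schematically, the DER nesting walked below is (sketch using RFC 2315
+ * names; not a complete grammar):
+ *
+ *   SEQUENCE                        -- ContentInfo
+ *     OID 1.2.840.113549.1.7.2      -- signedData
+ *     [0] SEQUENCE                  -- SignedData
+ *       INTEGER 3                   -- version
+ *       SET                         -- digestAlgorithms (skipped)
+ *       SEQUENCE                    -- encapContentInfo
+ *         OID 1.2.840.113549.1.7.1  -- data
+ *         [0] OCTET STRING          -- unsigned image bytes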
+ */ + valp = NULL; + val_size = 0; + rc = efx_asn1_get_tag_value(&cursor, ASN1_TAG_OCTET_STRING, + &valp, &val_size); + if (rc != 0) + goto fail11; + + if ((valp == NULL) || (val_size == 0)) { + rc = EINVAL; + goto fail12; + } + if (valp < (uint8_t *)bufferp) { + rc = EINVAL; + goto fail13; + } + if ((valp + val_size) > ((uint8_t *)bufferp + buffer_size)) { + rc = EINVAL; + goto fail14; + } + + *content_offsetp = (uint32_t)(valp - (uint8_t *)bufferp); + *content_lengthp = val_size; + + return (0); + +fail14: + EFSYS_PROBE(fail14); +fail13: + EFSYS_PROBE(fail13); +fail12: + EFSYS_PROBE(fail12); +fail11: + EFSYS_PROBE(fail11); +fail10: + EFSYS_PROBE(fail10); +fail9: + EFSYS_PROBE(fail9); +fail8: + EFSYS_PROBE(fail8); +fail7: + EFSYS_PROBE(fail7); +fail6: + EFSYS_PROBE(fail6); +fail5: + EFSYS_PROBE(fail5); +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +static __checkReturn efx_rc_t +efx_check_unsigned_image( + __in void *bufferp, + __in uint32_t buffer_size) +{ + efx_image_header_t *header; + efx_image_trailer_t *trailer; + uint32_t crc; + efx_rc_t rc; + + EFX_STATIC_ASSERT(sizeof (*header) == EFX_IMAGE_HEADER_SIZE); + EFX_STATIC_ASSERT(sizeof (*trailer) == EFX_IMAGE_TRAILER_SIZE); + + /* Must have at least enough space for required image header fields */ + if (buffer_size < (EFX_FIELD_OFFSET(efx_image_header_t, eih_size) + + sizeof (header->eih_size))) { + rc = ENOSPC; + goto fail1; + } + header = (efx_image_header_t *)bufferp; + + if (header->eih_magic != EFX_IMAGE_HEADER_MAGIC) { + rc = EINVAL; + goto fail2; + } + + /* + * Check image header version is same or higher than lowest required + * version. + */ + if (header->eih_version < EFX_IMAGE_HEADER_VERSION) { + rc = EINVAL; + goto fail3; + } + + /* Buffer must have space for image header, code and image trailer. */ + if (buffer_size < (header->eih_size + header->eih_code_size + + EFX_IMAGE_TRAILER_SIZE)) { + rc = ENOSPC; + goto fail4; + } + + /* Check CRC from image buffer matches computed CRC. */ + trailer = (efx_image_trailer_t *)((uint8_t *)header + + header->eih_size + header->eih_code_size); + + crc = efx_crc32_calculate(0, (uint8_t *)header, + (header->eih_size + header->eih_code_size)); + + if (trailer->eit_crc != crc) { + rc = EINVAL; + goto fail5; + } + + return (0); + +fail5: + EFSYS_PROBE(fail5); +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_check_reflash_image( + __in void *bufferp, + __in uint32_t buffer_size, + __out efx_image_info_t *infop) +{ + efx_image_format_t format = EFX_IMAGE_FORMAT_NO_IMAGE; + uint32_t image_offset; + uint32_t image_size; + void *imagep; + efx_rc_t rc; + + + EFSYS_ASSERT(infop != NULL); + if (infop == NULL) { + rc = EINVAL; + goto fail1; + } + memset(infop, 0, sizeof (*infop)); + + if (bufferp == NULL || buffer_size == 0) { + rc = EINVAL; + goto fail2; + } + + /* + * Check if the buffer contains an image in signed format, and if so, + * locate the image header. + */ + rc = efx_check_signed_image_header(bufferp, buffer_size, + &image_offset, &image_size); + if (rc == 0) { + /* + * Buffer holds signed image format. Check that the encapsulated + * content is in unsigned image format. 
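+		 *
+		 * (Caller-flow sketch, not part of this change: a flash
+		 * tool first calls efx_check_reflash_image() and, when
+		 * eii_format is EFX_IMAGE_FORMAT_SIGNED, passes the
+		 * returned efx_image_info_t to
+		 * efx_build_signed_image_write_buffer() to lay the image
+		 * out for the NVRAM partition.)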
+ */ + format = EFX_IMAGE_FORMAT_SIGNED; + } else { + /* Check if the buffer holds image in unsigned image format */ + format = EFX_IMAGE_FORMAT_UNSIGNED; + image_offset = 0; + image_size = buffer_size; + } + if (image_offset + image_size > buffer_size) { + rc = E2BIG; + goto fail3; + } + imagep = (uint8_t *)bufferp + image_offset; + + /* Check unsigned image layout (image header, code, image trailer) */ + rc = efx_check_unsigned_image(imagep, image_size); + if (rc != 0) + goto fail4; + + /* Return image details */ + infop->eii_format = format; + infop->eii_imagep = bufferp; + infop->eii_image_size = buffer_size; + infop->eii_headerp = (efx_image_header_t *)imagep; + + return (0); + +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); + infop->eii_format = EFX_IMAGE_FORMAT_INVALID; + infop->eii_imagep = NULL; + infop->eii_image_size = 0; + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_build_signed_image_write_buffer( + __out uint8_t *bufferp, + __in uint32_t buffer_size, + __in efx_image_info_t *infop, + __out efx_image_header_t **headerpp) +{ + signed_image_chunk_hdr_t chunk_hdr; + uint32_t hdr_offset; + struct { + uint32_t offset; + uint32_t size; + } cms_header, image_header, code, image_trailer, signature; + efx_rc_t rc; + + EFSYS_ASSERT((infop != NULL) && (headerpp != NULL)); + + if ((bufferp == NULL) || (buffer_size == 0) || + (infop == NULL) || (headerpp == NULL)) { + /* Invalid arguments */ + rc = EINVAL; + goto fail1; + } + if ((infop->eii_format != EFX_IMAGE_FORMAT_SIGNED) || + (infop->eii_imagep == NULL) || + (infop->eii_headerp == NULL) || + ((uint8_t *)infop->eii_headerp < (uint8_t *)infop->eii_imagep) || + (infop->eii_image_size < EFX_IMAGE_HEADER_SIZE) || + ((size_t)((uint8_t *)infop->eii_headerp - infop->eii_imagep) > + (infop->eii_image_size - EFX_IMAGE_HEADER_SIZE))) { + /* Invalid image info */ + rc = EINVAL; + goto fail2; + } + + /* Locate image chunks in original signed image */ + cms_header.offset = 0; + cms_header.size = + (uint32_t)((uint8_t *)infop->eii_headerp - infop->eii_imagep); + if ((cms_header.size > buffer_size) || + (cms_header.offset > (buffer_size - cms_header.size))) { + rc = EINVAL; + goto fail3; + } + + image_header.offset = cms_header.offset + cms_header.size; + image_header.size = infop->eii_headerp->eih_size; + if ((image_header.size > buffer_size) || + (image_header.offset > (buffer_size - image_header.size))) { + rc = EINVAL; + goto fail4; + } + + code.offset = image_header.offset + image_header.size; + code.size = infop->eii_headerp->eih_code_size; + if ((code.size > buffer_size) || + (code.offset > (buffer_size - code.size))) { + rc = EINVAL; + goto fail5; + } + + image_trailer.offset = code.offset + code.size; + image_trailer.size = EFX_IMAGE_TRAILER_SIZE; + if ((image_trailer.size > buffer_size) || + (image_trailer.offset > (buffer_size - image_trailer.size))) { + rc = EINVAL; + goto fail6; + } + + signature.offset = image_trailer.offset + image_trailer.size; + signature.size = (uint32_t)(infop->eii_image_size - signature.offset); + if ((signature.size > buffer_size) || + (signature.offset > (buffer_size - signature.size))) { + rc = EINVAL; + goto fail7; + } + + EFSYS_ASSERT3U(infop->eii_image_size, ==, cms_header.size + + image_header.size + code.size + image_trailer.size + + signature.size); + + /* BEGIN CSTYLED */ + /* + * Build signed image partition, inserting chunk headers. 
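+	 *
+	 * Each signed_image_chunk_hdr_t written below records (field names
+	 * from this file; the binary layout itself is assumed from the
+	 * firmware spec, not restated here):
+	 *
+	 *   magic, version  -- sanity markers
+	 *   id              -- which image chunk the header describes
+	 *   offset, len     -- partition offset and size of the chunk data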
+	 *
+	 *  Signed Image:                  Image in NVRAM partition:
+	 *
+	 *  +-----------------+           +-----------------+
+	 *  | CMS header      |           |  mcfw.update    |<----+
+	 *  +-----------------+           |                 |     |
+	 *  | reflash header  |           +-----------------+     |
+	 *  +-----------------+           | chunk header:   |-->--|-+
+	 *  | mcfw.update     |           | REFLASH_TRAILER |     | |
+	 *  |                 |           +-----------------+     | |
+	 *  +-----------------+       +-->| CMS header      |     | |
+	 *  | reflash trailer |       |   +-----------------+     | |
+	 *  +-----------------+       |   | chunk header:   |->-+ | |
+	 *  | signature       |       |   | REFLASH_HEADER  |   | | |
+	 *  +-----------------+       |   +-----------------+   | | |
+	 *                            |   | reflash header  |<--+ | |
+	 *                            |   +-----------------+     | |
+	 *                            |   | chunk header:   |-->--+ |
+	 *                            |   | IMAGE           |       |
+	 *                            |   +-----------------+       |
+	 *                            |   | reflash trailer |<------+
+	 *                            |   +-----------------+
+	 *                            |   | chunk header:   |
+	 *                            |   | SIGNATURE       |->-+
+	 *                            |   +-----------------+   |
+	 *                            |   | signature       |<--+
+	 *                            |   +-----------------+
+	 *                            |   | ...unused...    |
+	 *                            |   +-----------------+
+	 *                            +-<-| chunk header:   |
+	 *                            >-->| CMS_HEADER      |
+	 *                                +-----------------+
+	 *
+	 * Each chunk header gives the partition offset and length of the image
+	 * chunk's data. The image chunk data is immediately followed by the
+	 * chunk header for the next chunk.
+	 *
+	 * The data chunk for the firmware code must be at the start of the
+	 * partition (needed for the bootloader). The first chunk header in the
+	 * chain (for the CMS header) is stored at the end of the partition. The
+	 * chain of chunk headers maintains the same logical order of image
+	 * chunks as the original signed image file. This set of constraints
+	 * results in the layout used for the data chunks and chunk headers.
+	 */
+	/* END CSTYLED */
+	memset(bufferp, 0xFF, buffer_size);
+
+	EFX_STATIC_ASSERT(sizeof (chunk_hdr) == SIGNED_IMAGE_CHUNK_HDR_LEN);
+	memset(&chunk_hdr, 0, SIGNED_IMAGE_CHUNK_HDR_LEN);
+
+	/*
+	 * CMS header
+	 */
+	if (buffer_size < SIGNED_IMAGE_CHUNK_HDR_LEN) {
+		rc = ENOSPC;
+		goto fail8;
+	}
+	hdr_offset = buffer_size - SIGNED_IMAGE_CHUNK_HDR_LEN;
+
+	chunk_hdr.magic = SIGNED_IMAGE_CHUNK_HDR_MAGIC;
+	chunk_hdr.version = SIGNED_IMAGE_CHUNK_HDR_VERSION;
+	chunk_hdr.id = SIGNED_IMAGE_CHUNK_CMS_HEADER;
+	chunk_hdr.offset = code.size + SIGNED_IMAGE_CHUNK_HDR_LEN;
+	chunk_hdr.len = cms_header.size;
+
+	memcpy(bufferp + hdr_offset, &chunk_hdr, sizeof (chunk_hdr));
+
+	if ((chunk_hdr.len > buffer_size) ||
+	    (chunk_hdr.offset > (buffer_size - chunk_hdr.len))) {
+		rc = ENOSPC;
+		goto fail9;
+	}
+	memcpy(bufferp + chunk_hdr.offset,
+	    infop->eii_imagep + cms_header.offset,
+	    cms_header.size);
+
+	/*
+	 * Image header
+	 */
+	hdr_offset = chunk_hdr.offset + chunk_hdr.len;
+	if (hdr_offset > (buffer_size - SIGNED_IMAGE_CHUNK_HDR_LEN)) {
+		rc = ENOSPC;
+		goto fail10;
+	}
+	chunk_hdr.magic = SIGNED_IMAGE_CHUNK_HDR_MAGIC;
+	chunk_hdr.version = SIGNED_IMAGE_CHUNK_HDR_VERSION;
+	chunk_hdr.id = SIGNED_IMAGE_CHUNK_REFLASH_HEADER;
+	chunk_hdr.offset = hdr_offset + SIGNED_IMAGE_CHUNK_HDR_LEN;
+	chunk_hdr.len = image_header.size;
+
+	memcpy(bufferp + hdr_offset, &chunk_hdr, SIGNED_IMAGE_CHUNK_HDR_LEN);
+
+	if ((chunk_hdr.len > buffer_size) ||
+	    (chunk_hdr.offset > (buffer_size - chunk_hdr.len))) {
+		rc = ENOSPC;
+		goto fail11;
+	}
+	memcpy(bufferp + chunk_hdr.offset,
+	    infop->eii_imagep + image_header.offset,
+	    image_header.size);
+
+	*headerpp = (efx_image_header_t *)(bufferp + chunk_hdr.offset);
+
+	/*
+	 * Firmware code
+	 */
+	hdr_offset = chunk_hdr.offset + chunk_hdr.len;
+	if (hdr_offset > (buffer_size - SIGNED_IMAGE_CHUNK_HDR_LEN)) {
+		rc = ENOSPC;
+		goto fail12;
+	}
+	chunk_hdr.magic =
SIGNED_IMAGE_CHUNK_HDR_MAGIC; + chunk_hdr.version = SIGNED_IMAGE_CHUNK_HDR_VERSION; + chunk_hdr.id = SIGNED_IMAGE_CHUNK_IMAGE; + chunk_hdr.offset = 0; + chunk_hdr.len = code.size; + + memcpy(bufferp + hdr_offset, &chunk_hdr, SIGNED_IMAGE_CHUNK_HDR_LEN); + + if ((chunk_hdr.len > buffer_size) || + (chunk_hdr.offset > (buffer_size - chunk_hdr.len))) { + rc = ENOSPC; + goto fail13; + } + memcpy(bufferp + chunk_hdr.offset, + infop->eii_imagep + code.offset, + code.size); + + /* + * Image trailer (CRC) + */ + chunk_hdr.magic = SIGNED_IMAGE_CHUNK_HDR_MAGIC; + chunk_hdr.version = SIGNED_IMAGE_CHUNK_HDR_VERSION; + chunk_hdr.id = SIGNED_IMAGE_CHUNK_REFLASH_TRAILER; + chunk_hdr.offset = hdr_offset + SIGNED_IMAGE_CHUNK_HDR_LEN; + chunk_hdr.len = image_trailer.size; + + hdr_offset = code.size; + if (hdr_offset > (buffer_size - SIGNED_IMAGE_CHUNK_HDR_LEN)) { + rc = ENOSPC; + goto fail14; + } + + memcpy(bufferp + hdr_offset, &chunk_hdr, SIGNED_IMAGE_CHUNK_HDR_LEN); + + if ((chunk_hdr.len > buffer_size) || + (chunk_hdr.offset > (buffer_size - chunk_hdr.len))) { + rc = ENOSPC; + goto fail15; + } + memcpy((uint8_t *)bufferp + chunk_hdr.offset, + infop->eii_imagep + image_trailer.offset, + image_trailer.size); + + /* + * Signature + */ + hdr_offset = chunk_hdr.offset + chunk_hdr.len; + if (hdr_offset > (buffer_size - SIGNED_IMAGE_CHUNK_HDR_LEN)) { + rc = ENOSPC; + goto fail16; + } + chunk_hdr.magic = SIGNED_IMAGE_CHUNK_HDR_MAGIC; + chunk_hdr.version = SIGNED_IMAGE_CHUNK_HDR_VERSION; + chunk_hdr.id = SIGNED_IMAGE_CHUNK_SIGNATURE; + chunk_hdr.offset = chunk_hdr.offset + SIGNED_IMAGE_CHUNK_HDR_LEN; + chunk_hdr.len = signature.size; + + memcpy(bufferp + hdr_offset, &chunk_hdr, SIGNED_IMAGE_CHUNK_HDR_LEN); + + if ((chunk_hdr.len > buffer_size) || + (chunk_hdr.offset > (buffer_size - chunk_hdr.len))) { + rc = ENOSPC; + goto fail17; + } + memcpy(bufferp + chunk_hdr.offset, + infop->eii_imagep + signature.offset, + signature.size); + + return (0); + +fail17: + EFSYS_PROBE(fail17); +fail16: + EFSYS_PROBE(fail16); +fail15: + EFSYS_PROBE(fail15); +fail14: + EFSYS_PROBE(fail14); +fail13: + EFSYS_PROBE(fail13); +fail12: + EFSYS_PROBE(fail12); +fail11: + EFSYS_PROBE(fail11); +fail10: + EFSYS_PROBE(fail10); +fail9: + EFSYS_PROBE(fail9); +fail8: + EFSYS_PROBE(fail8); +fail7: + EFSYS_PROBE(fail7); +fail6: + EFSYS_PROBE(fail6); +fail5: + EFSYS_PROBE(fail5); +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + + +#endif /* EFSYS_OPT_IMAGE_LAYOUT */ + +#endif /* EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */ diff --git a/drivers/net/sfc/base/ef10_impl.h b/drivers/net/sfc/base/ef10_impl.h index e79f4d53..4751faf1 100644 --- a/drivers/net/sfc/base/ef10_impl.h +++ b/drivers/net/sfc/base/ef10_impl.h @@ -11,13 +11,27 @@ extern "C" { #endif -#if (EFSYS_OPT_HUNTINGTON && EFSYS_OPT_MEDFORD) -#define EF10_MAX_PIOBUF_NBUFS MAX(HUNT_PIOBUF_NBUFS, MEDFORD_PIOBUF_NBUFS) -#elif EFSYS_OPT_HUNTINGTON -#define EF10_MAX_PIOBUF_NBUFS HUNT_PIOBUF_NBUFS -#elif EFSYS_OPT_MEDFORD -#define EF10_MAX_PIOBUF_NBUFS MEDFORD_PIOBUF_NBUFS -#endif + +/* Number of hardware PIO buffers (for compile-time resource dimensions) */ +#define EF10_MAX_PIOBUF_NBUFS (16) + +#if EFSYS_OPT_HUNTINGTON +# if (EF10_MAX_PIOBUF_NBUFS < HUNT_PIOBUF_NBUFS) +# error "EF10_MAX_PIOBUF_NBUFS too small" +# endif +#endif /* EFSYS_OPT_HUNTINGTON */ +#if EFSYS_OPT_MEDFORD +# if (EF10_MAX_PIOBUF_NBUFS < MEDFORD_PIOBUF_NBUFS) +# error "EF10_MAX_PIOBUF_NBUFS too small" +# endif +#endif /* 
EFSYS_OPT_MEDFORD */ +#if EFSYS_OPT_MEDFORD2 +# if (EF10_MAX_PIOBUF_NBUFS < MEDFORD2_PIOBUF_NBUFS) +# error "EF10_MAX_PIOBUF_NBUFS too small" +# endif +#endif /* EFSYS_OPT_MEDFORD2 */ + + /* * FIXME: This is just a power of 2 which fits in an MCDI v1 message, and could @@ -742,6 +756,7 @@ extern void ef10_tx_qdesc_tso2_create( __in efx_txq_t *etp, __in uint16_t ipv4_id, + __in uint16_t outer_ipv4_id, __in uint32_t tcp_seq, __in uint16_t tcp_mss, __out_ecount(count) efx_desc_t *edp, @@ -753,6 +768,11 @@ ef10_tx_qdesc_vlantci_create( __in uint16_t vlan_tci, __out efx_desc_t *edp); +extern void +ef10_tx_qdesc_checksum_create( + __in efx_txq_t *etp, + __in uint16_t flags, + __out efx_desc_t *edp); #if EFSYS_OPT_QSTATS @@ -947,13 +967,15 @@ extern void ef10_rx_qenable( __in efx_rxq_t *erp); +union efx_rxq_type_data_u; + extern __checkReturn efx_rc_t ef10_rx_qcreate( __in efx_nic_t *enp, __in unsigned int index, __in unsigned int label, __in efx_rxq_type_t type, - __in uint32_t type_data, + __in const union efx_rxq_type_data_u *type_data, __in efsys_mem_t *esmp, __in size_t ndescs, __in uint32_t id, @@ -1130,6 +1152,11 @@ efx_mcdi_get_clock( __out uint32_t *dpcpu_freqp); +extern __checkReturn efx_rc_t +efx_mcdi_get_rxdp_config( + __in efx_nic_t *enp, + __out uint32_t *end_paddingp); + extern __checkReturn efx_rc_t efx_mcdi_get_vector_cfg( __in efx_nic_t *enp, @@ -1137,20 +1164,27 @@ efx_mcdi_get_vector_cfg( __out_opt uint32_t *pf_nvecp, __out_opt uint32_t *vf_nvecp); -extern __checkReturn efx_rc_t -ef10_get_datapath_caps( - __in efx_nic_t *enp); - extern __checkReturn efx_rc_t ef10_get_privilege_mask( __in efx_nic_t *enp, __out uint32_t *maskp); +#if EFSYS_OPT_FW_SUBVARIANT_AWARE + extern __checkReturn efx_rc_t -ef10_external_port_mapping( +efx_mcdi_get_nic_global( __in efx_nic_t *enp, - __in uint32_t port, - __out uint8_t *external_portp); + __in uint32_t key, + __out uint32_t *valuep); + +extern __checkReturn efx_rc_t +efx_mcdi_set_nic_global( + __in efx_nic_t *enp, + __in uint32_t key, + __in uint32_t value); + +#endif /* EFSYS_OPT_FW_SUBVARIANT_AWARE */ + #if EFSYS_OPT_RX_PACKED_STREAM @@ -1182,6 +1216,16 @@ ef10_external_port_mapping( #endif /* EFSYS_OPT_RX_PACKED_STREAM */ +#if EFSYS_OPT_RX_ES_SUPER_BUFFER + +/* + * Maximum DMA length and buffer stride alignment. + * (see SF-119419-TC, 3.2) + */ +#define EFX_RX_ES_SUPER_BUFFER_BUF_ALIGNMENT 64 + +#endif + #ifdef __cplusplus } #endif diff --git a/drivers/net/sfc/base/ef10_intr.c b/drivers/net/sfc/base/ef10_intr.c index f79c44e3..1ffe266b 100644 --- a/drivers/net/sfc/base/ef10_intr.c +++ b/drivers/net/sfc/base/ef10_intr.c @@ -8,7 +8,7 @@ #include "efx_impl.h" -#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD +#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 __checkReturn efx_rc_t ef10_intr_init( @@ -56,7 +56,8 @@ efx_mcdi_trigger_interrupt( efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || - enp->en_family == EFX_FAMILY_MEDFORD); + enp->en_family == EFX_FAMILY_MEDFORD || + enp->en_family == EFX_FAMILY_MEDFORD2); if (level >= enp->en_nic_cfg.enc_intr_limit) { rc = EINVAL; @@ -129,7 +130,8 @@ ef10_intr_status_line( efx_dword_t dword; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || - enp->en_family == EFX_FAMILY_MEDFORD); + enp->en_family == EFX_FAMILY_MEDFORD || + enp->en_family == EFX_FAMILY_MEDFORD2); /* Read the queue mask and implicitly acknowledge the interrupt. 
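 *
 * (Sketch, not from this change: since the read clears the pending
 * mask, a handler must service every queue bit it observes, roughly
 *
 *   qmask = read(ER_DZ_BIU_INT_ISR_REG);
 *   for each set bit q in qmask: process event queue q;
 *
 * re-reading before servicing would acknowledge and lose events.)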
*/ EFX_BAR_READD(enp, ER_DZ_BIU_INT_ISR_REG, &dword, B_FALSE); @@ -147,7 +149,8 @@ ef10_intr_status_message( __out boolean_t *fatalp) { EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || - enp->en_family == EFX_FAMILY_MEDFORD); + enp->en_family == EFX_FAMILY_MEDFORD || + enp->en_family == EFX_FAMILY_MEDFORD2); _NOTE(ARGUNUSED(enp, message)) @@ -170,4 +173,4 @@ ef10_intr_fini( _NOTE(ARGUNUSED(enp)) } -#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ +#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */ diff --git a/drivers/net/sfc/base/ef10_mac.c b/drivers/net/sfc/base/ef10_mac.c index db7692ee..1031e836 100644 --- a/drivers/net/sfc/base/ef10_mac.c +++ b/drivers/net/sfc/base/ef10_mac.c @@ -8,7 +8,7 @@ #include "efx_impl.h" -#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD +#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 __checkReturn efx_rc_t ef10_mac_poll( @@ -356,7 +356,8 @@ ef10_mac_multicast_list_set( efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || - enp->en_family == EFX_FAMILY_MEDFORD); + enp->en_family == EFX_FAMILY_MEDFORD || + enp->en_family == EFX_FAMILY_MEDFORD2); if ((rc = emop->emo_reconfigure(enp)) != 0) goto fail1; @@ -522,8 +523,44 @@ ef10_mac_stats_get_mask( goto fail6; } + if (encp->enc_fec_counters) { + const struct efx_mac_stats_range ef10_fec[] = { + { EFX_MAC_FEC_UNCORRECTED_ERRORS, + EFX_MAC_FEC_CORRECTED_SYMBOLS_LANE3 }, + }; + if ((rc = efx_mac_stats_mask_add_ranges(maskp, mask_size, + ef10_fec, EFX_ARRAY_SIZE(ef10_fec))) != 0) + goto fail7; + } + + if (encp->enc_mac_stats_nstats >= MC_CMD_MAC_NSTATS_V4) { + const struct efx_mac_stats_range ef10_rxdp_sdt[] = { + { EFX_MAC_RXDP_SCATTER_DISABLED_TRUNC, + EFX_MAC_RXDP_SCATTER_DISABLED_TRUNC }, + }; + + if ((rc = efx_mac_stats_mask_add_ranges(maskp, mask_size, + ef10_rxdp_sdt, EFX_ARRAY_SIZE(ef10_rxdp_sdt))) != 0) + goto fail8; + } + + if (encp->enc_hlb_counters) { + const struct efx_mac_stats_range ef10_hlb[] = { + { EFX_MAC_RXDP_HLB_IDLE, EFX_MAC_RXDP_HLB_TIMEOUT }, + }; + if ((rc = efx_mac_stats_mask_add_ranges(maskp, mask_size, + ef10_hlb, EFX_ARRAY_SIZE(ef10_hlb))) != 0) + goto fail9; + } + return (0); +fail9: + EFSYS_PROBE(fail9); +fail8: + EFSYS_PROBE(fail8); +fail7: + EFSYS_PROBE(fail7); fail6: EFSYS_PROBE(fail6); fail5: @@ -551,16 +588,45 @@ ef10_mac_stats_update( __inout_ecount(EFX_MAC_NSTATS) efsys_stat_t *stat, __inout_opt uint32_t *generationp) { - efx_qword_t value; + const efx_nic_cfg_t *encp = &enp->en_nic_cfg; efx_qword_t generation_start; efx_qword_t generation_end; + efx_qword_t value; + efx_rc_t rc; - _NOTE(ARGUNUSED(enp)) + /* + * The MAC_STATS contain start and end generation counters used to + * detect when the DMA buffer has been updated during stats decode. + * All stats counters are 64bit unsigned values. + * + * Siena-compatible MAC stats contain MC_CMD_MAC_NSTATS 64bit counters. + * The generation end counter is at index MC_CMD_MAC_GENERATION_END + * (same as MC_CMD_MAC_NSTATS-1). + * + * Medford2 and later use a larger DMA buffer: MAC_STATS_NUM_STATS from + * MC_CMD_GET_CAPABILITIES_V4_OUT reports the number of 64bit counters. + * + * Firmware writes the generation end counter as the last counter in the + * DMA buffer. Do not use MC_CMD_MAC_GENERATION_END, as that is only + * correct for legacy Siena-compatible MAC stats. 
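+	 *
+	 * The resulting consumer pattern is therefore (sketch):
+	 *
+	 *   gen_end = stats[nstats - 1];
+	 *   read barrier; decode all counters; read barrier;
+	 *   gen_start = stats[MC_CMD_MAC_GENERATION_START];
+	 *
+	 * and the snapshot is torn (must be re-read) if the two generation
+	 * counters differ.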
+ */ + + if (encp->enc_mac_stats_nstats < MC_CMD_MAC_NSTATS) { + /* MAC stats count too small for legacy MAC stats */ + rc = ENOSPC; + goto fail1; + } + if (EFSYS_MEM_SIZE(esmp) < + (encp->enc_mac_stats_nstats * sizeof (efx_qword_t))) { + /* DMA buffer too small */ + rc = ENOSPC; + goto fail2; + } /* Read END first so we don't race with the MC */ - EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, EFX_MAC_STATS_SIZE); - EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_GENERATION_END, - &generation_end); + EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, EFSYS_MEM_SIZE(esmp)); + EF10_MAC_STAT_READ(esmp, (encp->enc_mac_stats_nstats - 1), + &generation_end); EFSYS_MEM_READ_BARRIER(); /* TX */ @@ -851,7 +917,109 @@ ef10_mac_stats_update( EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_OVERFLOW]), &value); - EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, EFX_MAC_STATS_SIZE); + if (encp->enc_mac_stats_nstats < MC_CMD_MAC_NSTATS_V2) + goto done; + + /* FEC */ + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_FEC_UNCORRECTED_ERRORS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_FEC_UNCORRECTED_ERRORS]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_FEC_CORRECTED_ERRORS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_FEC_CORRECTED_ERRORS]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE0, + &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_FEC_CORRECTED_SYMBOLS_LANE0]), + &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE1, + &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_FEC_CORRECTED_SYMBOLS_LANE1]), + &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE2, + &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_FEC_CORRECTED_SYMBOLS_LANE2]), + &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE3, + &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_FEC_CORRECTED_SYMBOLS_LANE3]), + &value); + + if (encp->enc_mac_stats_nstats < MC_CMD_MAC_NSTATS_V3) + goto done; + + /* CTPIO exceptions */ + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_VI_BUSY_FALLBACK, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_VI_BUSY_FALLBACK]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_LONG_WRITE_SUCCESS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_LONG_WRITE_SUCCESS]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_MISSING_DBELL_FAIL, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_MISSING_DBELL_FAIL]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_OVERFLOW_FAIL, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_OVERFLOW_FAIL]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_UNDERFLOW_FAIL, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_UNDERFLOW_FAIL]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_TIMEOUT_FAIL, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_TIMEOUT_FAIL]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_NONCONTIG_WR_FAIL, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_NONCONTIG_WR_FAIL]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_FRM_CLOBBER_FAIL, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_FRM_CLOBBER_FAIL]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_INVALID_WR_FAIL, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_INVALID_WR_FAIL]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_VI_CLOBBER_FALLBACK, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_VI_CLOBBER_FALLBACK]), + &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_UNQUALIFIED_FALLBACK, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_UNQUALIFIED_FALLBACK]), + &value); + + 
EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_RUNT_FALLBACK, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_RUNT_FALLBACK]), &value); + + /* CTPIO per-port stats */ + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_SUCCESS, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_SUCCESS]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_FALLBACK, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_FALLBACK]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_POISON, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_POISON]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_ERASE, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_ERASE]), &value); + + if (encp->enc_mac_stats_nstats < MC_CMD_MAC_NSTATS_V4) + goto done; + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RXDP_SCATTER_DISABLED_TRUNC, + &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RXDP_SCATTER_DISABLED_TRUNC]), + &value); + + /* Head-of-line blocking */ + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RXDP_HLB_IDLE, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RXDP_HLB_IDLE]), &value); + + EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RXDP_HLB_TIMEOUT, &value); + EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RXDP_HLB_TIMEOUT]), &value); + +done: + /* Read START generation counter */ + EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, EFSYS_MEM_SIZE(esmp)); EFSYS_MEM_READ_BARRIER(); EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_GENERATION_START, &generation_start); @@ -866,8 +1034,15 @@ ef10_mac_stats_update( *generationp = EFX_QWORD_FIELD(generation_start, EFX_DWORD_0); return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); } #endif /* EFSYS_OPT_MAC_STATS */ -#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ +#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */ diff --git a/drivers/net/sfc/base/ef10_mcdi.c b/drivers/net/sfc/base/ef10_mcdi.c index 1f9e573f..8a3fc3b4 100644 --- a/drivers/net/sfc/base/ef10_mcdi.c +++ b/drivers/net/sfc/base/ef10_mcdi.c @@ -8,7 +8,7 @@ #include "efx_impl.h" -#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD +#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 #if EFSYS_OPT_MCDI @@ -28,7 +28,8 @@ ef10_mcdi_init( efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || - enp->en_family == EFX_FAMILY_MEDFORD); + enp->en_family == EFX_FAMILY_MEDFORD || + enp->en_family == EFX_FAMILY_MEDFORD2); EFSYS_ASSERT(enp->en_features & EFX_FEATURE_MCDI_DMA); /* @@ -135,7 +136,8 @@ ef10_mcdi_send_request( unsigned int pos; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || - enp->en_family == EFX_FAMILY_MEDFORD); + enp->en_family == EFX_FAMILY_MEDFORD || + enp->en_family == EFX_FAMILY_MEDFORD2); /* Write the header */ for (pos = 0; pos < hdr_len; pos += sizeof (efx_dword_t)) { @@ -186,13 +188,17 @@ ef10_mcdi_read_response( { const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp; efsys_mem_t *esmp = emtp->emt_dma_mem; - unsigned int pos; + unsigned int pos = 0; efx_dword_t data; + size_t remaining = length; + + while (remaining > 0) { + size_t chunk = MIN(remaining, sizeof (data)); - for (pos = 0; pos < length; pos += sizeof (efx_dword_t)) { EFSYS_MEM_READD(esmp, offset + pos, &data); - memcpy((uint8_t *)bufferp + pos, &data, - MIN(sizeof (data), length - pos)); + memcpy((uint8_t *)bufferp + pos, &data, chunk); + pos += chunk; + remaining -= chunk; } } @@ -254,7 +260,8 @@ ef10_mcdi_feature_supported( efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || - enp->en_family == EFX_FAMILY_MEDFORD); + enp->en_family == EFX_FAMILY_MEDFORD || + 
enp->en_family == EFX_FAMILY_MEDFORD2); /* * Use privilege mask state at MCDI attach. @@ -315,4 +322,4 @@ fail1: #endif /* EFSYS_OPT_MCDI */ -#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ +#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */ diff --git a/drivers/net/sfc/base/ef10_nic.c b/drivers/net/sfc/base/ef10_nic.c index eb9ec2be..7dbf843b 100644 --- a/drivers/net/sfc/base/ef10_nic.c +++ b/drivers/net/sfc/base/ef10_nic.c @@ -10,7 +10,7 @@ #include "mcdi_mon.h" #endif -#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD +#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 #include "ef10_tlv_layout.h" @@ -25,7 +25,8 @@ efx_mcdi_get_port_assignment( efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || - enp->en_family == EFX_FAMILY_MEDFORD); + enp->en_family == EFX_FAMILY_MEDFORD || + enp->en_family == EFX_FAMILY_MEDFORD2); (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_GET_PORT_ASSIGNMENT; @@ -70,7 +71,8 @@ efx_mcdi_get_port_modes( efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || - enp->en_family == EFX_FAMILY_MEDFORD); + enp->en_family == EFX_FAMILY_MEDFORD || + enp->en_family == EFX_FAMILY_MEDFORD2); (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_GET_PORT_MODES; @@ -250,7 +252,8 @@ efx_mcdi_get_mac_address_pf( efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || - enp->en_family == EFX_FAMILY_MEDFORD); + enp->en_family == EFX_FAMILY_MEDFORD || + enp->en_family == EFX_FAMILY_MEDFORD2); (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_GET_MAC_ADDRESSES; @@ -308,7 +311,8 @@ efx_mcdi_get_mac_address_vf( efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || - enp->en_family == EFX_FAMILY_MEDFORD); + enp->en_family == EFX_FAMILY_MEDFORD || + enp->en_family == EFX_FAMILY_MEDFORD2); (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_VPORT_GET_MAC_ADDRESSES; @@ -372,7 +376,8 @@ efx_mcdi_get_clock( efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || - enp->en_family == EFX_FAMILY_MEDFORD); + enp->en_family == EFX_FAMILY_MEDFORD || + enp->en_family == EFX_FAMILY_MEDFORD2); (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_GET_CLOCK; @@ -415,6 +420,64 @@ fail2: fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); + return (rc); +} + + __checkReturn efx_rc_t +efx_mcdi_get_rxdp_config( + __in efx_nic_t *enp, + __out uint32_t *end_paddingp) +{ + efx_mcdi_req_t req; + uint8_t payload[MAX(MC_CMD_GET_RXDP_CONFIG_IN_LEN, + MC_CMD_GET_RXDP_CONFIG_OUT_LEN)]; + uint32_t end_padding; + efx_rc_t rc; + + memset(payload, 0, sizeof (payload)); + req.emr_cmd = MC_CMD_GET_RXDP_CONFIG; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_GET_RXDP_CONFIG_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_GET_RXDP_CONFIG_OUT_LEN; + + efx_mcdi_execute(enp, &req); + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + if (MCDI_OUT_DWORD_FIELD(req, GET_RXDP_CONFIG_OUT_DATA, + GET_RXDP_CONFIG_OUT_PAD_HOST_DMA) == 0) { + /* RX DMA end padding is disabled */ + end_padding = 0; + } else { + switch (MCDI_OUT_DWORD_FIELD(req, GET_RXDP_CONFIG_OUT_DATA, + GET_RXDP_CONFIG_OUT_PAD_HOST_LEN)) { + case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_64: + end_padding = 64; + break; + case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_128: + end_padding = 128; + break; + case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_256: + end_padding = 256; + break; + default: + rc = ENOTSUP; + goto fail2; + } + } + + *end_paddingp = end_padding; + 
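+
+	/*
+	 * Worked example (illustrative): PAD_HOST_DMA == 1 with
+	 * PAD_HOST_LEN == MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_128 yields
+	 * *end_paddingp == 128, i.e. the firmware pads each Rx DMA write
+	 * out to a 128-byte boundary.
+	 */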
+ return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + return (rc); } @@ -783,7 +846,8 @@ ef10_nic_pio_alloc( efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || - enp->en_family == EFX_FAMILY_MEDFORD); + enp->en_family == EFX_FAMILY_MEDFORD || + enp->en_family == EFX_FAMILY_MEDFORD2); EFSYS_ASSERT(bufnump); EFSYS_ASSERT(handlep); EFSYS_ASSERT(blknump); @@ -925,62 +989,103 @@ fail1: return (rc); } - __checkReturn efx_rc_t +static __checkReturn efx_rc_t ef10_get_datapath_caps( __in efx_nic_t *enp) { efx_nic_cfg_t *encp = &(enp->en_nic_cfg); - uint32_t flags; - uint32_t flags2; - uint32_t tso2nc; + efx_mcdi_req_t req; + uint8_t payload[MAX(MC_CMD_GET_CAPABILITIES_IN_LEN, + MC_CMD_GET_CAPABILITIES_V5_OUT_LEN)]; efx_rc_t rc; - if ((rc = efx_mcdi_get_capabilities(enp, &flags, NULL, NULL, - &flags2, &tso2nc)) != 0) - goto fail1; - if ((rc = ef10_mcdi_get_pf_count(enp, &encp->enc_hw_pf_count)) != 0) goto fail1; -#define CAP_FLAG(flags1, field) \ - ((flags1) & (1 << (MC_CMD_GET_CAPABILITIES_V2_OUT_ ## field ## _LBN))) -#define CAP_FLAG2(flags2, field) \ - ((flags2) & (1 << (MC_CMD_GET_CAPABILITIES_V2_OUT_ ## field ## _LBN))) + (void) memset(payload, 0, sizeof (payload)); + req.emr_cmd = MC_CMD_GET_CAPABILITIES; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_GET_CAPABILITIES_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_GET_CAPABILITIES_V5_OUT_LEN; + + efx_mcdi_execute_quiet(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail2; + } + + if (req.emr_out_length_used < MC_CMD_GET_CAPABILITIES_OUT_LEN) { + rc = EMSGSIZE; + goto fail3; + } + +#define CAP_FLAGS1(_req, _flag) \ + (MCDI_OUT_DWORD((_req), GET_CAPABILITIES_OUT_FLAGS1) & \ + (1u << (MC_CMD_GET_CAPABILITIES_V2_OUT_ ## _flag ## _LBN))) + +#define CAP_FLAGS2(_req, _flag) \ + (((_req).emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V2_OUT_LEN) && \ + (MCDI_OUT_DWORD((_req), GET_CAPABILITIES_V2_OUT_FLAGS2) & \ + (1u << (MC_CMD_GET_CAPABILITIES_V2_OUT_ ## _flag ## _LBN)))) /* * Huntington RXDP firmware inserts a 0 or 14 byte prefix. * We only support the 14 byte prefix here. */ - if (CAP_FLAG(flags, RX_PREFIX_LEN_14) == 0) { + if (CAP_FLAGS1(req, RX_PREFIX_LEN_14) == 0) { rc = ENOTSUP; - goto fail2; + goto fail4; } encp->enc_rx_prefix_size = 14; + /* Check if the firmware supports additional RSS modes */ + if (CAP_FLAGS1(req, ADDITIONAL_RSS_MODES)) + encp->enc_rx_scale_additional_modes_supported = B_TRUE; + else + encp->enc_rx_scale_additional_modes_supported = B_FALSE; + /* Check if the firmware supports TSO */ - encp->enc_fw_assisted_tso_enabled = - CAP_FLAG(flags, TX_TSO) ? B_TRUE : B_FALSE; + if (CAP_FLAGS1(req, TX_TSO)) + encp->enc_fw_assisted_tso_enabled = B_TRUE; + else + encp->enc_fw_assisted_tso_enabled = B_FALSE; /* Check if the firmware supports FATSOv2 */ - encp->enc_fw_assisted_tso_v2_enabled = - CAP_FLAG2(flags2, TX_TSO_V2) ? B_TRUE : B_FALSE; + if (CAP_FLAGS2(req, TX_TSO_V2)) { + encp->enc_fw_assisted_tso_v2_enabled = B_TRUE; + encp->enc_fw_assisted_tso_v2_n_contexts = MCDI_OUT_WORD(req, + GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS); + } else { + encp->enc_fw_assisted_tso_v2_enabled = B_FALSE; + encp->enc_fw_assisted_tso_v2_n_contexts = 0; + } - /* Get the number of TSO contexts (FATSOv2) */ - encp->enc_fw_assisted_tso_v2_n_contexts = - CAP_FLAG2(flags2, TX_TSO_V2) ? 
tso2nc : 0; + /* Check if the firmware supports FATSOv2 encap */ + if (CAP_FLAGS2(req, TX_TSO_V2_ENCAP)) + encp->enc_fw_assisted_tso_v2_encap_enabled = B_TRUE; + else + encp->enc_fw_assisted_tso_v2_encap_enabled = B_FALSE; /* Check if the firmware has vadapter/vport/vswitch support */ - encp->enc_datapath_cap_evb = - CAP_FLAG(flags, EVB) ? B_TRUE : B_FALSE; + if (CAP_FLAGS1(req, EVB)) + encp->enc_datapath_cap_evb = B_TRUE; + else + encp->enc_datapath_cap_evb = B_FALSE; /* Check if the firmware supports VLAN insertion */ - encp->enc_hw_tx_insert_vlan_enabled = - CAP_FLAG(flags, TX_VLAN_INSERTION) ? B_TRUE : B_FALSE; + if (CAP_FLAGS1(req, TX_VLAN_INSERTION)) + encp->enc_hw_tx_insert_vlan_enabled = B_TRUE; + else + encp->enc_hw_tx_insert_vlan_enabled = B_FALSE; /* Check if the firmware supports RX event batching */ - encp->enc_rx_batching_enabled = - CAP_FLAG(flags, RX_BATCHING) ? B_TRUE : B_FALSE; + if (CAP_FLAGS1(req, RX_BATCHING)) + encp->enc_rx_batching_enabled = B_TRUE; + else + encp->enc_rx_batching_enabled = B_FALSE; /* * Even if batching isn't reported as supported, we may still get @@ -989,38 +1094,61 @@ ef10_get_datapath_caps( encp->enc_rx_batch_max = 16; /* Check if the firmware supports disabling scatter on RXQs */ - encp->enc_rx_disable_scatter_supported = - CAP_FLAG(flags, RX_DISABLE_SCATTER) ? B_TRUE : B_FALSE; + if (CAP_FLAGS1(req, RX_DISABLE_SCATTER)) + encp->enc_rx_disable_scatter_supported = B_TRUE; + else + encp->enc_rx_disable_scatter_supported = B_FALSE; /* Check if the firmware supports packed stream mode */ - encp->enc_rx_packed_stream_supported = - CAP_FLAG(flags, RX_PACKED_STREAM) ? B_TRUE : B_FALSE; + if (CAP_FLAGS1(req, RX_PACKED_STREAM)) + encp->enc_rx_packed_stream_supported = B_TRUE; + else + encp->enc_rx_packed_stream_supported = B_FALSE; /* * Check if the firmware supports configurable buffer sizes * for packed stream mode (otherwise buffer size is 1Mbyte) */ - encp->enc_rx_var_packed_stream_supported = - CAP_FLAG(flags, RX_PACKED_STREAM_VAR_BUFFERS) ? B_TRUE : B_FALSE; + if (CAP_FLAGS1(req, RX_PACKED_STREAM_VAR_BUFFERS)) + encp->enc_rx_var_packed_stream_supported = B_TRUE; + else + encp->enc_rx_var_packed_stream_supported = B_FALSE; + + /* Check if the firmware supports equal stride super-buffer mode */ + if (CAP_FLAGS2(req, EQUAL_STRIDE_SUPER_BUFFER)) + encp->enc_rx_es_super_buffer_supported = B_TRUE; + else + encp->enc_rx_es_super_buffer_supported = B_FALSE; + + /* Check if the firmware supports FW subvariant w/o Tx checksumming */ + if (CAP_FLAGS2(req, FW_SUBVARIANT_NO_TX_CSUM)) + encp->enc_fw_subvariant_no_tx_csum_supported = B_TRUE; + else + encp->enc_fw_subvariant_no_tx_csum_supported = B_FALSE; /* Check if the firmware supports set mac with running filters */ - encp->enc_allow_set_mac_with_installed_filters = - CAP_FLAG(flags, VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED) ? - B_TRUE : B_FALSE; + if (CAP_FLAGS1(req, VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED)) + encp->enc_allow_set_mac_with_installed_filters = B_TRUE; + else + encp->enc_allow_set_mac_with_installed_filters = B_FALSE; /* * Check if firmware supports the extended MC_CMD_SET_MAC, which allows * specifying which parameters to configure. */ - encp->enc_enhanced_set_mac_supported = - CAP_FLAG(flags, SET_MAC_ENHANCED) ? 
B_TRUE : B_FALSE; + if (CAP_FLAGS1(req, SET_MAC_ENHANCED)) + encp->enc_enhanced_set_mac_supported = B_TRUE; + else + encp->enc_enhanced_set_mac_supported = B_FALSE; /* * Check if firmware supports version 2 of MC_CMD_INIT_EVQ, which allows * us to let the firmware choose the settings to use on an EVQ. */ - encp->enc_init_evq_v2_supported = - CAP_FLAG2(flags2, INIT_EVQ_V2) ? B_TRUE : B_FALSE; + if (CAP_FLAGS2(req, INIT_EVQ_V2)) + encp->enc_init_evq_v2_supported = B_TRUE; + else + encp->enc_init_evq_v2_supported = B_FALSE; /* * Check if firmware-verified NVRAM updates must be used. @@ -1030,29 +1158,34 @@ ef10_get_datapath_caps( * and version 2 of MC_CMD_NVRAM_UPDATE_FINISH (to verify the updated * partition and report the result). */ - encp->enc_nvram_update_verify_result_supported = - CAP_FLAG2(flags2, NVRAM_UPDATE_REPORT_VERIFY_RESULT) ? - B_TRUE : B_FALSE; + if (CAP_FLAGS2(req, NVRAM_UPDATE_REPORT_VERIFY_RESULT)) + encp->enc_nvram_update_verify_result_supported = B_TRUE; + else + encp->enc_nvram_update_verify_result_supported = B_FALSE; /* * Check if firmware provides packet memory and Rx datapath * counters. */ - encp->enc_pm_and_rxdp_counters = - CAP_FLAG(flags, PM_AND_RXDP_COUNTERS) ? B_TRUE : B_FALSE; + if (CAP_FLAGS1(req, PM_AND_RXDP_COUNTERS)) + encp->enc_pm_and_rxdp_counters = B_TRUE; + else + encp->enc_pm_and_rxdp_counters = B_FALSE; /* * Check if the 40G MAC hardware is capable of reporting * statistics for Tx size bins. */ - encp->enc_mac_stats_40g_tx_size_bins = - CAP_FLAG2(flags2, MAC_STATS_40G_TX_SIZE_BINS) ? B_TRUE : B_FALSE; + if (CAP_FLAGS2(req, MAC_STATS_40G_TX_SIZE_BINS)) + encp->enc_mac_stats_40g_tx_size_bins = B_TRUE; + else + encp->enc_mac_stats_40g_tx_size_bins = B_FALSE; /* * Check if firmware supports VXLAN and NVGRE tunnels. * The capability indicates Geneve protocol support as well. */ - if (CAP_FLAG(flags, VXLAN_NVGRE)) { + if (CAP_FLAGS1(req, VXLAN_NVGRE)) { encp->enc_tunnel_encapsulations_supported = (1u << EFX_TUNNEL_PROTOCOL_VXLAN) | (1u << EFX_TUNNEL_PROTOCOL_GENEVE) | @@ -1066,11 +1199,136 @@ ef10_get_datapath_caps( encp->enc_tunnel_config_udp_entries_max = 0; } -#undef CAP_FLAG -#undef CAP_FLAG2 + /* + * Check if firmware reports the VI window mode. + * Medford2 has a variable VI window size (8K, 16K or 64K). + * Medford and Huntington have a fixed 8K VI window size. + */ + if (req.emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V3_OUT_LEN) { + uint8_t mode = + MCDI_OUT_BYTE(req, GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE); + + switch (mode) { + case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K: + encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_8K; + break; + case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K: + encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_16K; + break; + case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K: + encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_64K; + break; + default: + encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_INVALID; + break; + } + } else if ((enp->en_family == EFX_FAMILY_HUNTINGTON) || + (enp->en_family == EFX_FAMILY_MEDFORD)) { + /* Huntington and Medford have fixed 8K window size */ + encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_8K; + } else { + encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_INVALID; + } + + /* Check if firmware supports extended MAC stats. 
*/ + if (req.emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V4_OUT_LEN) { + /* Extended stats buffer supported */ + encp->enc_mac_stats_nstats = MCDI_OUT_WORD(req, + GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS); + } else { + /* Use Siena-compatible legacy MAC stats */ + encp->enc_mac_stats_nstats = MC_CMD_MAC_NSTATS; + } + + if (encp->enc_mac_stats_nstats >= MC_CMD_MAC_NSTATS_V2) + encp->enc_fec_counters = B_TRUE; + else + encp->enc_fec_counters = B_FALSE; + + /* Check if the firmware provides head-of-line blocking counters */ + if (CAP_FLAGS2(req, RXDP_HLB_IDLE)) + encp->enc_hlb_counters = B_TRUE; + else + encp->enc_hlb_counters = B_FALSE; + + if (CAP_FLAGS1(req, RX_RSS_LIMITED)) { + /* Only one exclusive RSS context is available per port. */ + encp->enc_rx_scale_max_exclusive_contexts = 1; + + switch (enp->en_family) { + case EFX_FAMILY_MEDFORD2: + encp->enc_rx_scale_hash_alg_mask = + (1U << EFX_RX_HASHALG_TOEPLITZ); + break; + + case EFX_FAMILY_MEDFORD: + case EFX_FAMILY_HUNTINGTON: + /* + * Packed stream firmware variant maintains a + * non-standard algorithm for hash computation. + * It implies explicit XORing together + * source + destination IP addresses (or last + * four bytes in the case of IPv6) and using the + * resulting value as the input to a Toeplitz hash. + */ + encp->enc_rx_scale_hash_alg_mask = + (1U << EFX_RX_HASHALG_PACKED_STREAM); + break; + + default: + rc = EINVAL; + goto fail5; + } + + /* Port numbers cannot contribute to the hash value */ + encp->enc_rx_scale_l4_hash_supported = B_FALSE; + } else { + /* + * Maximum number of exclusive RSS contexts. + * EF10 hardware supports 64 in total, but 6 are reserved + * for shared contexts. They are a global resource so + * not all may be available. + */ + encp->enc_rx_scale_max_exclusive_contexts = 64 - 6; + + encp->enc_rx_scale_hash_alg_mask = + (1U << EFX_RX_HASHALG_TOEPLITZ); + + /* + * It is possible to use port numbers as + * the input data for hash computation. + */ + encp->enc_rx_scale_l4_hash_supported = B_TRUE; + } + /* Check if the firmware supports "FLAG" and "MARK" filter actions */ + if (CAP_FLAGS2(req, FILTER_ACTION_FLAG)) + encp->enc_filter_action_flag_supported = B_TRUE; + else + encp->enc_filter_action_flag_supported = B_FALSE; + + if (CAP_FLAGS2(req, FILTER_ACTION_MARK)) + encp->enc_filter_action_mark_supported = B_TRUE; + else + encp->enc_filter_action_mark_supported = B_FALSE; + + /* Get maximum supported value for "MARK" filter action */ + if (req.emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V5_OUT_LEN) + encp->enc_filter_action_mark_max = MCDI_OUT_DWORD(req, + GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_MAX); + else + encp->enc_filter_action_mark_max = 0; + +#undef CAP_FLAGS1 +#undef CAP_FLAGS2 return (0); +fail5: + EFSYS_PROBE(fail5); +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: @@ -1132,78 +1390,238 @@ fail1: /* - * Table of mapping schemes from port number to the number of the external - * connector on the board. The external numbering does not distinguish - * off-board separated outputs such as from multi-headed cables. + * Table of mapping schemes from port number to external number. + * + * Each port number ultimately corresponds to a connector: either as part of + * a cable assembly attached to a module inserted in an SFP+/QSFP+ cage on + * the board, or fixed to the board (e.g. 10GBASE-T magjack on SFN5121T + * "Salina"). 
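The RSS bookkeeping above reduces to simple bitmask tests against enc_rx_scale_hash_alg_mask: bit N set means algorithm N is usable. A minimal sketch of that convention, assuming stand-in enum values rather than the real efx_rx_hashalg_t ones:

#include <stdint.h>
#include <stdio.h>

enum hashalg { HASHALG_TOEPLITZ = 1, HASHALG_PACKED_STREAM = 2 };

static int
hashalg_supported(uint32_t alg_mask, unsigned int alg)
{
	return ((alg_mask & (1U << alg)) != 0);
}

int
main(void)
{
	/* Mask as set in the RX_RSS_LIMITED branch above */
	uint32_t mask = (1U << HASHALG_PACKED_STREAM);

	printf("toeplitz: %d\n", hashalg_supported(mask, HASHALG_TOEPLITZ));
	printf("packed stream: %d\n",
	    hashalg_supported(mask, HASHALG_PACKED_STREAM));
	return (0);
}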
In general: * - * The count of adjacent port numbers that map to each external port + * Port number (0-based) + * | + * port mapping (n:1) + * | + * v + * External port number (normally 1-based) + * | + * fixed (1:1) or cable assembly (1:m) + * | + * v + * Connector + * + * The external numbering refers to the cages or magjacks on the board, + * as visibly annotated on the board or back panel. This table describes + * how to determine which external cage/magjack corresponds to the port + * numbers used by the driver. + * + * The count of adjacent port numbers that map to each external number, * and the offset in the numbering, is determined by the chip family and * current port mode. * * For the Huntington family, the current port mode cannot be discovered, + * but a single mapping is used by all modes for a given chip variant, * so the mapping used is instead the last match in the table to the full * set of port modes to which the NIC can be configured. Therefore the * ordering of entries in the mapping table is significant. */ -static struct { +static struct ef10_external_port_map_s { efx_family_t family; uint32_t modes_mask; int32_t count; int32_t offset; } __ef10_external_port_mappings[] = { - /* Supported modes with 1 output per external port */ + /* + * Modes used by Huntington family controllers where each port + * number maps to a separate cage. + * SFN7x22F (Torino): + * port 0 -> cage 1 + * port 1 -> cage 2 + * SFN7xx4F (Pavia): + * port 0 -> cage 1 + * port 1 -> cage 2 + * port 2 -> cage 3 + * port 3 -> cage 4 + */ { EFX_FAMILY_HUNTINGTON, - (1 << TLV_PORT_MODE_10G) | - (1 << TLV_PORT_MODE_10G_10G) | - (1 << TLV_PORT_MODE_10G_10G_10G_10G), - 1, - 1 + (1U << TLV_PORT_MODE_10G) | /* mode 0 */ + (1U << TLV_PORT_MODE_10G_10G) | /* mode 2 */ + (1U << TLV_PORT_MODE_10G_10G_10G_10G), /* mode 4 */ + 1, /* ports per cage */ + 1 /* first cage */ }, + /* + * Modes which for Huntington identify a chip variant where 2 + * adjacent port numbers map to each cage. + * SFN7x42Q (Monza): + * port 0 -> cage 1 + * port 1 -> cage 1 + * port 2 -> cage 2 + * port 3 -> cage 2 + */ { - EFX_FAMILY_MEDFORD, - (1 << TLV_PORT_MODE_10G) | - (1 << TLV_PORT_MODE_10G_10G), - 1, - 1 + EFX_FAMILY_HUNTINGTON, + (1U << TLV_PORT_MODE_40G) | /* mode 1 */ + (1U << TLV_PORT_MODE_40G_40G) | /* mode 3 */ + (1U << TLV_PORT_MODE_40G_10G_10G) | /* mode 6 */ + (1U << TLV_PORT_MODE_10G_10G_40G), /* mode 7 */ + 2, /* ports per cage */ + 1 /* first cage */ }, - /* Supported modes with 2 outputs per external port */ + /* + * Modes that on Medford allocate each port number to a separate + * cage. + * port 0 -> cage 1 + * port 1 -> cage 2 + * port 2 -> cage 3 + * port 3 -> cage 4 + */ { - EFX_FAMILY_HUNTINGTON, - (1 << TLV_PORT_MODE_40G) | - (1 << TLV_PORT_MODE_40G_40G) | - (1 << TLV_PORT_MODE_40G_10G_10G) | - (1 << TLV_PORT_MODE_10G_10G_40G), - 2, - 1 + EFX_FAMILY_MEDFORD, + (1U << TLV_PORT_MODE_10G) | /* mode 0 */ + (1U << TLV_PORT_MODE_10G_10G), /* mode 2 */ + 1, /* ports per cage */ + 1 /* first cage */ }, + /* + * Modes that on Medford allocate 2 adjacent port numbers to each + * cage. 
+ * port 0 -> cage 1 + * port 1 -> cage 1 + * port 2 -> cage 2 + * port 3 -> cage 2 + */ { EFX_FAMILY_MEDFORD, - (1 << TLV_PORT_MODE_40G) | - (1 << TLV_PORT_MODE_40G_40G) | - (1 << TLV_PORT_MODE_40G_10G_10G) | - (1 << TLV_PORT_MODE_10G_10G_40G) | - (1 << TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2), - 2, - 1 + (1U << TLV_PORT_MODE_40G) | /* mode 1 */ + (1U << TLV_PORT_MODE_40G_40G) | /* mode 3 */ + (1U << TLV_PORT_MODE_40G_10G_10G) | /* mode 6 */ + (1U << TLV_PORT_MODE_10G_10G_40G) | /* mode 7 */ + /* Do not use 10G_10G_10G_10G_Q1_Q2 (see bug63270) */ + (1U << TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2), /* mode 9 */ + 2, /* ports per cage */ + 1 /* first cage */ }, - /* Supported modes with 4 outputs per external port */ + /* + * Modes that on Medford allocate 4 adjacent port numbers to each + * connector, starting on cage 1. + * port 0 -> cage 1 + * port 1 -> cage 1 + * port 2 -> cage 1 + * port 3 -> cage 1 + */ { EFX_FAMILY_MEDFORD, - (1 << TLV_PORT_MODE_10G_10G_10G_10G_Q) | - (1 << TLV_PORT_MODE_10G_10G_10G_10G_Q1), - 4, - 1, + (1U << TLV_PORT_MODE_10G_10G_10G_10G_Q) | /* mode 5 */ + /* Do not use 10G_10G_10G_10G_Q1 (see bug63270) */ + (1U << TLV_PORT_MODE_10G_10G_10G_10G_Q1), /* mode 4 */ + 4, /* ports per cage */ + 1 /* first cage */ }, + /* + * Modes that on Medford allocate 4 adjacent port numbers to each + * connector, starting on cage 2. + * port 0 -> cage 2 + * port 1 -> cage 2 + * port 2 -> cage 2 + * port 3 -> cage 2 + */ { EFX_FAMILY_MEDFORD, - (1 << TLV_PORT_MODE_10G_10G_10G_10G_Q2), - 4, - 2 + (1U << TLV_PORT_MODE_10G_10G_10G_10G_Q2), /* mode 8 */ + 4, /* ports per cage */ + 2 /* first cage */ + }, + /* + * Modes that on Medford2 allocate each port number to a separate + * cage. + * port 0 -> cage 1 + * port 1 -> cage 2 + * port 2 -> cage 3 + * port 3 -> cage 4 + */ + { + EFX_FAMILY_MEDFORD2, + (1U << TLV_PORT_MODE_1x1_NA) | /* mode 0 */ + (1U << TLV_PORT_MODE_1x4_NA) | /* mode 1 */ + (1U << TLV_PORT_MODE_1x1_1x1) | /* mode 2 */ + (1U << TLV_PORT_MODE_1x2_NA) | /* mode 10 */ + (1U << TLV_PORT_MODE_1x2_1x2) | /* mode 12 */ + (1U << TLV_PORT_MODE_1x4_1x2) | /* mode 15 */ + (1U << TLV_PORT_MODE_1x2_1x4), /* mode 16 */ + 1, /* ports per cage */ + 1 /* first cage */ + }, + /* + * FIXME: Some port modes are not representable in this mapping: + * - TLV_PORT_MODE_1x2_2x1 (mode 17): + * port 0 -> cage 1 + * port 1 -> cage 2 + * port 2 -> cage 2 + */ + /* + * Modes that on Medford2 allocate 2 adjacent port numbers to each + * cage, starting on cage 1. + * port 0 -> cage 1 + * port 1 -> cage 1 + * port 2 -> cage 2 + * port 3 -> cage 2 + */ + { + EFX_FAMILY_MEDFORD2, + (1U << TLV_PORT_MODE_1x4_1x4) | /* mode 3 */ + (1U << TLV_PORT_MODE_2x1_2x1) | /* mode 4 */ + (1U << TLV_PORT_MODE_1x4_2x1) | /* mode 6 */ + (1U << TLV_PORT_MODE_2x1_1x4) | /* mode 7 */ + (1U << TLV_PORT_MODE_2x2_NA) | /* mode 13 */ + (1U << TLV_PORT_MODE_2x1_1x2), /* mode 18 */ + 2, /* ports per cage */ + 1 /* first cage */ + }, + /* + * Modes that on Medford2 allocate 2 adjacent port numbers to each + * cage, starting on cage 2. + * port 0 -> cage 2 + * port 1 -> cage 2 + */ + { + EFX_FAMILY_MEDFORD2, + (1U << TLV_PORT_MODE_NA_2x2), /* mode 14 */ + 2, /* ports per cage */ + 2 /* first cage */ + }, + /* + * Modes that on Medford2 allocate 4 adjacent port numbers to each + * connector, starting on cage 1. 
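The (count, offset) pairs in these table entries imply a division-plus-offset mapping from port number to cage. The final assignment is outside this hunk, so the formula below is an assumption reconstructed from the table comments: adjacent port numbers are grouped count at a time and cage numbering starts at offset.

#include <stdint.h>
#include <assert.h>

static uint32_t
external_port(uint32_t port, int32_t count, int32_t offset)
{
	return (port / (uint32_t)count + (uint32_t)offset);
}

int
main(void)
{
	/* SFN7x42Q (Monza) style entry: 2 ports per cage, first cage 1 */
	assert(external_port(0, 2, 1) == 1);
	assert(external_port(1, 2, 1) == 1);
	assert(external_port(2, 2, 1) == 2);
	assert(external_port(3, 2, 1) == 2);
	return (0);
}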
+ * port 0 -> cage 1 + * port 1 -> cage 1 + * port 2 -> cage 1 + * port 3 -> cage 1 + */ + { + EFX_FAMILY_MEDFORD2, + (1U << TLV_PORT_MODE_4x1_NA), /* mode 5 */ + 4, /* ports per cage */ + 1 /* first cage */ + }, + /* + * Modes that on Medford2 allocate 4 adjacent port numbers to each + * connector, starting on cage 2. + * port 0 -> cage 2 + * port 1 -> cage 2 + * port 2 -> cage 2 + * port 3 -> cage 2 + */ + { + EFX_FAMILY_MEDFORD2, + (1U << TLV_PORT_MODE_NA_4x1) | /* mode 8 */ + (1U << TLV_PORT_MODE_NA_1x2), /* mode 11 */ + 4, /* ports per cage */ + 2 /* first cage */ }, }; - __checkReturn efx_rc_t +static __checkReturn efx_rc_t ef10_external_port_mapping( __in efx_nic_t *enp, __in uint32_t port, @@ -1219,7 +1637,7 @@ ef10_external_port_mapping( if ((rc = efx_mcdi_get_port_modes(enp, &port_modes, ¤t)) != 0) { /* - * No current port mode information + * No current port mode information (i.e. Huntington) * - infer mapping from available modes */ if ((rc = efx_mcdi_get_port_modes(enp, @@ -1236,18 +1654,23 @@ ef10_external_port_mapping( } /* - * Infer the internal port -> external port mapping from + * Infer the internal port -> external number mapping from * the possible port modes for this NIC. */ for (i = 0; i < EFX_ARRAY_SIZE(__ef10_external_port_mappings); ++i) { - if (__ef10_external_port_mappings[i].family != - enp->en_family) + struct ef10_external_port_map_s *eepmp = + &__ef10_external_port_mappings[i]; + if (eepmp->family != enp->en_family) continue; - matches = (__ef10_external_port_mappings[i].modes_mask & - port_modes); + matches = (eepmp->modes_mask & port_modes); if (matches != 0) { - count = __ef10_external_port_mappings[i].count; - offset = __ef10_external_port_mappings[i].offset; + /* + * Some modes match. For some Huntington boards + * there will be multiple matches. The mapping on the + * last match is used. + */ + count = eepmp->count; + offset = eepmp->offset; port_modes &= ~matches; } } @@ -1272,18 +1695,193 @@ fail1: return (rc); } +static __checkReturn efx_rc_t +ef10_nic_board_cfg( + __in efx_nic_t *enp) +{ + const efx_nic_ops_t *enop = enp->en_enop; + efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); + efx_nic_cfg_t *encp = &(enp->en_nic_cfg); + ef10_link_state_t els; + efx_port_t *epp = &(enp->en_port); + uint32_t board_type = 0; + uint32_t base, nvec; + uint32_t port; + uint32_t mask; + uint32_t pf; + uint32_t vf; + uint8_t mac_addr[6] = { 0 }; + efx_rc_t rc; + + /* Get the (zero-based) MCDI port number */ + if ((rc = efx_mcdi_get_port_assignment(enp, &port)) != 0) + goto fail1; + + /* EFX MCDI interface uses one-based port numbers */ + emip->emi_port = port + 1; + + if ((rc = ef10_external_port_mapping(enp, port, + &encp->enc_external_port)) != 0) + goto fail2; + + /* + * Get PCIe function number from firmware (used for + * per-function privilege and dynamic config info). + * - PCIe PF: pf = PF number, vf = 0xffff. + * - PCIe VF: pf = parent PF, vf = VF number. + */ + if ((rc = efx_mcdi_get_function_info(enp, &pf, &vf)) != 0) + goto fail3; + + encp->enc_pf = pf; + encp->enc_vf = vf; + + /* MAC address for this function */ + if (EFX_PCI_FUNCTION_IS_PF(encp)) { + rc = efx_mcdi_get_mac_address_pf(enp, mac_addr); +#if EFSYS_OPT_ALLOW_UNCONFIGURED_NIC + /* + * Disable static config checking, ONLY for manufacturing test + * and setup at the factory, to allow the static config to be + * installed. 
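The check that follows this comment rejects locally administered MAC addresses. A self-contained model of that test (bit 1 of the first octet, per IEEE 802); the example addresses are only illustrative:

#include <stdint.h>
#include <stdio.h>

static int
mac_is_locally_administered(const uint8_t mac[6])
{
	return ((mac[0] & 0x02) != 0);
}

int
main(void)
{
	uint8_t global_addr[6] = { 0x00, 0x0f, 0x53, 0x01, 0x02, 0x03 };
	uint8_t local_addr[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };

	printf("%d %d\n", mac_is_locally_administered(global_addr),
	    mac_is_locally_administered(local_addr));
	return (0);
}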
+ */ +#else /* EFSYS_OPT_ALLOW_UNCONFIGURED_NIC */ + if ((rc == 0) && (mac_addr[0] & 0x02)) { + /* + * If the static config does not include a global MAC + * address pool then the board may return a locally + * administered MAC address (this should only happen on + * incorrectly programmed boards). + */ + rc = EINVAL; + } +#endif /* EFSYS_OPT_ALLOW_UNCONFIGURED_NIC */ + } else { + rc = efx_mcdi_get_mac_address_vf(enp, mac_addr); + } + if (rc != 0) + goto fail4; + + EFX_MAC_ADDR_COPY(encp->enc_mac_addr, mac_addr); + + /* Board configuration (legacy) */ + rc = efx_mcdi_get_board_cfg(enp, &board_type, NULL, NULL); + if (rc != 0) { + /* Unprivileged functions may not be able to read board cfg */ + if (rc == EACCES) + board_type = 0; + else + goto fail5; + } + + encp->enc_board_type = board_type; + encp->enc_clk_mult = 1; /* not used for EF10 */ + + /* Fill out fields in enp->en_port and enp->en_nic_cfg from MCDI */ + if ((rc = efx_mcdi_get_phy_cfg(enp)) != 0) + goto fail6; + + /* Obtain the default PHY advertised capabilities */ + if ((rc = ef10_phy_get_link(enp, &els)) != 0) + goto fail7; + epp->ep_default_adv_cap_mask = els.els_adv_cap_mask; + epp->ep_adv_cap_mask = els.els_adv_cap_mask; + + /* Check capabilities of running datapath firmware */ + if ((rc = ef10_get_datapath_caps(enp)) != 0) + goto fail8; + + /* Alignment for WPTR updates */ + encp->enc_rx_push_align = EF10_RX_WPTR_ALIGN; + + encp->enc_tx_dma_desc_size_max = EFX_MASK32(ESF_DZ_RX_KER_BYTE_CNT); + /* No boundary crossing limits */ + encp->enc_tx_dma_desc_boundary = 0; + + /* + * Maximum number of bytes into the frame the TCP header can start for + * firmware assisted TSO to work. + */ + encp->enc_tx_tso_tcp_header_offset_limit = EF10_TCP_HEADER_OFFSET_LIMIT; + + /* + * Set resource limits for MC_CMD_ALLOC_VIS. Note that we cannot use + * MC_CMD_GET_RESOURCE_LIMITS here as that reports the available + * resources (allocated to this PCIe function), which is zero until + * after we have allocated VIs. + */ + encp->enc_evq_limit = 1024; + encp->enc_rxq_limit = EFX_RXQ_LIMIT_TARGET; + encp->enc_txq_limit = EFX_TXQ_LIMIT_TARGET; + + encp->enc_buftbl_limit = 0xFFFFFFFF; + + /* Get interrupt vector limits */ + if ((rc = efx_mcdi_get_vector_cfg(enp, &base, &nvec, NULL)) != 0) { + if (EFX_PCI_FUNCTION_IS_PF(encp)) + goto fail9; + + /* Ignore error (cannot query vector limits from a VF). */ + base = 0; + nvec = 1024; + } + encp->enc_intr_vec_base = base; + encp->enc_intr_limit = nvec; + + /* + * Get the current privilege mask. Note that this may be modified + * dynamically, so this value is informational only. DO NOT use + * the privilege mask to check for sufficient privileges, as that + * can result in time-of-check/time-of-use bugs. 
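The function closing below unwinds through numbered fail labels, the error-handling idiom used throughout this file: each setup step gets its own label, and the unwind path fires one probe per failed step before returning the original error code. A standalone model, with printf standing in for EFSYS_PROBE:

#include <stdio.h>
#include <errno.h>

#define PROBE(label) printf("probe: %s\n", #label)

static int step_a(void) { return (0); }
static int step_b(void) { return (EIO); }	/* force a failure */

static int
setup(void)
{
	int rc;

	if ((rc = step_a()) != 0)
		goto fail1;
	if ((rc = step_b()) != 0)
		goto fail2;
	return (0);

fail2:
	PROBE(fail2);
fail1:
	PROBE(fail1);
	return (rc);
}

int
main(void)
{
	return (setup() != 0 ? 1 : 0);
}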
+ */ + if ((rc = ef10_get_privilege_mask(enp, &mask)) != 0) + goto fail10; + encp->enc_privilege_mask = mask; + + /* Get remaining controller-specific board config */ + if ((rc = enop->eno_board_cfg(enp)) != 0) + if (rc != EACCES) + goto fail11; + + return (0); + +fail11: + EFSYS_PROBE(fail11); +fail10: + EFSYS_PROBE(fail10); +fail9: + EFSYS_PROBE(fail9); +fail8: + EFSYS_PROBE(fail8); +fail7: + EFSYS_PROBE(fail7); +fail6: + EFSYS_PROBE(fail6); +fail5: + EFSYS_PROBE(fail5); +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} __checkReturn efx_rc_t ef10_nic_probe( __in efx_nic_t *enp) { - const efx_nic_ops_t *enop = enp->en_enop; efx_nic_cfg_t *encp = &(enp->en_nic_cfg); efx_drv_cfg_t *edcp = &(enp->en_drv_cfg); efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || - enp->en_family == EFX_FAMILY_MEDFORD); + enp->en_family == EFX_FAMILY_MEDFORD || + enp->en_family == EFX_FAMILY_MEDFORD2); /* Read and clear any assertion state */ if ((rc = efx_mcdi_read_assertion(enp)) != 0) @@ -1297,9 +1895,8 @@ ef10_nic_probe( if ((rc = efx_mcdi_drv_attach(enp, B_TRUE)) != 0) goto fail3; - if ((rc = enop->eno_board_cfg(enp)) != 0) - if (rc != EACCES) - goto fail4; + if ((rc = ef10_nic_board_cfg(enp)) != 0) + goto fail4; /* * Set default driver config limits (based on board config). @@ -1494,10 +2091,12 @@ ef10_nic_init( uint32_t i; uint32_t retry; uint32_t delay_us; + uint32_t vi_window_size; efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || - enp->en_family == EFX_FAMILY_MEDFORD); + enp->en_family == EFX_FAMILY_MEDFORD || + enp->en_family == EFX_FAMILY_MEDFORD2); /* Enable reporting of some events (e.g. link change) */ if ((rc = efx_mcdi_log_ctrl(enp)) != 0) @@ -1555,15 +2154,21 @@ ef10_nic_init( enp->en_arch.ef10.ena_pio_write_vi_base = vi_count - enp->en_arch.ef10.ena_piobuf_count; + EFSYS_ASSERT3U(enp->en_nic_cfg.enc_vi_window_shift, !=, + EFX_VI_WINDOW_SHIFT_INVALID); + EFSYS_ASSERT3U(enp->en_nic_cfg.enc_vi_window_shift, <=, + EFX_VI_WINDOW_SHIFT_64K); + vi_window_size = 1U << enp->en_nic_cfg.enc_vi_window_shift; + /* Save UC memory mapping details */ enp->en_arch.ef10.ena_uc_mem_map_offset = 0; if (enp->en_arch.ef10.ena_piobuf_count > 0) { enp->en_arch.ef10.ena_uc_mem_map_size = - (ER_DZ_TX_PIOBUF_STEP * + (vi_window_size * enp->en_arch.ef10.ena_pio_write_vi_base); } else { enp->en_arch.ef10.ena_uc_mem_map_size = - (ER_DZ_TX_PIOBUF_STEP * + (vi_window_size * enp->en_arch.ef10.ena_vi_count); } @@ -1573,7 +2178,7 @@ ef10_nic_init( enp->en_arch.ef10.ena_uc_mem_map_size; enp->en_arch.ef10.ena_wc_mem_map_size = - (ER_DZ_TX_PIOBUF_STEP * + (vi_window_size * enp->en_arch.ef10.ena_piobuf_count); /* Link piobufs to extra VIs in WC mapping */ @@ -1653,7 +2258,8 @@ ef10_nic_get_vi_pool( __out uint32_t *vi_countp) { EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || - enp->en_family == EFX_FAMILY_MEDFORD); + enp->en_family == EFX_FAMILY_MEDFORD || + enp->en_family == EFX_FAMILY_MEDFORD2); /* * Report VIs that the client driver can use. 
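The UC/WC BAR mapping sizes computed in ef10_nic_init above scale with the VI window: each VI occupies 2^enc_vi_window_shift bytes, the uncached mapping covers the ordinary VIs, and the write-combined mapping covers the VIs backing PIO buffers. A sketch of the same arithmetic; the VI and PIO buffer counts are made up, only the shift-to-window-size relation mirrors the code:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t vi_window_shift = 13;	/* 8K window (Huntington/Medford) */
	uint32_t vi_count = 64;		/* illustrative */
	uint32_t piobuf_count = 16;	/* illustrative */
	uint32_t vi_window_size = 1U << vi_window_shift;

	uint32_t pio_write_vi_base = vi_count - piobuf_count;
	size_t uc_size = (size_t)vi_window_size * pio_write_vi_base;
	size_t wc_size = (size_t)vi_window_size * piobuf_count;

	printf("UC map %zu bytes, WC map %zu bytes\n", uc_size, wc_size);
	return (0);
}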
@@ -1674,7 +2280,8 @@ ef10_nic_get_bar_region( efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || - enp->en_family == EFX_FAMILY_MEDFORD); + enp->en_family == EFX_FAMILY_MEDFORD || + enp->en_family == EFX_FAMILY_MEDFORD2); /* * TODO: Specify host memory mapping alignment and granularity @@ -1770,5 +2377,87 @@ fail1: #endif /* EFSYS_OPT_DIAG */ +#if EFSYS_OPT_FW_SUBVARIANT_AWARE + + __checkReturn efx_rc_t +efx_mcdi_get_nic_global( + __in efx_nic_t *enp, + __in uint32_t key, + __out uint32_t *valuep) +{ + efx_mcdi_req_t req; + uint8_t payload[MAX(MC_CMD_GET_NIC_GLOBAL_IN_LEN, + MC_CMD_GET_NIC_GLOBAL_OUT_LEN)]; + efx_rc_t rc; + + (void) memset(payload, 0, sizeof (payload)); + req.emr_cmd = MC_CMD_GET_NIC_GLOBAL; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_GET_NIC_GLOBAL_IN_LEN; + req.emr_out_buf = payload; + req.emr_out_length = MC_CMD_GET_NIC_GLOBAL_OUT_LEN; + + MCDI_IN_SET_DWORD(req, GET_NIC_GLOBAL_IN_KEY, key); + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + if (req.emr_out_length_used != MC_CMD_GET_NIC_GLOBAL_OUT_LEN) { + rc = EMSGSIZE; + goto fail2; + } + + *valuep = MCDI_OUT_DWORD(req, GET_NIC_GLOBAL_OUT_VALUE); + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_mcdi_set_nic_global( + __in efx_nic_t *enp, + __in uint32_t key, + __in uint32_t value) +{ + efx_mcdi_req_t req; + uint8_t payload[MC_CMD_SET_NIC_GLOBAL_IN_LEN]; + efx_rc_t rc; + + (void) memset(payload, 0, sizeof (payload)); + req.emr_cmd = MC_CMD_SET_NIC_GLOBAL; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_SET_NIC_GLOBAL_IN_LEN; + req.emr_out_buf = NULL; + req.emr_out_length = 0; + + MCDI_IN_SET_DWORD(req, SET_NIC_GLOBAL_IN_KEY, key); + MCDI_IN_SET_DWORD(req, SET_NIC_GLOBAL_IN_VALUE, value); + + efx_mcdi_execute(enp, &req); + + if (req.emr_rc != 0) { + rc = req.emr_rc; + goto fail1; + } + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +#endif /* EFSYS_OPT_FW_SUBVARIANT_AWARE */ -#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ +#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */ diff --git a/drivers/net/sfc/base/ef10_nvram.c b/drivers/net/sfc/base/ef10_nvram.c index 1904597c..2883ec8f 100644 --- a/drivers/net/sfc/base/ef10_nvram.c +++ b/drivers/net/sfc/base/ef10_nvram.c @@ -7,7 +7,7 @@ #include "efx.h" #include "efx_impl.h" -#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD +#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 #if EFSYS_OPT_VPD || EFSYS_OPT_NVRAM @@ -1349,12 +1349,16 @@ ef10_nvram_partn_read_tlv( */ retry = 10; do { - rc = ef10_nvram_read_tlv_segment(enp, partn, 0, - seg_data, partn_size); - } while ((rc == EAGAIN) && (--retry > 0)); + if ((rc = ef10_nvram_read_tlv_segment(enp, partn, 0, + seg_data, partn_size)) != 0) + --retry; + } while ((rc == EAGAIN) && (retry > 0)); if (rc != 0) { /* Failed to obtain consistent segment data */ + if (rc == EAGAIN) + rc = EIO; + goto fail4; } @@ -2152,6 +2156,20 @@ static ef10_parttbl_entry_t medford_parttbl[] = { PARTN_MAP_ENTRY(MUM_FIRMWARE, ALL, MUM_FIRMWARE), }; +static ef10_parttbl_entry_t medford2_parttbl[] = { + /* partn ports nvtype */ + PARTN_MAP_ENTRY(MC_FIRMWARE, ALL, MC_FIRMWARE), + PARTN_MAP_ENTRY(MC_FIRMWARE_BACKUP, ALL, MC_GOLDEN), + PARTN_MAP_ENTRY(EXPANSION_ROM, ALL, BOOTROM), + PARTN_MAP_ENTRY(EXPROM_CONFIG, ALL, BOOTROM_CFG), + PARTN_MAP_ENTRY(DYNAMIC_CONFIG, ALL, DYNAMIC_CFG), + 
PARTN_MAP_ENTRY(FPGA, ALL, FPGA), + PARTN_MAP_ENTRY(FPGA_BACKUP, ALL, FPGA_BACKUP), + PARTN_MAP_ENTRY(LICENSE, ALL, LICENSE), + PARTN_MAP_ENTRY(EXPANSION_UEFI, ALL, UEFIROM), + PARTN_MAP_ENTRY(MUM_FIRMWARE, ALL, MUM_FIRMWARE), +}; + static __checkReturn efx_rc_t ef10_parttbl_get( __in efx_nic_t *enp, @@ -2169,6 +2187,11 @@ ef10_parttbl_get( *parttbl_rowsp = EFX_ARRAY_SIZE(medford_parttbl); break; + case EFX_FAMILY_MEDFORD2: + *parttblp = medford2_parttbl; + *parttbl_rowsp = EFX_ARRAY_SIZE(medford2_parttbl); + break; + default: EFSYS_ASSERT(B_FALSE); return (EINVAL); @@ -2362,4 +2385,4 @@ fail1: #endif /* EFSYS_OPT_NVRAM */ -#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ +#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */ diff --git a/drivers/net/sfc/base/ef10_phy.c b/drivers/net/sfc/base/ef10_phy.c index aa8d6a2b..84acb70a 100644 --- a/drivers/net/sfc/base/ef10_phy.c +++ b/drivers/net/sfc/base/ef10_phy.c @@ -7,7 +7,7 @@ #include "efx.h" #include "efx_impl.h" -#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD +#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 static void mcdi_phy_decode_cap( @@ -16,6 +16,32 @@ mcdi_phy_decode_cap( { uint32_t mask; +#define CHECK_CAP(_cap) \ + EFX_STATIC_ASSERT(EFX_PHY_CAP_##_cap == MC_CMD_PHY_CAP_##_cap##_LBN) + + CHECK_CAP(10HDX); + CHECK_CAP(10FDX); + CHECK_CAP(100HDX); + CHECK_CAP(100FDX); + CHECK_CAP(1000HDX); + CHECK_CAP(1000FDX); + CHECK_CAP(10000FDX); + CHECK_CAP(25000FDX); + CHECK_CAP(40000FDX); + CHECK_CAP(50000FDX); + CHECK_CAP(100000FDX); + CHECK_CAP(PAUSE); + CHECK_CAP(ASYM); + CHECK_CAP(AN); + CHECK_CAP(DDM); + CHECK_CAP(BASER_FEC); + CHECK_CAP(BASER_FEC_REQUESTED); + CHECK_CAP(RS_FEC); + CHECK_CAP(RS_FEC_REQUESTED); + CHECK_CAP(25G_BASER_FEC); + CHECK_CAP(25G_BASER_FEC_REQUESTED); +#undef CHECK_CAP + mask = 0; if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10HDX_LBN)) mask |= (1 << EFX_PHY_CAP_10HDX); @@ -31,8 +57,15 @@ mcdi_phy_decode_cap( mask |= (1 << EFX_PHY_CAP_1000FDX); if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN)) mask |= (1 << EFX_PHY_CAP_10000FDX); + if (mcdi_cap & (1 << MC_CMD_PHY_CAP_25000FDX_LBN)) + mask |= (1 << EFX_PHY_CAP_25000FDX); if (mcdi_cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) mask |= (1 << EFX_PHY_CAP_40000FDX); + if (mcdi_cap & (1 << MC_CMD_PHY_CAP_50000FDX_LBN)) + mask |= (1 << EFX_PHY_CAP_50000FDX); + if (mcdi_cap & (1 << MC_CMD_PHY_CAP_100000FDX_LBN)) + mask |= (1 << EFX_PHY_CAP_100000FDX); + if (mcdi_cap & (1 << MC_CMD_PHY_CAP_PAUSE_LBN)) mask |= (1 << EFX_PHY_CAP_PAUSE); if (mcdi_cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN)) @@ -40,6 +73,22 @@ mcdi_phy_decode_cap( if (mcdi_cap & (1 << MC_CMD_PHY_CAP_AN_LBN)) mask |= (1 << EFX_PHY_CAP_AN); + /* FEC caps (supported on Medford2 and later) */ + if (mcdi_cap & (1 << MC_CMD_PHY_CAP_BASER_FEC_LBN)) + mask |= (1 << EFX_PHY_CAP_BASER_FEC); + if (mcdi_cap & (1 << MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_LBN)) + mask |= (1 << EFX_PHY_CAP_BASER_FEC_REQUESTED); + + if (mcdi_cap & (1 << MC_CMD_PHY_CAP_RS_FEC_LBN)) + mask |= (1 << EFX_PHY_CAP_RS_FEC); + if (mcdi_cap & (1 << MC_CMD_PHY_CAP_RS_FEC_REQUESTED_LBN)) + mask |= (1 << EFX_PHY_CAP_RS_FEC_REQUESTED); + + if (mcdi_cap & (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_LBN)) + mask |= (1 << EFX_PHY_CAP_25G_BASER_FEC); + if (mcdi_cap & (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN)) + mask |= (1 << EFX_PHY_CAP_25G_BASER_FEC_REQUESTED); + *maskp = mask; } @@ -61,8 +110,14 @@ mcdi_phy_decode_link_mode( if (!up) *link_modep = EFX_LINK_DOWN; + else if (speed == 100000 && fd) + *link_modep = EFX_LINK_100000FDX; + 
else if (speed == 50000 && fd) + *link_modep = EFX_LINK_50000FDX; else if (speed == 40000 && fd) *link_modep = EFX_LINK_40000FDX; + else if (speed == 25000 && fd) + *link_modep = EFX_LINK_25000FDX; else if (speed == 10000 && fd) *link_modep = EFX_LINK_10000FDX; else if (speed == 1000) @@ -116,9 +171,18 @@ ef10_phy_link_ev( case MCDI_EVENT_LINKCHANGE_SPEED_10G: speed = 10000; break; + case MCDI_EVENT_LINKCHANGE_SPEED_25G: + speed = 25000; + break; case MCDI_EVENT_LINKCHANGE_SPEED_40G: speed = 40000; break; + case MCDI_EVENT_LINKCHANGE_SPEED_50G: + speed = 50000; + break; + case MCDI_EVENT_LINKCHANGE_SPEED_100G: + speed = 100000; + break; default: speed = 0; break; @@ -212,26 +276,10 @@ ef10_phy_get_link( &elsp->els_link_mode, &elsp->els_fcntl); #if EFSYS_OPT_LOOPBACK - /* Assert the MC_CMD_LOOPBACK and EFX_LOOPBACK namespace agree */ - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_NONE == EFX_LOOPBACK_OFF); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_DATA == EFX_LOOPBACK_DATA); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMAC == EFX_LOOPBACK_GMAC); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGMII == EFX_LOOPBACK_XGMII); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGXS == EFX_LOOPBACK_XGXS); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI == EFX_LOOPBACK_XAUI); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII == EFX_LOOPBACK_GMII); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII == EFX_LOOPBACK_SGMII); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGBR == EFX_LOOPBACK_XGBR); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI == EFX_LOOPBACK_XFI); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI_FAR == EFX_LOOPBACK_XAUI_FAR); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII_FAR == EFX_LOOPBACK_GMII_FAR); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII_FAR == EFX_LOOPBACK_SGMII_FAR); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI_FAR == EFX_LOOPBACK_XFI_FAR); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GPHY == EFX_LOOPBACK_GPHY); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PHYXS == EFX_LOOPBACK_PHY_XS); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PCS == EFX_LOOPBACK_PCS); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PMAPMD == EFX_LOOPBACK_PMA_PMD); - + /* + * MC_CMD_LOOPBACK and EFX_LOOPBACK names are equivalent, so use the + * MCDI value directly. Agreement is checked in efx_loopback_mask(). 
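The speed/duplex decode above grows by one if-branch per new speed. A condensed model of the same logic; the enum names are stand-ins for the efx_link_mode_t values, and the 1G case is simplified:

#include <stdint.h>

typedef enum {
	LINK_DOWN, LINK_1000, LINK_10000FDX, LINK_25000FDX,
	LINK_40000FDX, LINK_50000FDX, LINK_100000FDX, LINK_UNKNOWN
} link_mode_t;

static link_mode_t
decode_link_mode(int up, unsigned int speed_mbps, int fd)
{
	if (!up)
		return (LINK_DOWN);
	if (fd) {
		switch (speed_mbps) {
		case 100000: return (LINK_100000FDX);
		case 50000: return (LINK_50000FDX);
		case 40000: return (LINK_40000FDX);
		case 25000: return (LINK_25000FDX);
		case 10000: return (LINK_10000FDX);
		}
	}
	if (speed_mbps == 1000)
		return (LINK_1000);
	return (LINK_UNKNOWN);
}

int
main(void)
{
	return (decode_link_mode(1, 25000, 1) == LINK_25000FDX ? 0 : 1);
}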
+ */ elsp->els_loopback = MCDI_OUT_DWORD(req, GET_LINK_OUT_LOOPBACK_MODE); #endif /* EFSYS_OPT_LOOPBACK */ @@ -288,8 +336,33 @@ ef10_phy_reconfigure( PHY_CAP_ASYM, (cap_mask >> EFX_PHY_CAP_ASYM) & 0x1, PHY_CAP_AN, (cap_mask >> EFX_PHY_CAP_AN) & 0x1); /* Too many fields for for POPULATE macros, so insert this afterwards */ + MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP, + PHY_CAP_25000FDX, (cap_mask >> EFX_PHY_CAP_25000FDX) & 0x1); MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP, PHY_CAP_40000FDX, (cap_mask >> EFX_PHY_CAP_40000FDX) & 0x1); + MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP, + PHY_CAP_50000FDX, (cap_mask >> EFX_PHY_CAP_50000FDX) & 0x1); + MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP, + PHY_CAP_100000FDX, (cap_mask >> EFX_PHY_CAP_100000FDX) & 0x1); + + MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP, + PHY_CAP_BASER_FEC, (cap_mask >> EFX_PHY_CAP_BASER_FEC) & 0x1); + MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP, + PHY_CAP_BASER_FEC_REQUESTED, + (cap_mask >> EFX_PHY_CAP_BASER_FEC_REQUESTED) & 0x1); + + MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP, + PHY_CAP_RS_FEC, (cap_mask >> EFX_PHY_CAP_RS_FEC) & 0x1); + MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP, + PHY_CAP_RS_FEC_REQUESTED, + (cap_mask >> EFX_PHY_CAP_RS_FEC_REQUESTED) & 0x1); + + MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP, + PHY_CAP_25G_BASER_FEC, + (cap_mask >> EFX_PHY_CAP_25G_BASER_FEC) & 0x1); + MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP, + PHY_CAP_25G_BASER_FEC_REQUESTED, + (cap_mask >> EFX_PHY_CAP_25G_BASER_FEC_REQUESTED) & 0x1); #if EFSYS_OPT_LOOPBACK MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_MODE, @@ -304,9 +377,18 @@ ef10_phy_reconfigure( case EFX_LINK_10000FDX: speed = 10000; break; + case EFX_LINK_25000FDX: + speed = 25000; + break; case EFX_LINK_40000FDX: speed = 40000; break; + case EFX_LINK_50000FDX: + speed = 50000; + break; + case EFX_LINK_100000FDX: + speed = 100000; + break; default: speed = 0; } @@ -606,4 +688,4 @@ ef10_bist_stop( #endif /* EFSYS_OPT_BIST */ -#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ +#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */ diff --git a/drivers/net/sfc/base/ef10_rx.c b/drivers/net/sfc/base/ef10_rx.c index 2bb6705d..313a3691 100644 --- a/drivers/net/sfc/base/ef10_rx.c +++ b/drivers/net/sfc/base/ef10_rx.c @@ -8,7 +8,7 @@ #include "efx_impl.h" -#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD +#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 static __checkReturn efx_rc_t @@ -21,12 +21,16 @@ efx_mcdi_init_rxq( __in efsys_mem_t *esmp, __in boolean_t disable_scatter, __in boolean_t want_inner_classes, - __in uint32_t ps_bufsize) + __in uint32_t ps_bufsize, + __in uint32_t es_bufs_per_desc, + __in uint32_t es_max_dma_len, + __in uint32_t es_buf_stride, + __in uint32_t hol_block_timeout) { efx_nic_cfg_t *encp = &(enp->en_nic_cfg); efx_mcdi_req_t req; - uint8_t payload[MAX(MC_CMD_INIT_RXQ_EXT_IN_LEN, - MC_CMD_INIT_RXQ_EXT_OUT_LEN)]; + uint8_t payload[MAX(MC_CMD_INIT_RXQ_V3_IN_LEN, + MC_CMD_INIT_RXQ_V3_OUT_LEN)]; int npages = EFX_RXQ_NBUFS(ndescs); int i; efx_qword_t *dma_addr; @@ -37,8 +41,15 @@ efx_mcdi_init_rxq( EFSYS_ASSERT3U(ndescs, <=, EFX_RXQ_MAXNDESCS); + if ((esmp == NULL) || (EFSYS_MEM_SIZE(esmp) < EFX_RXQ_SIZE(ndescs))) { + rc = EINVAL; + goto fail1; + } + if (ps_bufsize > 0) dma_mode = MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM; + else if (es_bufs_per_desc > 0) + dma_mode = MC_CMD_INIT_RXQ_V3_IN_EQUAL_STRIDE_SUPER_BUFFER; else dma_mode = MC_CMD_INIT_RXQ_EXT_IN_SINGLE_PACKET; @@ -65,9 +76,9 @@ efx_mcdi_init_rxq( (void) 
memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_INIT_RXQ; req.emr_in_buf = payload; - req.emr_in_length = MC_CMD_INIT_RXQ_EXT_IN_LEN; + req.emr_in_length = MC_CMD_INIT_RXQ_V3_IN_LEN; req.emr_out_buf = payload; - req.emr_out_length = MC_CMD_INIT_RXQ_EXT_OUT_LEN; + req.emr_out_length = MC_CMD_INIT_RXQ_V3_OUT_LEN; MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_SIZE, ndescs); MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_TARGET_EVQ, target_evq); @@ -87,6 +98,19 @@ efx_mcdi_init_rxq( MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_OWNER_ID, 0); MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_PORT_ID, EVB_PORT_ID_ASSIGNED); + if (es_bufs_per_desc > 0) { + MCDI_IN_SET_DWORD(req, + INIT_RXQ_V3_IN_ES_PACKET_BUFFERS_PER_BUCKET, + es_bufs_per_desc); + MCDI_IN_SET_DWORD(req, + INIT_RXQ_V3_IN_ES_MAX_DMA_LEN, es_max_dma_len); + MCDI_IN_SET_DWORD(req, + INIT_RXQ_V3_IN_ES_PACKET_STRIDE, es_buf_stride); + MCDI_IN_SET_DWORD(req, + INIT_RXQ_V3_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT, + hol_block_timeout); + } + dma_addr = MCDI_IN2(req, efx_qword_t, INIT_RXQ_IN_DMA_ADDR); addr = EFSYS_MEM_ADDR(esmp); @@ -103,11 +127,13 @@ efx_mcdi_init_rxq( if (req.emr_rc != 0) { rc = req.emr_rc; - goto fail1; + goto fail2; } return (0); +fail2: + EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); @@ -291,11 +317,34 @@ efx_mcdi_rss_context_set_flags( __in uint32_t rss_context, __in efx_rx_hash_type_t type) { + efx_nic_cfg_t *encp = &enp->en_nic_cfg; + efx_rx_hash_type_t type_ipv4; + efx_rx_hash_type_t type_ipv4_tcp; + efx_rx_hash_type_t type_ipv6; + efx_rx_hash_type_t type_ipv6_tcp; + efx_rx_hash_type_t modes; efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN, MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN)]; efx_rc_t rc; + EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV4_TCP_LBN == + MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_LBN); + EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV4_TCP_WIDTH == + MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_WIDTH); + EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV4_LBN == + MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_LBN); + EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV4_WIDTH == + MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_WIDTH); + EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV6_TCP_LBN == + MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_LBN); + EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV6_TCP_WIDTH == + MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_WIDTH); + EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV6_LBN == + MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_LBN); + EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV6_WIDTH == + MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_WIDTH); + if (rss_context == EF10_RSS_CONTEXT_INVALID) { rc = EINVAL; goto fail1; @@ -311,15 +360,57 @@ efx_mcdi_rss_context_set_flags( MCDI_IN_SET_DWORD(req, RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID, rss_context); - MCDI_IN_POPULATE_DWORD_4(req, RSS_CONTEXT_SET_FLAGS_IN_FLAGS, + type_ipv4 = EFX_RX_HASH(IPV4, 2TUPLE) | EFX_RX_HASH(IPV4_TCP, 2TUPLE) | + EFX_RX_HASH(IPV4_UDP, 2TUPLE); + type_ipv4_tcp = EFX_RX_HASH(IPV4_TCP, 4TUPLE); + type_ipv6 = EFX_RX_HASH(IPV6, 2TUPLE) | EFX_RX_HASH(IPV6_TCP, 2TUPLE) | + EFX_RX_HASH(IPV6_UDP, 2TUPLE); + type_ipv6_tcp = EFX_RX_HASH(IPV6_TCP, 4TUPLE); + + /* + * Create a copy of the original hash type. + * The copy will be used to fill in RSS_MODE bits and + * may be cleared beforehand. The original variable + * and, thus, EN bits will remain unaffected. + */ + modes = type; + + /* + * If the firmware lacks support for additional modes, RSS_MODE + * fields must contain zeros, otherwise the operation will fail. 
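The EXTRACT_RSS_MODE macro defined below is a plain bitfield extract: it pulls the per-traffic-class RSS mode out of the packed hash-type word. A generic sketch with made-up field positions:

#include <stdint.h>
#include <stdio.h>

static uint32_t
extract_field(uint32_t word, unsigned int lbn, unsigned int width)
{
	return ((word >> lbn) & ((1U << width) - 1));
}

int
main(void)
{
	/* Hypothetical layout: IPV4_TCP mode at bits [11:8], value 0x5 */
	uint32_t hash_type = 0x5U << 8;

	printf("ipv4-tcp rss mode: 0x%x\n", extract_field(hash_type, 8, 4));
	return (0);
}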
+ */ + if (encp->enc_rx_scale_additional_modes_supported == B_FALSE) + modes = 0; + +#define EXTRACT_RSS_MODE(_type, _class) \ + (EFX_EXTRACT_NATIVE(_type, 0, 31, \ + EFX_LOW_BIT(EFX_RX_CLASS_##_class), \ + EFX_HIGH_BIT(EFX_RX_CLASS_##_class)) & \ + EFX_MASK32(EFX_RX_CLASS_##_class)) + + MCDI_IN_POPULATE_DWORD_10(req, RSS_CONTEXT_SET_FLAGS_IN_FLAGS, RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN, - (type & EFX_RX_HASH_IPV4) ? 1 : 0, + ((type & type_ipv4) == type_ipv4) ? 1 : 0, RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN, - (type & EFX_RX_HASH_TCPIPV4) ? 1 : 0, + ((type & type_ipv4_tcp) == type_ipv4_tcp) ? 1 : 0, RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN, - (type & EFX_RX_HASH_IPV6) ? 1 : 0, + ((type & type_ipv6) == type_ipv6) ? 1 : 0, RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN, - (type & EFX_RX_HASH_TCPIPV6) ? 1 : 0); + ((type & type_ipv6_tcp) == type_ipv6_tcp) ? 1 : 0, + RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE, + EXTRACT_RSS_MODE(modes, IPV4_TCP), + RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE, + EXTRACT_RSS_MODE(modes, IPV4_UDP), + RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE, + EXTRACT_RSS_MODE(modes, IPV4), + RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE, + EXTRACT_RSS_MODE(modes, IPV6_TCP), + RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV6_RSS_MODE, + EXTRACT_RSS_MODE(modes, IPV6_UDP), + RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE, + EXTRACT_RSS_MODE(modes, IPV6)); + +#undef EXTRACT_RSS_MODE efx_mcdi_execute(enp, &req); @@ -544,12 +635,13 @@ ef10_rx_scale_mode_set( __in efx_rx_hash_type_t type, __in boolean_t insert) { + efx_nic_cfg_t *encp = &enp->en_nic_cfg; efx_rc_t rc; - EFSYS_ASSERT3U(alg, ==, EFX_RX_HASHALG_TOEPLITZ); EFSYS_ASSERT3U(insert, ==, B_TRUE); - if ((alg != EFX_RX_HASHALG_TOEPLITZ) || (insert == B_FALSE)) { + if ((encp->enc_rx_scale_hash_alg_mask & (1U << alg)) == 0 || + insert == B_FALSE) { rc = EINVAL; goto fail1; } @@ -698,6 +790,7 @@ ef10_rx_prefix_hash( _NOTE(ARGUNUSED(enp)) switch (func) { + case EFX_RX_HASHALG_PACKED_STREAM: case EFX_RX_HASHALG_TOEPLITZ: return (buffer[0] | (buffer[1] << 8) | @@ -795,8 +888,8 @@ ef10_rx_qpush( EFX_DMA_SYNC_QUEUE_FOR_DEVICE(erp->er_esmp, erp->er_mask + 1, wptr, pushed & erp->er_mask); EFSYS_PIO_WRITE_BARRIER(); - EFX_BAR_TBL_WRITED(enp, ER_DZ_RX_DESC_UPD_REG, - erp->er_index, &dword, B_FALSE); + EFX_BAR_VI_WRITED(enp, ER_DZ_RX_DESC_UPD_REG, + erp->er_index, &dword, B_FALSE); } #if EFSYS_OPT_RX_PACKED_STREAM @@ -827,7 +920,7 @@ ef10_rx_qpush_ps_credits( ERF_DZ_RX_DESC_MAGIC_CMD, ERE_DZ_RX_DESC_MAGIC_CMD_PS_CREDITS, ERF_DZ_RX_DESC_MAGIC_DATA, credits); - EFX_BAR_TBL_WRITED(enp, ER_DZ_RX_DESC_UPD_REG, + EFX_BAR_VI_WRITED(enp, ER_DZ_RX_DESC_UPD_REG, erp->er_index, &dword, B_FALSE); rxq_state->eers_rx_packed_stream_credits = 0; @@ -926,7 +1019,7 @@ ef10_rx_qcreate( __in unsigned int index, __in unsigned int label, __in efx_rxq_type_t type, - __in uint32_t type_data, + __in const efx_rxq_type_data_t *type_data, __in efsys_mem_t *esmp, __in size_t ndescs, __in uint32_t id, @@ -939,6 +1032,10 @@ ef10_rx_qcreate( boolean_t disable_scatter; boolean_t want_inner_classes; unsigned int ps_buf_size; + uint32_t es_bufs_per_desc = 0; + uint32_t es_max_dma_len = 0; + uint32_t es_buf_stride = 0; + uint32_t hol_block_timeout = 0; _NOTE(ARGUNUSED(id, erp, type_data)) @@ -965,7 +1062,7 @@ ef10_rx_qcreate( break; #if EFSYS_OPT_RX_PACKED_STREAM case EFX_RXQ_TYPE_PACKED_STREAM: - switch (type_data) { + switch (type_data->ertd_packed_stream.eps_buf_size) { case EFX_RXQ_PACKED_STREAM_BUF_SIZE_1M: ps_buf_size = MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_1M; break; @@ -987,6 +1084,19 @@ 
ef10_rx_qcreate( } break; #endif /* EFSYS_OPT_RX_PACKED_STREAM */ +#if EFSYS_OPT_RX_ES_SUPER_BUFFER + case EFX_RXQ_TYPE_ES_SUPER_BUFFER: + ps_buf_size = 0; + es_bufs_per_desc = + type_data->ertd_es_super_buffer.eessb_bufs_per_desc; + es_max_dma_len = + type_data->ertd_es_super_buffer.eessb_max_dma_len; + es_buf_stride = + type_data->ertd_es_super_buffer.eessb_buf_stride; + hol_block_timeout = + type_data->ertd_es_super_buffer.eessb_hol_block_timeout; + break; +#endif /* EFSYS_OPT_RX_ES_SUPER_BUFFER */ default: rc = ENOTSUP; goto fail4; @@ -1010,6 +1120,27 @@ ef10_rx_qcreate( EFSYS_ASSERT(ps_buf_size == 0); #endif /* EFSYS_OPT_RX_PACKED_STREAM */ +#if EFSYS_OPT_RX_ES_SUPER_BUFFER + if (es_bufs_per_desc > 0) { + if (encp->enc_rx_es_super_buffer_supported == B_FALSE) { + rc = ENOTSUP; + goto fail7; + } + if (!IS_P2ALIGNED(es_max_dma_len, + EFX_RX_ES_SUPER_BUFFER_BUF_ALIGNMENT)) { + rc = EINVAL; + goto fail8; + } + if (!IS_P2ALIGNED(es_buf_stride, + EFX_RX_ES_SUPER_BUFFER_BUF_ALIGNMENT)) { + rc = EINVAL; + goto fail9; + } + } +#else /* EFSYS_OPT_RX_ES_SUPER_BUFFER */ + EFSYS_ASSERT(es_bufs_per_desc == 0); +#endif /* EFSYS_OPT_RX_ES_SUPER_BUFFER */ + /* Scatter can only be disabled if the firmware supports doing so */ if (flags & EFX_RXQ_FLAG_SCATTER) disable_scatter = B_FALSE; @@ -1023,8 +1154,9 @@ ef10_rx_qcreate( if ((rc = efx_mcdi_init_rxq(enp, ndescs, eep->ee_index, label, index, esmp, disable_scatter, want_inner_classes, - ps_buf_size)) != 0) - goto fail7; + ps_buf_size, es_bufs_per_desc, es_max_dma_len, + es_buf_stride, hol_block_timeout)) != 0) + goto fail10; erp->er_eep = eep; erp->er_label = label; @@ -1035,8 +1167,16 @@ ef10_rx_qcreate( return (0); +fail10: + EFSYS_PROBE(fail10); +#if EFSYS_OPT_RX_ES_SUPER_BUFFER +fail9: + EFSYS_PROBE(fail9); +fail8: + EFSYS_PROBE(fail8); fail7: EFSYS_PROBE(fail7); +#endif /* EFSYS_OPT_RX_ES_SUPER_BUFFER */ #if EFSYS_OPT_RX_PACKED_STREAM fail6: EFSYS_PROBE(fail6); @@ -1087,4 +1227,4 @@ ef10_rx_fini( #endif /* EFSYS_OPT_RX_SCALE */ } -#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ +#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */ diff --git a/drivers/net/sfc/base/ef10_signed_image_layout.h b/drivers/net/sfc/base/ef10_signed_image_layout.h new file mode 100644 index 00000000..a35d1601 --- /dev/null +++ b/drivers/net/sfc/base/ef10_signed_image_layout.h @@ -0,0 +1,62 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright (c) 2016-2018 Solarflare Communications Inc. + * All rights reserved. + */ + +/* These structures define the layouts for the signed firmware image binary + * saved in NVRAM. The original image is in the Cryptographic message + * syntax (CMS) format which contains the bootable firmware binary plus the + * signatures. The entire image is written into NVRAM to enable the firmware + * to validate the signatures. However, the bootrom still requires the + * bootable-image to start at offset 0 of the NVRAM partition. Hence the image + * is parsed upfront by host utilities (sfupdate) and written into nvram as + * 'signed_image_chunks' described by a header. + * + * This file is used by the MC as well as host-utilities (sfupdate). 
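The equal-stride super-buffer parameters validated in ef10_rx_qcreate above use IS_P2ALIGNED, which is only valid for power-of-two alignments. A minimal model; the alignment value is illustrative, not the real EFX_RX_ES_SUPER_BUFFER_BUF_ALIGNMENT:

#include <stdint.h>
#include <assert.h>

#define P2ALIGNED(x, align) (((x) & ((align) - 1)) == 0)

int
main(void)
{
	uint32_t buf_align = 64;	/* illustrative power of two */

	assert(P2ALIGNED(4096, buf_align));
	assert(!P2ALIGNED(4100, buf_align));
	return (0);
}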
+ */
+
+
+#ifndef CI_MGMT_SIGNED_IMAGE_LAYOUT_H
+#define CI_MGMT_SIGNED_IMAGE_LAYOUT_H
+
+/* Signed image chunk type identifiers */
+enum {
+ SIGNED_IMAGE_CHUNK_CMS_HEADER, /* CMS header describing the signed data */
+ SIGNED_IMAGE_CHUNK_REFLASH_HEADER, /* Reflash header */
+ SIGNED_IMAGE_CHUNK_IMAGE, /* Bootable binary image */
+ SIGNED_IMAGE_CHUNK_REFLASH_TRAILER, /* Reflash trailer */
+ SIGNED_IMAGE_CHUNK_SIGNATURE, /* Remaining contents of the signed image,
+ * including the certificates and signature */
+ NUM_SIGNED_IMAGE_CHUNKS,
+};
+
+/* Magic */
+#define SIGNED_IMAGE_CHUNK_HDR_MAGIC 0xEF105161 /* EF10 SIGned Image */
+
+/* Initial version definition - version 1 */
+#define SIGNED_IMAGE_CHUNK_HDR_VERSION 0x1
+
+/* Header length is 32 bytes */
+#define SIGNED_IMAGE_CHUNK_HDR_LEN 32
+/* Structure describing the header of each chunk of signed image
+ * as stored in nvram
+ */
+typedef struct signed_image_chunk_hdr_e {
+ /* Magic field to recognise a valid entry
+ * should match SIGNED_IMAGE_CHUNK_HDR_MAGIC
+ */
+ uint32_t magic;
+ /* Version number of this header */
+ uint32_t version;
+ /* Chunk type identifier */
+ uint32_t id;
+ /* Chunk offset */
+ uint32_t offset;
+ /* Chunk length */
+ uint32_t len;
+ /* Reserved for future expansion of this structure - always set to zeros */
+ uint32_t reserved[3];
+} signed_image_chunk_hdr_t;
+
+#endif /* CI_MGMT_SIGNED_IMAGE_LAYOUT_H */
diff --git a/drivers/net/sfc/base/ef10_tlv_layout.h b/drivers/net/sfc/base/ef10_tlv_layout.h
index 2473a66a..56cffaee 100644
--- a/drivers/net/sfc/base/ef10_tlv_layout.h
+++ b/drivers/net/sfc/base/ef10_tlv_layout.h
@@ -4,6 +4,14 @@
 * All rights reserved.
 */
+/*
+ * This is NOT the original source file. Do NOT edit it.
+ * To update the tlv layout, please edit the copy in
+ * the sfregistry repo and then, in that repo,
+ * "make tlv_headers" or "make export" to
+ * regenerate and export all types of headers.
+ */
+
/* These structures define the layouts for the TLV items stored in static and
 * dynamic configuration partitions in NVRAM for EF10 (Huntington etc.).
 *
@@ -32,6 +40,7 @@
 * 1: dynamic configuration
 * 2: firmware internal use
 * 3: license partition
+ * 4: tsa configuration
 *
 * - TTT is a type, which is just a unique value. The same type value
 * might appear in both locations, indicating a relationship between
@@ -407,6 +416,8 @@ struct tlv_firmware_options {
#define TLV_FIRMWARE_VARIANT_PACKED_STREAM_HASH_MODE_1 \
 MC_CMD_FW_PACKED_STREAM_HASH_MODE_1
#define TLV_FIRMWARE_VARIANT_RULES_ENGINE MC_CMD_FW_RULES_ENGINE
+#define TLV_FIRMWARE_VARIANT_DPDK MC_CMD_FW_DPDK
+#define TLV_FIRMWARE_VARIANT_L3XUDP MC_CMD_FW_L3XUDP
};

/* Voltage settings
@@ -525,6 +536,17 @@ struct tlv_pcie_config_r2 {
 * number of externally visible ports (and, hence, PF to port mapping), so must
 * be done at boot time.
 *
+ * Port mode naming convention is
+ *
+ * [nports_on_cage0]x[port_lane_width]_[nports_on_cage1]x[port_lane_width]
+ *
+ * Port lane width determines the capabilities (speeds) of the ports, subject
+ * to architecture capabilities (e.g. 25G support) and switch bandwidth
+ * constraints:
+ * - single lane ports can do 25G/10G/1G
+ * - dual lane ports can do 50G/25G/10G/1G (with fallback to 1 lane)
+ * - quad lane ports can do 100G/40G/50G/25G/10G/1G (with fallback to 2 or 1 lanes)
+ * This tag supersedes tlv_global_port_config.
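A hedged sketch of how a host utility might validate one chunk header of the signed image layout defined earlier in this file set. The buffer handling, helper names and little-endian assumption are the sketch's own; only the magic/version checks mirror the header definition:

#include <stdint.h>
#include <string.h>

#define CHUNK_HDR_MAGIC 0xEF105161u
#define CHUNK_HDR_VERSION 0x1u

struct chunk_hdr {
	uint32_t magic;
	uint32_t version;
	uint32_t id;
	uint32_t offset;
	uint32_t len;
	uint32_t reserved[3];
};

/* Returns 0 on success, -1 if the header is not valid. */
static int
parse_chunk_hdr(const uint8_t *buf, size_t buflen, struct chunk_hdr *out)
{
	if (buflen < sizeof (*out))
		return (-1);
	memcpy(out, buf, sizeof (*out));	/* assumes little-endian NVRAM */
	if (out->magic != CHUNK_HDR_MAGIC || out->version != CHUNK_HDR_VERSION)
		return (-1);
	return (0);
}

int
main(void)
{
	/* Little-endian encoding of magic 0xEF105161 and version 1 */
	uint8_t buf[sizeof (struct chunk_hdr)] = { 0x61, 0x51, 0x10, 0xEF, 0x01 };
	struct chunk_hdr hdr;

	return (parse_chunk_hdr(buf, sizeof (buf), &hdr) == 0 ? 0 : 1);
}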
*/
@@ -535,18 +557,58 @@ struct tlv_global_port_mode {
 uint32_t length;
 uint32_t port_mode;
#define TLV_PORT_MODE_DEFAULT (0xffffffff) /* Default for given platform */
-#define TLV_PORT_MODE_10G (0) /* 10G, single SFP/10G-KR */
-#define TLV_PORT_MODE_40G (1) /* 40G, single QSFP/40G-KR */
-#define TLV_PORT_MODE_10G_10G (2) /* 2x10G, dual SFP/10G-KR or single QSFP */
-#define TLV_PORT_MODE_40G_40G (3) /* 40G + 40G, dual QSFP/40G-KR (Greenport, Medford) */
-#define TLV_PORT_MODE_10G_10G_10G_10G (4) /* 2x10G + 2x10G, quad SFP/10G-KR or dual QSFP (Greenport) */
-#define TLV_PORT_MODE_10G_10G_10G_10G_Q1 (4) /* 4x10G, single QSFP, cage 0 (Medford) */
-#define TLV_PORT_MODE_10G_10G_10G_10G_Q (5) /* 4x10G, single QSFP, cage 0 (Medford) OBSOLETE DO NOT USE */
-#define TLV_PORT_MODE_40G_10G_10G (6) /* 1x40G + 2x10G, dual QSFP (Greenport, Medford) */
-#define TLV_PORT_MODE_10G_10G_40G (7) /* 2x10G + 1x40G, dual QSFP (Greenport, Medford) */
-#define TLV_PORT_MODE_10G_10G_10G_10G_Q2 (8) /* 4x10G, single QSFP, cage 1 (Medford) */
-#define TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2 (9) /* 2x10G + 2x10G, dual QSFP (Medford) */
-#define TLV_PORT_MODE_MAX TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2
+
+/* Huntington port modes */
+#define TLV_PORT_MODE_10G (0)
+#define TLV_PORT_MODE_40G (1)
+#define TLV_PORT_MODE_10G_10G (2)
+#define TLV_PORT_MODE_40G_40G (3)
+#define TLV_PORT_MODE_10G_10G_10G_10G (4)
+#define TLV_PORT_MODE_40G_10G_10G (6)
+#define TLV_PORT_MODE_10G_10G_40G (7)
+
+/* Medford (and later) port modes */
+#define TLV_PORT_MODE_1x1_NA (0) /* Single 10G/25G on mdi0 */
+#define TLV_PORT_MODE_1x4_NA (1) /* Single 100G/40G on mdi0 */
+#define TLV_PORT_MODE_NA_1x4 (22) /* Single 100G/40G on mdi1 */
+#define TLV_PORT_MODE_1x2_NA (10) /* Single 50G on mdi0 */
+#define TLV_PORT_MODE_NA_1x2 (11) /* Single 50G on mdi1 */
+#define TLV_PORT_MODE_1x1_1x1 (2) /* Single 10G/25G on mdi0, single 10G/25G on mdi1 */
+#define TLV_PORT_MODE_1x4_1x4 (3) /* Single 40G on mdi0, single 40G on mdi1 */
+#define TLV_PORT_MODE_2x1_2x1 (5) /* Dual 10G/25G on mdi0, dual 10G/25G on mdi1 */
+#define TLV_PORT_MODE_4x1_NA (4) /* Quad 10G/25G on mdi0 */
+#define TLV_PORT_MODE_NA_4x1 (8) /* Quad 10G/25G on mdi1 */
+#define TLV_PORT_MODE_1x4_2x1 (6) /* Single 40G on mdi0, dual 10G/25G on mdi1 */
+#define TLV_PORT_MODE_2x1_1x4 (7) /* Dual 10G/25G on mdi0, single 40G on mdi1 */
+#define TLV_PORT_MODE_1x2_1x2 (12) /* Single 50G on mdi0, single 50G on mdi1 */
+#define TLV_PORT_MODE_2x2_NA (13) /* Dual 50G on mdi0 */
+#define TLV_PORT_MODE_NA_2x2 (14) /* Dual 50G on mdi1 */
+#define TLV_PORT_MODE_1x4_1x2 (15) /* Single 40G on mdi0, single 50G on mdi1 */
+#define TLV_PORT_MODE_1x2_1x4 (16) /* Single 50G on mdi0, single 40G on mdi1 */
+#define TLV_PORT_MODE_1x2_2x1 (17) /* Single 50G on mdi0, dual 10G/25G on mdi1 */
+#define TLV_PORT_MODE_2x1_1x2 (18) /* Dual 10G/25G on mdi0, single 50G on mdi1 */
+
+/* Snapper-only Medford2 port modes.
+ * These modes are eftest only, to allow snapper explicit
+ * selection between multi-channel and LLPCS. In production,
+ * this selection is automatic and the outside world should not
+ * care about LLPCS.
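Per the naming convention documented with this tag, lane width bounds the achievable port speed. An illustrative lookup only; real capability also depends on architecture support and switch bandwidth, as the comment above notes:

#include <stdio.h>

static const char *
max_speed_for_lanes(unsigned int lanes)
{
	switch (lanes) {
	case 1: return ("25G");	/* single lane: 25G/10G/1G */
	case 2: return ("50G");	/* dual lane: up to 50G */
	case 4: return ("100G");	/* quad lane: up to 100G */
	default: return ("unknown");
	}
}

int
main(void)
{
	printf("1x -> %s, 2x -> %s, 4x -> %s\n",
	    max_speed_for_lanes(1), max_speed_for_lanes(2),
	    max_speed_for_lanes(4));
	return (0);
}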
+ */ +#define TLV_PORT_MODE_2x1_2x1_LL (19) /* Dual 10G/25G on mdi0, dual 10G/25G on mdi1, low-latency PCS */ +#define TLV_PORT_MODE_4x1_NA_LL (20) /* Quad 10G/25G on mdi0, low-latency PCS */ +#define TLV_PORT_MODE_NA_4x1_LL (21) /* Quad 10G/25G on mdi1, low-latency PCS */ +#define TLV_PORT_MODE_1x1_NA_LL (23) /* Single 10G/25G on mdi0, low-latency PCS */ +#define TLV_PORT_MODE_1x1_1x1_LL (24) /* Single 10G/25G on mdi0, single 10G/25G on mdi1, low-latency PCS */ +#define TLV_PORT_MODE_BUG63720_DO_NOT_USE (9) /* bug63720: Do not use */ +#define TLV_PORT_MODE_MAX TLV_PORT_MODE_1x1_1x1_LL + +/* Deprecated Medford aliases - DO NOT USE IN NEW CODE */ +#define TLV_PORT_MODE_10G_10G_10G_10G_Q (5) +#define TLV_PORT_MODE_10G_10G_10G_10G_Q1 (4) +#define TLV_PORT_MODE_10G_10G_10G_10G_Q2 (8) +#define TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2 (9) + +#define TLV_PORT_MODE_MAX TLV_PORT_MODE_1x1_1x1_LL }; /* Type of the v-switch created implicitly by the firmware */ @@ -791,7 +853,7 @@ typedef struct tlv_license { uint8_t data[]; } tlv_license_t; -/* TSA NIC IP address configuration +/* TSA NIC IP address configuration (DEPRECATED) * * Sets the TSA NIC IP address statically via configuration tool or dynamically * via DHCP via snooping based on the mode selection (0=Static, 1=DHCP, 2=Snoop) @@ -801,7 +863,7 @@ typedef struct tlv_license { * released code yet. */ -#define TLV_TAG_TMP_TSAN_CONFIG (0x10220000) +#define TLV_TAG_TMP_TSAN_CONFIG (0x10220000) /* DEPRECATED */ #define TLV_TSAN_IP_MODE_STATIC (0) #define TLV_TSAN_IP_MODE_DHCP (1) @@ -818,7 +880,7 @@ typedef struct tlv_tsan_config { uint32_t bind_bkout; /* DEPRECATED */ } tlv_tsan_config_t; -/* TSA Controller IP address configuration +/* TSA Controller IP address configuration (DEPRECATED) * * Sets the TSA Controller IP address statically via configuration tool * @@ -827,7 +889,7 @@ typedef struct tlv_tsan_config { * released code yet. */ -#define TLV_TAG_TMP_TSAC_CONFIG (0x10230000) +#define TLV_TAG_TMP_TSAC_CONFIG (0x10230000) /* DEPRECATED */ #define TLV_MAX_TSACS (4) typedef struct tlv_tsac_config { @@ -838,7 +900,7 @@ typedef struct tlv_tsac_config { uint32_t port[TLV_MAX_TSACS]; } tlv_tsac_config_t; -/* Binding ticket +/* Binding ticket (DEPRECATED) * * Sets the TSA NIC binding ticket used for binding process between the TSA NIC * and the TSA Controller @@ -848,7 +910,7 @@ typedef struct tlv_tsac_config { * released code yet. */ -#define TLV_TAG_TMP_BINDING_TICKET (0x10240000) +#define TLV_TAG_TMP_BINDING_TICKET (0x10240000) /* DEPRECATED */ typedef struct tlv_binding_ticket { uint32_t tag; @@ -873,7 +935,7 @@ typedef struct tlv_pik_sf { uint8_t bytes[]; } tlv_pik_sf_t; -/* CA root certificate +/* CA root certificate (DEPRECATED) * * Sets the CA root certificate used for TSA Controller verfication during * TLS connection setup between the TSA NIC and the TSA Controller @@ -883,7 +945,7 @@ typedef struct tlv_pik_sf { * released code yet. */ -#define TLV_TAG_TMP_CA_ROOT_CERT (0x10260000) +#define TLV_TAG_TMP_CA_ROOT_CERT (0x10260000) /* DEPRECATED */ typedef struct tlv_ca_root_cert { uint32_t tag; @@ -933,4 +995,17 @@ struct tlv_fastpd_mode { #define TLV_FASTPD_MODE_FAST_SUPPORTED 2 /* Supported packet types to the FastPD; everything else to the SoftPD */ }; +/* L3xUDP datapath firmware UDP port configuration + * + * Sets the list of UDP ports on which the encapsulation will be handled. + * The number of ports in the list is implied by the length of the TLV item. 
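A sketch of deriving the port count for the tlv_l3xudp_ports item defined below. It assumes the length field counts the value bytes following the tag/length words, as for other TLV items in this file; that semantic is an assumption of the sketch:

#include <stdint.h>
#include <stdio.h>

#define MAX_NUM_PORTS 16	/* mirrors TLV_TAG_L3XUDP_PORTS_MAX_NUM_PORTS */

static unsigned int
l3xudp_num_ports(uint32_t length)
{
	unsigned int n = length / sizeof (uint16_t);

	return (n > MAX_NUM_PORTS ? MAX_NUM_PORTS : n);
}

int
main(void)
{
	printf("length 8 -> %u ports\n", l3xudp_num_ports(8));
	return (0);
}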
+ */ +#define TLV_TAG_L3XUDP_PORTS (0x102a0000) +struct tlv_l3xudp_ports { + uint32_t tag; + uint32_t length; + uint16_t ports[]; +#define TLV_TAG_L3XUDP_PORTS_MAX_NUM_PORTS 16 +}; + #endif /* CI_MGMT_TLV_LAYOUT_H */ diff --git a/drivers/net/sfc/base/ef10_tx.c b/drivers/net/sfc/base/ef10_tx.c index 69c75700..7d27f710 100644 --- a/drivers/net/sfc/base/ef10_tx.c +++ b/drivers/net/sfc/base/ef10_tx.c @@ -8,7 +8,7 @@ #include "efx_impl.h" -#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD +#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 #if EFSYS_OPT_QSTATS #define EFX_TX_QSTAT_INCR(_etp, _stat) \ @@ -42,10 +42,15 @@ efx_mcdi_init_txq( EFSYS_ASSERT(EFX_TXQ_MAX_BUFS >= EFX_TXQ_NBUFS(enp->en_nic_cfg.enc_txq_max_ndescs)); + if ((esmp == NULL) || (EFSYS_MEM_SIZE(esmp) < EFX_TXQ_SIZE(ndescs))) { + rc = EINVAL; + goto fail1; + } + npages = EFX_TXQ_NBUFS(ndescs); if (MC_CMD_INIT_TXQ_IN_LEN(npages) > sizeof (payload)) { rc = EINVAL; - goto fail1; + goto fail2; } (void) memset(payload, 0, sizeof (payload)); @@ -94,11 +99,13 @@ efx_mcdi_init_txq( if (req.emr_rc != 0) { rc = req.emr_rc; - goto fail2; + goto fail3; } return (0); +fail3: + EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: @@ -176,7 +183,7 @@ ef10_tx_qcreate( { efx_nic_cfg_t *encp = &enp->en_nic_cfg; uint16_t inner_csum; - efx_qword_t desc; + efx_desc_t desc; efx_rc_t rc; _NOTE(ARGUNUSED(id)) @@ -201,19 +208,9 @@ ef10_tx_qcreate( * a no-op TX option descriptor. See bug29981 for details. */ *addedp = 1; - EFX_POPULATE_QWORD_6(desc, - ESF_DZ_TX_DESC_IS_OPT, 1, - ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_CRC_CSUM, - ESF_DZ_TX_OPTION_UDP_TCP_CSUM, - (flags & EFX_TXQ_CKSUM_TCPUDP) ? 1 : 0, - ESF_DZ_TX_OPTION_IP_CSUM, - (flags & EFX_TXQ_CKSUM_IPV4) ? 1 : 0, - ESF_DZ_TX_OPTION_INNER_UDP_TCP_CSUM, - (flags & EFX_TXQ_CKSUM_INNER_TCPUDP) ? 1 : 0, - ESF_DZ_TX_OPTION_INNER_IP_CSUM, - (flags & EFX_TXQ_CKSUM_INNER_IPV4) ? 
1 : 0); + ef10_tx_qdesc_checksum_create(etp, flags, &desc); - EFSYS_MEM_WRITEQ(etp->et_esmp, 0, &desc); + EFSYS_MEM_WRITEQ(etp->et_esmp, 0, &desc.ed_eq); ef10_tx_qpush(etp, *addedp, 0); return (0); @@ -511,8 +508,8 @@ ef10_tx_qpush( EFX_DMA_SYNC_QUEUE_FOR_DEVICE(etp->et_esmp, etp->et_mask + 1, wptr, id); EFSYS_PIO_WRITE_BARRIER(); - EFX_BAR_TBL_DOORBELL_WRITEO(enp, ER_DZ_TX_DESC_UPD_REG, - etp->et_index, &oword); + EFX_BAR_VI_DOORBELL_WRITEO(enp, ER_DZ_TX_DESC_UPD_REG, + etp->et_index, &oword); } else { efx_dword_t dword; @@ -527,8 +524,8 @@ ef10_tx_qpush( EFX_DMA_SYNC_QUEUE_FOR_DEVICE(etp->et_esmp, etp->et_mask + 1, wptr, id); EFSYS_PIO_WRITE_BARRIER(); - EFX_BAR_TBL_WRITED2(enp, ER_DZ_TX_DESC_UPD_REG, - etp->et_index, &dword, B_FALSE); + EFX_BAR_VI_WRITED2(enp, ER_DZ_TX_DESC_UPD_REG, + etp->et_index, &dword, B_FALSE); } } @@ -626,6 +623,7 @@ ef10_tx_qdesc_tso_create( ef10_tx_qdesc_tso2_create( __in efx_txq_t *etp, __in uint16_t ipv4_id, + __in uint16_t outer_ipv4_id, __in uint32_t tcp_seq, __in uint16_t tcp_mss, __out_ecount(count) efx_desc_t *edp, @@ -639,13 +637,14 @@ ef10_tx_qdesc_tso2_create( EFSYS_ASSERT(count >= EFX_TX_FATSOV2_OPT_NDESCS); - EFX_POPULATE_QWORD_5(edp[0].ed_eq, + EFX_POPULATE_QWORD_6(edp[0].ed_eq, ESF_DZ_TX_DESC_IS_OPT, 1, ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_TSO, ESF_DZ_TX_TSO_OPTION_TYPE, ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A, ESF_DZ_TX_TSO_IP_ID, ipv4_id, + ESF_DZ_TX_TSO_OUTER_IPID, outer_ipv4_id, ESF_DZ_TX_TSO_TCP_SEQNO, tcp_seq); EFX_POPULATE_QWORD_4(edp[1].ed_eq, ESF_DZ_TX_DESC_IS_OPT, 1, @@ -675,6 +674,30 @@ ef10_tx_qdesc_vlantci_create( ESF_DZ_TX_VLAN_TAG1, tci); } + void +ef10_tx_qdesc_checksum_create( + __in efx_txq_t *etp, + __in uint16_t flags, + __out efx_desc_t *edp) +{ + _NOTE(ARGUNUSED(etp)); + + EFSYS_PROBE2(tx_desc_checksum_create, unsigned int, etp->et_index, + uint32_t, flags); + + EFX_POPULATE_QWORD_6(edp->ed_eq, + ESF_DZ_TX_DESC_IS_OPT, 1, + ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_CRC_CSUM, + ESF_DZ_TX_OPTION_UDP_TCP_CSUM, + (flags & EFX_TXQ_CKSUM_TCPUDP) ? 1 : 0, + ESF_DZ_TX_OPTION_IP_CSUM, + (flags & EFX_TXQ_CKSUM_IPV4) ? 1 : 0, + ESF_DZ_TX_OPTION_INNER_UDP_TCP_CSUM, + (flags & EFX_TXQ_CKSUM_INNER_TCPUDP) ? 1 : 0, + ESF_DZ_TX_OPTION_INNER_IP_CSUM, + (flags & EFX_TXQ_CKSUM_INNER_IPV4) ? 
1 : 0); +} + __checkReturn efx_rc_t ef10_tx_qpace( @@ -752,4 +775,4 @@ ef10_tx_qstats_update( #endif /* EFSYS_OPT_QSTATS */ -#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ +#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */ diff --git a/drivers/net/sfc/base/ef10_vpd.c b/drivers/net/sfc/base/ef10_vpd.c index ad522e82..097fe1d4 100644 --- a/drivers/net/sfc/base/ef10_vpd.c +++ b/drivers/net/sfc/base/ef10_vpd.c @@ -10,7 +10,7 @@ #if EFSYS_OPT_VPD -#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD +#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 #include "ef10_tlv_layout.h" @@ -26,7 +26,8 @@ ef10_vpd_init( EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || - enp->en_family == EFX_FAMILY_MEDFORD); + enp->en_family == EFX_FAMILY_MEDFORD || + enp->en_family == EFX_FAMILY_MEDFORD2); if (enp->en_nic_cfg.enc_vpd_is_global) { tag = TLV_TAG_GLOBAL_STATIC_VPD; @@ -82,7 +83,8 @@ ef10_vpd_size( efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || - enp->en_family == EFX_FAMILY_MEDFORD); + enp->en_family == EFX_FAMILY_MEDFORD || + enp->en_family == EFX_FAMILY_MEDFORD2); /* * This function returns the total size the user should allocate @@ -115,7 +117,8 @@ ef10_vpd_read( efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || - enp->en_family == EFX_FAMILY_MEDFORD); + enp->en_family == EFX_FAMILY_MEDFORD || + enp->en_family == EFX_FAMILY_MEDFORD2); if (enp->en_nic_cfg.enc_vpd_is_global) { tag = TLV_TAG_GLOBAL_DYNAMIC_VPD; @@ -133,19 +136,22 @@ ef10_vpd_read( rc = ENOSPC; goto fail2; } - memcpy(data, dvpd, dvpd_size); + if (dvpd != NULL) + memcpy(data, dvpd, dvpd_size); /* Pad data with all-1s, consistent with update operations */ memset(data + dvpd_size, 0xff, size - dvpd_size); - EFSYS_KMEM_FREE(enp->en_esip, dvpd_size, dvpd); + if (dvpd != NULL) + EFSYS_KMEM_FREE(enp->en_esip, dvpd_size, dvpd); return (0); fail2: EFSYS_PROBE(fail2); - EFSYS_KMEM_FREE(enp->en_esip, dvpd_size, dvpd); + if (dvpd != NULL) + EFSYS_KMEM_FREE(enp->en_esip, dvpd_size, dvpd); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); @@ -167,7 +173,8 @@ ef10_vpd_verify( efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || - enp->en_family == EFX_FAMILY_MEDFORD); + enp->en_family == EFX_FAMILY_MEDFORD || + enp->en_family == EFX_FAMILY_MEDFORD2); /* * Strictly you could take the view that dynamic vpd is optional. 
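For context on the ef10_tx.c hunks above: ef10_tx_qcreate() now builds its initial no-op option descriptor with the new ef10_tx_qdesc_checksum_create() helper rather than open-coding the EFX_POPULATE_QWORD_6(), and the same operation is exported to drivers as efx_tx_qdesc_checksum_create() in efx.h later in this patch. A minimal usage sketch (placing the descriptor into the ring is driver-specific and omitted):

	efx_desc_t desc;

	/* Option descriptor enabling outer IPv4 and TCP/UDP checksum
	 * offload for the packets that follow it. */
	efx_tx_qdesc_checksum_create(etp,
	    EFX_TXQ_CKSUM_IPV4 | EFX_TXQ_CKSUM_TCPUDP, &desc);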
@@ -288,7 +295,8 @@ ef10_vpd_get( efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || - enp->en_family == EFX_FAMILY_MEDFORD); + enp->en_family == EFX_FAMILY_MEDFORD || + enp->en_family == EFX_FAMILY_MEDFORD2); /* Attempt to satisfy the request from svpd first */ if (enp->en_arch.ef10.ena_svpd_length > 0) { @@ -334,7 +342,8 @@ ef10_vpd_set( efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || - enp->en_family == EFX_FAMILY_MEDFORD); + enp->en_family == EFX_FAMILY_MEDFORD || + enp->en_family == EFX_FAMILY_MEDFORD2); /* If the provided (tag,keyword) exists in svpd, then it is readonly */ if (enp->en_arch.ef10.ena_svpd_length > 0) { @@ -387,7 +396,8 @@ ef10_vpd_write( efx_rc_t rc; EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || - enp->en_family == EFX_FAMILY_MEDFORD); + enp->en_family == EFX_FAMILY_MEDFORD || + enp->en_family == EFX_FAMILY_MEDFORD2); if (enp->en_nic_cfg.enc_vpd_is_global) { tag = TLV_TAG_GLOBAL_DYNAMIC_VPD; @@ -423,7 +433,8 @@ ef10_vpd_fini( __in efx_nic_t *enp) { EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON || - enp->en_family == EFX_FAMILY_MEDFORD); + enp->en_family == EFX_FAMILY_MEDFORD || + enp->en_family == EFX_FAMILY_MEDFORD2); if (enp->en_arch.ef10.ena_svpd_length > 0) { EFSYS_KMEM_FREE(enp->en_esip, enp->en_arch.ef10.ena_svpd_length, @@ -434,6 +445,6 @@ ef10_vpd_fini( } } -#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ +#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */ #endif /* EFSYS_OPT_VPD */ diff --git a/drivers/net/sfc/base/efx.h b/drivers/net/sfc/base/efx.h index fe996e7c..5108b9b1 100644 --- a/drivers/net/sfc/base/efx.h +++ b/drivers/net/sfc/base/efx.h @@ -40,6 +40,7 @@ typedef enum efx_family_e { EFX_FAMILY_SIENA, EFX_FAMILY_HUNTINGTON, EFX_FAMILY_MEDFORD, + EFX_FAMILY_MEDFORD2, EFX_FAMILY_NTYPES } efx_family_t; @@ -47,7 +48,8 @@ extern __checkReturn efx_rc_t efx_family( __in uint16_t venid, __in uint16_t devid, - __out efx_family_t *efp); + __out efx_family_t *efp, + __out unsigned int *membarp); #define EFX_PCI_VENID_SFC 0x1924 @@ -69,7 +71,21 @@ efx_family( #define EFX_PCI_DEVID_MEDFORD 0x0A03 /* SFC9240 PF */ #define EFX_PCI_DEVID_MEDFORD_VF 0x1A03 /* SFC9240 VF */ -#define EFX_MEM_BAR 2 +#define EFX_PCI_DEVID_MEDFORD2_PF_UNINIT 0x0B13 +#define EFX_PCI_DEVID_MEDFORD2 0x0B03 /* SFC9250 PF */ +#define EFX_PCI_DEVID_MEDFORD2_VF 0x1B03 /* SFC9250 VF */ + + +#define EFX_MEM_BAR_SIENA 2 + +#define EFX_MEM_BAR_HUNTINGTON_PF 2 +#define EFX_MEM_BAR_HUNTINGTON_VF 0 + +#define EFX_MEM_BAR_MEDFORD_PF 2 +#define EFX_MEM_BAR_MEDFORD_VF 0 + +#define EFX_MEM_BAR_MEDFORD2 0 + /* Error codes */ @@ -113,9 +129,22 @@ efx_nic_create( __in efsys_lock_t *eslp, __deref_out efx_nic_t **enpp); +/* EFX_FW_VARIANT codes map one to one on MC_CMD_FW codes */ +typedef enum efx_fw_variant_e { + EFX_FW_VARIANT_FULL_FEATURED, + EFX_FW_VARIANT_LOW_LATENCY, + EFX_FW_VARIANT_PACKED_STREAM, + EFX_FW_VARIANT_HIGH_TX_RATE, + EFX_FW_VARIANT_PACKED_STREAM_HASH_MODE_1, + EFX_FW_VARIANT_RULES_ENGINE, + EFX_FW_VARIANT_DPDK, + EFX_FW_VARIANT_DONT_CARE = 0xffffffff +} efx_fw_variant_t; + extern __checkReturn efx_rc_t efx_nic_probe( - __in efx_nic_t *enp); + __in efx_nic_t *enp, + __in efx_fw_variant_t efv); extern __checkReturn efx_rc_t efx_nic_init( @@ -171,7 +200,7 @@ efx_nic_check_pcie_link_speed( #if EFSYS_OPT_MCDI -#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD +#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 /* Huntington and Medford require MCDIv2 commands */ #define WITH_MCDI_V2 1 #endif @@ 
-307,7 +336,7 @@ efx_intr_fini( #if EFSYS_OPT_MAC_STATS -/* START MKCONFIG GENERATED EfxHeaderMacBlock e323546097fd7c65 */ +/* START MKCONFIG GENERATED EfxHeaderMacBlock ea466a9bc8789994 */ typedef enum efx_mac_stat_e { EFX_MAC_RX_OCTETS, EFX_MAC_RX_PKTS, @@ -390,6 +419,31 @@ typedef enum efx_mac_stat_e { EFX_MAC_VADAPTER_TX_BAD_PACKETS, EFX_MAC_VADAPTER_TX_BAD_BYTES, EFX_MAC_VADAPTER_TX_OVERFLOW, + EFX_MAC_FEC_UNCORRECTED_ERRORS, + EFX_MAC_FEC_CORRECTED_ERRORS, + EFX_MAC_FEC_CORRECTED_SYMBOLS_LANE0, + EFX_MAC_FEC_CORRECTED_SYMBOLS_LANE1, + EFX_MAC_FEC_CORRECTED_SYMBOLS_LANE2, + EFX_MAC_FEC_CORRECTED_SYMBOLS_LANE3, + EFX_MAC_CTPIO_VI_BUSY_FALLBACK, + EFX_MAC_CTPIO_LONG_WRITE_SUCCESS, + EFX_MAC_CTPIO_MISSING_DBELL_FAIL, + EFX_MAC_CTPIO_OVERFLOW_FAIL, + EFX_MAC_CTPIO_UNDERFLOW_FAIL, + EFX_MAC_CTPIO_TIMEOUT_FAIL, + EFX_MAC_CTPIO_NONCONTIG_WR_FAIL, + EFX_MAC_CTPIO_FRM_CLOBBER_FAIL, + EFX_MAC_CTPIO_INVALID_WR_FAIL, + EFX_MAC_CTPIO_VI_CLOBBER_FALLBACK, + EFX_MAC_CTPIO_UNQUALIFIED_FALLBACK, + EFX_MAC_CTPIO_RUNT_FALLBACK, + EFX_MAC_CTPIO_SUCCESS, + EFX_MAC_CTPIO_FALLBACK, + EFX_MAC_CTPIO_POISON, + EFX_MAC_CTPIO_ERASE, + EFX_MAC_RXDP_SCATTER_DISABLED_TRUNC, + EFX_MAC_RXDP_HLB_IDLE, + EFX_MAC_RXDP_HLB_TIMEOUT, EFX_MAC_NSTATS } efx_mac_stat_t; @@ -408,11 +462,16 @@ typedef enum efx_link_mode_e { EFX_LINK_1000FDX, EFX_LINK_10000FDX, EFX_LINK_40000FDX, + EFX_LINK_25000FDX, + EFX_LINK_50000FDX, + EFX_LINK_100000FDX, EFX_LINK_NMODES } efx_link_mode_t; #define EFX_MAC_ADDR_LEN 6 +#define EFX_VNI_OR_VSID_LEN 3 + #define EFX_MAC_ADDR_IS_MULTICAST(_address) (((uint8_t *)_address)[0] & 0x01) #define EFX_MAC_MULTICAST_LIST_MAX 256 @@ -536,7 +595,6 @@ efx_mac_stats_get_mask( ((_mask)[(_stat) / EFX_MAC_STATS_MASK_BITS_PER_PAGE] & \ (1ULL << ((_stat) & (EFX_MAC_STATS_MASK_BITS_PER_PAGE - 1)))) -#define EFX_MAC_STATS_SIZE 0x400 extern __checkReturn efx_rc_t efx_mac_stats_clear( @@ -545,8 +603,8 @@ efx_mac_stats_clear( /* * Upload mac statistics supported by the hardware into the given buffer. * - * The reference buffer must be at least %EFX_MAC_STATS_SIZE bytes, - * and page aligned. + * The DMA buffer must be 4Kbyte aligned and sized to hold at least + * efx_nic_cfg_t::enc_mac_stats_nstats 64bit counters. * * The hardware will only DMA statistics that it understands (of course). 
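+ *
+ * For example, a driver might size the buffer as follows (a sketch: the
+ * 4KByte rounding granularity is our reading of the alignment
+ * requirement, and enc_mac_stats_nstats is added to efx_nic_cfg_t later
+ * in this patch):
+ *
+ *        const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);
+ *        size_t nbytes = encp->enc_mac_stats_nstats * sizeof (uint64_t);
+ *        size_t mac_stats_size = (nbytes + 4095) & ~(size_t)4095;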
* Drivers should not make any assumptions about which statistics are @@ -603,7 +661,7 @@ efx_mon_init( #define EFX_MON_STATS_PAGE_SIZE 0x100 #define EFX_MON_MASK_ELEMENT_SIZE 32 -/* START MKCONFIG GENERATED MonitorHeaderStatsBlock aa0233c80156308e */ +/* START MKCONFIG GENERATED MonitorHeaderStatsBlock 400fdb0517af1fca */ typedef enum efx_mon_stat_e { EFX_MON_STAT_2_5V, EFX_MON_STAT_VCCP1, @@ -684,6 +742,10 @@ typedef enum efx_mon_stat_e { EFX_MON_STAT_BOARD_BACK_TEMP, EFX_MON_STAT_I1V8, EFX_MON_STAT_I2V5, + EFX_MON_STAT_I3V3, + EFX_MON_STAT_I12V0, + EFX_MON_STAT_1_3V, + EFX_MON_STAT_I1V3, EFX_MON_NSTATS } efx_mon_stat_t; @@ -788,6 +850,9 @@ typedef enum efx_loopback_type_e { EFX_LOOPBACK_SD_FEP1_5_WS = 32, EFX_LOOPBACK_SD_FEP_WS = 33, EFX_LOOPBACK_SD_FES_WS = 34, + EFX_LOOPBACK_AOE_INT_NEAR = 35, + EFX_LOOPBACK_DATA_WS = 36, + EFX_LOOPBACK_FORCE_EXT_LINK = 37, EFX_LOOPBACK_NTYPES } efx_loopback_type_t; @@ -843,6 +908,16 @@ typedef enum efx_phy_cap_type_e { EFX_PHY_CAP_ASYM, EFX_PHY_CAP_AN, EFX_PHY_CAP_40000FDX, + EFX_PHY_CAP_DDM, + EFX_PHY_CAP_100000FDX, + EFX_PHY_CAP_25000FDX, + EFX_PHY_CAP_50000FDX, + EFX_PHY_CAP_BASER_FEC, + EFX_PHY_CAP_BASER_FEC_REQUESTED, + EFX_PHY_CAP_RS_FEC, + EFX_PHY_CAP_RS_FEC_REQUESTED, + EFX_PHY_CAP_25G_BASER_FEC, + EFX_PHY_CAP_25G_BASER_FEC_REQUESTED, EFX_PHY_CAP_NTYPES } efx_phy_cap_type_t; @@ -1080,6 +1155,13 @@ typedef enum efx_tunnel_protocol_e { EFX_TUNNEL_NPROTOS } efx_tunnel_protocol_t; +typedef enum efx_vi_window_shift_e { + EFX_VI_WINDOW_SHIFT_INVALID = 0, + EFX_VI_WINDOW_SHIFT_8K = 13, + EFX_VI_WINDOW_SHIFT_16K = 14, + EFX_VI_WINDOW_SHIFT_64K = 16, +} efx_vi_window_shift_t; + typedef struct efx_nic_cfg_s { uint32_t enc_board_type; uint32_t enc_phy_type; @@ -1093,6 +1175,7 @@ typedef struct efx_nic_cfg_s { uint32_t enc_mon_stat_mask[(EFX_MON_NSTATS + 31) / 32]; #endif unsigned int enc_features; + efx_vi_window_shift_t enc_vi_window_shift; uint8_t enc_mac_addr[6]; uint8_t enc_port; /* PHY port number */ uint32_t enc_intr_vec_base; @@ -1112,6 +1195,17 @@ typedef struct efx_nic_cfg_s { uint32_t enc_rx_buf_align_start; uint32_t enc_rx_buf_align_end; uint32_t enc_rx_scale_max_exclusive_contexts; + /* + * Mask of supported hash algorithms. + * Hash algorithm types are used as the bit indices. + */ + uint32_t enc_rx_scale_hash_alg_mask; + /* + * Indicates whether port numbers can be included to the + * input data for hash computation. 
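+ *
+ * For example (a hedged reading of the flag declared just below),
+ * port-based tuples should only be requested when it is B_TRUE, e.g.
+ *
+ *        tuple = encp->enc_rx_scale_l4_hash_supported ?
+ *            EFX_RX_CLASS_HASH_4TUPLE : EFX_RX_CLASS_HASH_2TUPLE;
+ *
+ * (the EFX_RX_CLASS_HASH_* tuples are defined later in this file).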
+ */ + boolean_t enc_rx_scale_l4_hash_supported; + boolean_t enc_rx_scale_additional_modes_supported; #if EFSYS_OPT_LOOPBACK efx_qword_t enc_loopback_types[EFX_LINK_NMODES]; #endif /* EFSYS_OPT_LOOPBACK */ @@ -1137,11 +1231,11 @@ typedef struct efx_nic_cfg_s { #if EFSYS_OPT_BIST uint32_t enc_bist_mask; #endif /* EFSYS_OPT_BIST */ -#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD +#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 uint32_t enc_pf; uint32_t enc_vf; uint32_t enc_privilege_mask; -#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ +#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */ boolean_t enc_bug26807_workaround; boolean_t enc_bug35388_workaround; boolean_t enc_bug41750_workaround; @@ -1165,6 +1259,7 @@ typedef struct efx_nic_cfg_s { uint32_t enc_tx_tso_tcp_header_offset_limit; boolean_t enc_fw_assisted_tso_enabled; boolean_t enc_fw_assisted_tso_v2_enabled; + boolean_t enc_fw_assisted_tso_v2_encap_enabled; /* Number of TSO contexts on the NIC (FATSOv2) */ uint32_t enc_fw_assisted_tso_v2_n_contexts; boolean_t enc_hw_tx_insert_vlan_enabled; @@ -1178,6 +1273,8 @@ typedef struct efx_nic_cfg_s { boolean_t enc_init_evq_v2_supported; boolean_t enc_rx_packed_stream_supported; boolean_t enc_rx_var_packed_stream_supported; + boolean_t enc_rx_es_super_buffer_supported; + boolean_t enc_fw_subvariant_no_tx_csum_supported; boolean_t enc_pm_and_rxdp_counters; boolean_t enc_mac_stats_40g_tx_size_bins; uint32_t enc_tunnel_encapsulations_supported; @@ -1196,6 +1293,14 @@ typedef struct efx_nic_cfg_s { uint32_t enc_max_pcie_link_gen; /* Firmware verifies integrity of NVRAM updates */ uint32_t enc_nvram_update_verify_result_supported; + /* Firmware support for extended MAC_STATS buffer */ + uint32_t enc_mac_stats_nstats; + boolean_t enc_fec_counters; + boolean_t enc_hlb_counters; + /* Firmware support for "FLAG" and "MARK" filter actions */ + boolean_t enc_filter_action_flag_supported; + boolean_t enc_filter_action_mark_supported; + uint32_t enc_filter_action_mark_max; } efx_nic_cfg_t; #define EFX_PCI_FUNCTION_IS_PF(_encp) ((_encp)->enc_vf == 0xffff) @@ -1210,6 +1315,13 @@ extern const efx_nic_cfg_t * efx_nic_cfg_get( __in efx_nic_t *enp); +/* RxDPCPU firmware id values by which FW variant can be identified */ +#define EFX_RXDP_FULL_FEATURED_FW_ID 0x0 +#define EFX_RXDP_LOW_LATENCY_FW_ID 0x1 +#define EFX_RXDP_PACKED_STREAM_FW_ID 0x2 +#define EFX_RXDP_RULES_ENGINE_FW_ID 0x5 +#define EFX_RXDP_DPDK_FW_ID 0x6 + typedef struct efx_nic_fw_info_s { /* Basic FW version information */ uint16_t enfi_mc_fw_version[4]; @@ -1498,6 +1610,92 @@ efx_bootcfg_write( #endif /* EFSYS_OPT_BOOTCFG */ +#if EFSYS_OPT_IMAGE_LAYOUT + +#include "ef10_signed_image_layout.h" + +/* + * Image header used in unsigned and signed image layouts (see SF-102785-PS). + * + * NOTE: + * The image header format is extensible. However, older drivers require an + * exact match of image header version and header length when validating and + * writing firmware images. + * + * To avoid breaking backward compatibility, we use the upper bits of the + * controller version fields to contain an extra version number used for + * combined bootROM and UEFI ROM images on EF10 and later (to hold the UEFI ROM + * version). See bug39254 and SF-102785-PS for details. 
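+ *
+ * Sketch of the packing described above, using the structure defined
+ * below (hdrp is an illustrative pointer to a validated header):
+ *
+ *        uint16_t ctrl_min = hdrp->eih_controller_version_min_short;
+ *        uint8_t uefi_ver_a = hdrp->eih_extra_version_a;
+ *        uint8_t uefi_ver_b = hdrp->eih_extra_version_b;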
+ */ +typedef struct efx_image_header_s { + uint32_t eih_magic; + uint32_t eih_version; + uint32_t eih_type; + uint32_t eih_subtype; + uint32_t eih_code_size; + uint32_t eih_size; + union { + uint32_t eih_controller_version_min; + struct { + uint16_t eih_controller_version_min_short; + uint8_t eih_extra_version_a; + uint8_t eih_extra_version_b; + }; + }; + union { + uint32_t eih_controller_version_max; + struct { + uint16_t eih_controller_version_max_short; + uint8_t eih_extra_version_c; + uint8_t eih_extra_version_d; + }; + }; + uint16_t eih_code_version_a; + uint16_t eih_code_version_b; + uint16_t eih_code_version_c; + uint16_t eih_code_version_d; +} efx_image_header_t; + +#define EFX_IMAGE_HEADER_SIZE (40) +#define EFX_IMAGE_HEADER_VERSION (4) +#define EFX_IMAGE_HEADER_MAGIC (0x106F1A5) + + +typedef struct efx_image_trailer_s { + uint32_t eit_crc; +} efx_image_trailer_t; + +#define EFX_IMAGE_TRAILER_SIZE (4) + +typedef enum efx_image_format_e { + EFX_IMAGE_FORMAT_NO_IMAGE, + EFX_IMAGE_FORMAT_INVALID, + EFX_IMAGE_FORMAT_UNSIGNED, + EFX_IMAGE_FORMAT_SIGNED, +} efx_image_format_t; + +typedef struct efx_image_info_s { + efx_image_format_t eii_format; + uint8_t * eii_imagep; + size_t eii_image_size; + efx_image_header_t * eii_headerp; +} efx_image_info_t; + +extern __checkReturn efx_rc_t +efx_check_reflash_image( + __in void *bufferp, + __in uint32_t buffer_size, + __out efx_image_info_t *infop); + +extern __checkReturn efx_rc_t +efx_build_signed_image_write_buffer( + __out uint8_t *bufferp, + __in uint32_t buffer_size, + __in efx_image_info_t *infop, + __out efx_image_header_t **headerpp); + +#endif /* EFSYS_OPT_IMAGE_LAYOUT */ + #if EFSYS_OPT_DIAG typedef enum efx_pattern_type_t { @@ -1674,7 +1872,7 @@ typedef __checkReturn boolean_t __in uint32_t size, __in uint16_t flags); -#if EFSYS_OPT_RX_PACKED_STREAM +#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER /* * Packed stream mode is documented in SF-112241-TC. @@ -1684,6 +1882,13 @@ typedef __checkReturn boolean_t * packets are put there in a continuous stream. * The main advantage of such an approach is that RX queue refilling * happens much less frequently. + * + * Equal stride packed stream mode is documented in SF-119419-TC. + * The general idea is to utilize advantages of the packed stream, + * but avoid indirection in packets representation. + * The main advantage of such an approach is that RX queue refilling + * happens much less frequently and packets buffers are independent + * from upper layers point of view. */ typedef __checkReturn boolean_t @@ -1784,7 +1989,7 @@ typedef __checkReturn boolean_t typedef struct efx_ev_callbacks_s { efx_initialized_ev_t eec_initialized; efx_rx_ev_t eec_rx; -#if EFSYS_OPT_RX_PACKED_STREAM +#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER efx_rx_ps_ev_t eec_rx_ps; #endif efx_tx_ev_t eec_tx; @@ -1888,14 +2093,35 @@ efx_rx_scatter_enable( typedef enum efx_rx_hash_alg_e { EFX_RX_HASHALG_LFSR = 0, - EFX_RX_HASHALG_TOEPLITZ + EFX_RX_HASHALG_TOEPLITZ, + EFX_RX_HASHALG_PACKED_STREAM, + EFX_RX_NHASHALGS } efx_rx_hash_alg_t; +/* + * Legacy hash type flags. + * + * They represent standard tuples for distinct traffic classes. 
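+ *
+ * A hedged creation sketch with the API declared later in this file
+ * (all parameter values are illustrative only):
+ *
+ *        rc = efx_rx_qcreate_es_super_buffer(enp, index, label,
+ *            n_bufs_per_desc, max_dma_len, buf_stride,
+ *            hol_block_timeout, esmp, ndescs, 0, eep, &erp);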
+ */ #define EFX_RX_HASH_IPV4 (1U << 0) #define EFX_RX_HASH_TCPIPV4 (1U << 1) #define EFX_RX_HASH_IPV6 (1U << 2) #define EFX_RX_HASH_TCPIPV6 (1U << 3) +#define EFX_RX_HASH_LEGACY_MASK \ + (EFX_RX_HASH_IPV4 | \ + EFX_RX_HASH_TCPIPV4 | \ + EFX_RX_HASH_IPV6 | \ + EFX_RX_HASH_TCPIPV6) + +/* + * The type of the argument used by efx_rx_scale_mode_set() to + * provide a means for the client drivers to configure hashing. + * + * A properly constructed value can either be: + * - a combination of legacy flags + * - a combination of EFX_RX_HASH() flags + */ typedef unsigned int efx_rx_hash_type_t; typedef enum efx_rx_hash_support_e { @@ -1914,6 +2140,92 @@ typedef enum efx_rx_scale_context_type_e { EFX_RX_SCALE_SHARED /* Read-only key/indirection table */ } efx_rx_scale_context_type_t; +/* + * Traffic classes eligible for hash computation. + * + * Select packet headers used in computing the receive hash. + * This uses the same encoding as the RSS_MODES field of + * MC_CMD_RSS_CONTEXT_SET_FLAGS. + */ +#define EFX_RX_CLASS_IPV4_TCP_LBN 8 +#define EFX_RX_CLASS_IPV4_TCP_WIDTH 4 +#define EFX_RX_CLASS_IPV4_UDP_LBN 12 +#define EFX_RX_CLASS_IPV4_UDP_WIDTH 4 +#define EFX_RX_CLASS_IPV4_LBN 16 +#define EFX_RX_CLASS_IPV4_WIDTH 4 +#define EFX_RX_CLASS_IPV6_TCP_LBN 20 +#define EFX_RX_CLASS_IPV6_TCP_WIDTH 4 +#define EFX_RX_CLASS_IPV6_UDP_LBN 24 +#define EFX_RX_CLASS_IPV6_UDP_WIDTH 4 +#define EFX_RX_CLASS_IPV6_LBN 28 +#define EFX_RX_CLASS_IPV6_WIDTH 4 + +#define EFX_RX_NCLASSES 6 + +/* + * Ancillary flags used to construct generic hash tuples. + * This uses the same encoding as RSS_MODE_HASH_SELECTOR. + */ +#define EFX_RX_CLASS_HASH_SRC_ADDR (1U << 0) +#define EFX_RX_CLASS_HASH_DST_ADDR (1U << 1) +#define EFX_RX_CLASS_HASH_SRC_PORT (1U << 2) +#define EFX_RX_CLASS_HASH_DST_PORT (1U << 3) + +/* + * Generic hash tuples. + * + * They express combinations of packet fields + * which can contribute to the hash value for + * a particular traffic class. + */ +#define EFX_RX_CLASS_HASH_DISABLE 0 + +#define EFX_RX_CLASS_HASH_1TUPLE_SRC EFX_RX_CLASS_HASH_SRC_ADDR +#define EFX_RX_CLASS_HASH_1TUPLE_DST EFX_RX_CLASS_HASH_DST_ADDR + +#define EFX_RX_CLASS_HASH_2TUPLE \ + (EFX_RX_CLASS_HASH_SRC_ADDR | \ + EFX_RX_CLASS_HASH_DST_ADDR) + +#define EFX_RX_CLASS_HASH_2TUPLE_SRC \ + (EFX_RX_CLASS_HASH_SRC_ADDR | \ + EFX_RX_CLASS_HASH_SRC_PORT) + +#define EFX_RX_CLASS_HASH_2TUPLE_DST \ + (EFX_RX_CLASS_HASH_DST_ADDR | \ + EFX_RX_CLASS_HASH_DST_PORT) + +#define EFX_RX_CLASS_HASH_4TUPLE \ + (EFX_RX_CLASS_HASH_SRC_ADDR | \ + EFX_RX_CLASS_HASH_DST_ADDR | \ + EFX_RX_CLASS_HASH_SRC_PORT | \ + EFX_RX_CLASS_HASH_DST_PORT) + +#define EFX_RX_CLASS_HASH_NTUPLES 7 + +/* + * Hash flag constructor. + * + * Resulting flags encode hash tuples for specific traffic classes. + * The client drivers are encouraged to use these flags to form + * a hash type value. + */ +#define EFX_RX_HASH(_class, _tuple) \ + EFX_INSERT_FIELD_NATIVE32(0, 31, \ + EFX_RX_CLASS_##_class, EFX_RX_CLASS_HASH_##_tuple) + +/* + * The maximum number of EFX_RX_HASH() flags. 
+ */ +#define EFX_RX_HASH_NFLAGS (EFX_RX_NCLASSES * EFX_RX_CLASS_HASH_NTUPLES) + +extern __checkReturn efx_rc_t +efx_rx_scale_hash_flags_get( + __in efx_nic_t *enp, + __in efx_rx_hash_alg_t hash_alg, + __inout_ecount(EFX_RX_HASH_NFLAGS) unsigned int *flags, + __out unsigned int *nflagsp); + extern __checkReturn efx_rc_t efx_rx_hash_default_support_get( __in efx_nic_t *enp, @@ -1984,6 +2296,7 @@ efx_pseudo_hdr_pkt_length_get( typedef enum efx_rxq_type_e { EFX_RXQ_TYPE_DEFAULT, EFX_RXQ_TYPE_PACKED_STREAM, + EFX_RXQ_TYPE_ES_SUPER_BUFFER, EFX_RXQ_NTYPES } efx_rxq_type_t; @@ -2037,6 +2350,28 @@ efx_rx_qcreate_packed_stream( #endif +#if EFSYS_OPT_RX_ES_SUPER_BUFFER + +/* Maximum head-of-line block timeout in nanoseconds */ +#define EFX_RXQ_ES_SUPER_BUFFER_HOL_BLOCK_MAX (400U * 1000 * 1000) + +extern __checkReturn efx_rc_t +efx_rx_qcreate_es_super_buffer( + __in efx_nic_t *enp, + __in unsigned int index, + __in unsigned int label, + __in uint32_t n_bufs_per_desc, + __in uint32_t max_dma_len, + __in uint32_t buf_stride, + __in uint32_t hol_block_timeout, + __in efsys_mem_t *esmp, + __in size_t ndescs, + __in unsigned int flags, + __in efx_evq_t *eep, + __deref_out efx_rxq_t **erpp); + +#endif + typedef struct efx_buffer_s { efsys_dma_addr_t eb_addr; size_t eb_size; @@ -2226,6 +2561,7 @@ extern void efx_tx_qdesc_tso2_create( __in efx_txq_t *etp, __in uint16_t ipv4_id, + __in uint16_t outer_ipv4_id, __in uint32_t tcp_seq, __in uint16_t tcp_mss, __out_ecount(count) efx_desc_t *edp, @@ -2237,6 +2573,12 @@ efx_tx_qdesc_vlantci_create( __in uint16_t tci, __out efx_desc_t *edp); +extern void +efx_tx_qdesc_checksum_create( + __in efx_txq_t *etp, + __in uint16_t flags, + __out efx_desc_t *edp); + #if EFSYS_OPT_QSTATS #if EFSYS_OPT_NAMES @@ -2285,6 +2627,10 @@ efx_tx_qdestroy( #define EFX_FILTER_FLAG_RX 0x08 /* Filter is for TX */ #define EFX_FILTER_FLAG_TX 0x10 +/* Set match flag on the received packet */ +#define EFX_FILTER_FLAG_ACTION_FLAG 0x20 +/* Set match mark on the received packet */ +#define EFX_FILTER_FLAG_ACTION_MARK 0x40 typedef uint8_t efx_filter_flags_t; @@ -2313,10 +2659,19 @@ typedef uint8_t efx_filter_flags_t; #define EFX_FILTER_MATCH_OUTER_VID 0x00000100 /* Match by IP transport protocol */ #define EFX_FILTER_MATCH_IP_PROTO 0x00000200 +/* Match by VNI or VSID */ +#define EFX_FILTER_MATCH_VNI_OR_VSID 0x00000800 +/* For encapsulated packets, match by inner frame local MAC address */ +#define EFX_FILTER_MATCH_IFRM_LOC_MAC 0x00010000 /* For encapsulated packets, match all multicast inner frames */ #define EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST 0x01000000 /* For encapsulated packets, match all unicast inner frames */ #define EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST 0x02000000 +/* + * Match by encap type, this flag does not correspond to + * the MCDI match flags and any unoccupied value may be used + */ +#define EFX_FILTER_MATCH_ENCAP_TYPE 0x20000000 /* Match otherwise-unmatched multicast and broadcast packets */ #define EFX_FILTER_MATCH_UNKNOWN_MCAST_DST 0x40000000 /* Match otherwise-unmatched unicast packets */ @@ -2359,6 +2714,9 @@ typedef struct efx_filter_spec_s { uint16_t efs_rem_port; efx_oword_t efs_rem_host; efx_oword_t efs_loc_host; + uint8_t efs_vni_or_vsid[EFX_VNI_OR_VSID_LEN]; + uint8_t efs_ifrm_loc_mac[EFX_MAC_ADDR_LEN]; + uint32_t efs_mark; } efx_filter_spec_t; @@ -2454,6 +2812,13 @@ efx_filter_spec_set_encap_type( __in efx_tunnel_protocol_t encap_type, __in efx_filter_inner_frame_match_t inner_frame_match); +extern __checkReturn efx_rc_t +efx_filter_spec_set_vxlan_full( + __inout 
efx_filter_spec_t *spec, + __in const uint8_t *vxlan_id, + __in const uint8_t *inner_addr, + __in const uint8_t *outer_addr); + #if EFSYS_OPT_RX_SCALE extern __checkReturn efx_rc_t efx_filter_spec_set_rss_context( @@ -2659,6 +3024,38 @@ efx_tunnel_reconfigure( #endif /* EFSYS_OPT_TUNNEL */ +#if EFSYS_OPT_FW_SUBVARIANT_AWARE + +/** + * Firmware subvariant choice options. + * + * It may be switched to no Tx checksum if attached drivers are either + * preboot or firmware subvariant aware and no VIS are allocated. + * If may be always switched to default explicitly using set request or + * implicitly if unaware driver is attaching. If switching is done when + * a driver is attached, it gets MC_REBOOT event and should recreate its + * datapath. + * + * See SF-119419-TC DPDK Firmware Driver Interface and + * SF-109306-TC EF10 for Driver Writers for details. + */ +typedef enum efx_nic_fw_subvariant_e { + EFX_NIC_FW_SUBVARIANT_DEFAULT = 0, + EFX_NIC_FW_SUBVARIANT_NO_TX_CSUM = 1, + EFX_NIC_FW_SUBVARIANT_NTYPES +} efx_nic_fw_subvariant_t; + +extern __checkReturn efx_rc_t +efx_nic_get_fw_subvariant( + __in efx_nic_t *enp, + __out efx_nic_fw_subvariant_t *subvariantp); + +extern __checkReturn efx_rc_t +efx_nic_set_fw_subvariant( + __in efx_nic_t *enp, + __in efx_nic_fw_subvariant_t subvariant); + +#endif /* EFSYS_OPT_FW_SUBVARIANT_AWARE */ #ifdef __cplusplus } diff --git a/drivers/net/sfc/base/efx_bootcfg.c b/drivers/net/sfc/base/efx_bootcfg.c index 0f71936f..715e18e8 100644 --- a/drivers/net/sfc/base/efx_bootcfg.c +++ b/drivers/net/sfc/base/efx_bootcfg.c @@ -68,6 +68,20 @@ efx_bootcfg_sector_info( } #endif /* EFSYS_OPT_MEDFORD */ +#if EFSYS_OPT_MEDFORD2 + case EFX_FAMILY_MEDFORD2: { + /* Shared partition (array indexed by PF) */ + max_size = BOOTCFG_PER_PF; + count = BOOTCFG_PF_COUNT; + if (pf >= count) { + rc = EINVAL; + goto fail3; + } + offset = max_size * pf; + break; + } +#endif /* EFSYS_OPT_MEDFORD2 */ + default: EFSYS_ASSERT(0); rc = ENOTSUP; @@ -82,6 +96,10 @@ efx_bootcfg_sector_info( return (0); +#if EFSYS_OPT_MEDFORD2 +fail3: + EFSYS_PROBE(fail3); +#endif #if EFSYS_OPT_MEDFORD fail2: EFSYS_PROBE(fail2); @@ -191,19 +209,25 @@ efx_bootcfg_copy_sector( size_t used_bytes; efx_rc_t rc; + /* Minimum buffer is checksum byte and DHCP_END terminator */ + if (data_size < 2) { + rc = ENOSPC; + goto fail1; + } + /* Verify that the area is correctly formatted and checksummed */ rc = efx_bootcfg_verify(enp, sector, sector_length, &used_bytes); if (!handle_format_errors) { if (rc != 0) - goto fail1; + goto fail2; if ((used_bytes < 2) || (sector[used_bytes - 1] != DHCP_END)) { /* Block too short, or DHCP_END missing */ rc = ENOENT; - goto fail2; + goto fail3; } } @@ -237,9 +261,13 @@ efx_bootcfg_copy_sector( */ if (used_bytes > data_size) { rc = ENOSPC; - goto fail3; + goto fail4; } - memcpy(data, sector, used_bytes); + + data[0] = 0; /* checksum, updated below */ + + /* Copy all after the checksum to the target buffer */ + memcpy(data + 1, sector + 1, used_bytes - 1); /* Zero out the unused portion of the target buffer */ if (used_bytes < data_size) @@ -253,6 +281,8 @@ efx_bootcfg_copy_sector( return (0); +fail4: + EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: @@ -277,20 +307,31 @@ efx_bootcfg_read( efx_rc_t rc; uint32_t sector_number; -#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD + /* Minimum buffer is checksum byte and DHCP_END terminator */ + if (size < 2) { + rc = ENOSPC; + goto fail1; + } + +#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 sector_number = 
enp->en_nic_cfg.enc_pf; #else sector_number = 0; #endif rc = efx_nvram_size(enp, EFX_NVRAM_BOOTROM_CFG, &partn_length); if (rc != 0) - goto fail1; + goto fail2; /* The bootcfg sector may be stored in a (larger) shared partition */ rc = efx_bootcfg_sector_info(enp, sector_number, NULL, &sector_offset, &sector_length); if (rc != 0) - goto fail2; + goto fail3; + + if (sector_length < 2) { + rc = EINVAL; + goto fail4; + } if (sector_length > BOOTCFG_MAX_SIZE) sector_length = BOOTCFG_MAX_SIZE; @@ -298,7 +339,7 @@ efx_bootcfg_read( if (sector_offset + sector_length > partn_length) { /* Partition is too small */ rc = EFBIG; - goto fail3; + goto fail5; } /* @@ -311,28 +352,28 @@ efx_bootcfg_read( EFSYS_KMEM_ALLOC(enp->en_esip, sector_length, payload); if (payload == NULL) { rc = ENOMEM; - goto fail4; + goto fail6; } } else payload = (uint8_t *)data; if ((rc = efx_nvram_rw_start(enp, EFX_NVRAM_BOOTROM_CFG, NULL)) != 0) - goto fail5; + goto fail7; if ((rc = efx_nvram_read_chunk(enp, EFX_NVRAM_BOOTROM_CFG, sector_offset, (caddr_t)payload, sector_length)) != 0) { (void) efx_nvram_rw_finish(enp, EFX_NVRAM_BOOTROM_CFG, NULL); - goto fail6; + goto fail8; } if ((rc = efx_nvram_rw_finish(enp, EFX_NVRAM_BOOTROM_CFG, NULL)) != 0) - goto fail7; + goto fail9; /* Verify that the area is correctly formatted and checksummed */ rc = efx_bootcfg_verify(enp, payload, sector_length, &used_bytes); if (rc != 0 || used_bytes == 0) { - payload[0] = (uint8_t)(~DHCP_END & 0xff); + payload[0] = 0; payload[1] = DHCP_END; used_bytes = 2; } @@ -347,10 +388,8 @@ efx_bootcfg_read( * so reinitialise the sector if there isn't room for the character. */ if (payload[used_bytes - 1] != DHCP_END) { - if (used_bytes + 1 > sector_length) { - payload[0] = 0; + if (used_bytes >= sector_length) used_bytes = 1; - } payload[used_bytes] = DHCP_END; ++used_bytes; @@ -362,10 +401,14 @@ efx_bootcfg_read( */ if (used_bytes > size) { rc = ENOSPC; - goto fail8; + goto fail10; } + + data[0] = 0; /* checksum, updated below */ + if (sector_length > size) { - memcpy(data, payload, used_bytes); + /* Copy all after the checksum to the target buffer */ + memcpy(data + 1, payload + 1, used_bytes - 1); EFSYS_KMEM_FREE(enp->en_esip, sector_length, payload); } @@ -381,16 +424,20 @@ efx_bootcfg_read( return (0); +fail10: + EFSYS_PROBE(fail10); +fail9: + EFSYS_PROBE(fail9); fail8: EFSYS_PROBE(fail8); fail7: EFSYS_PROBE(fail7); + if (sector_length > size) + EFSYS_KMEM_FREE(enp->en_esip, sector_length, payload); fail6: EFSYS_PROBE(fail6); fail5: EFSYS_PROBE(fail5); - if (sector_length > size) - EFSYS_KMEM_FREE(enp->en_esip, sector_length, payload); fail4: EFSYS_PROBE(fail4); fail3: @@ -418,7 +465,7 @@ efx_bootcfg_write( efx_rc_t rc; uint32_t sector_number; -#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD +#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 sector_number = enp->en_nic_cfg.enc_pf; #else sector_number = 0; diff --git a/drivers/net/sfc/base/efx_check.h b/drivers/net/sfc/base/efx_check.h index 58377759..ef5eadc6 100644 --- a/drivers/net/sfc/base/efx_check.h +++ b/drivers/net/sfc/base/efx_check.h @@ -30,8 +30,9 @@ #if EFSYS_OPT_CHECK_REG /* Verify chip implements accessed registers */ -# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) -# error "CHECK_REG requires SIENA or HUNTINGTON or MEDFORD" +# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \ + EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2) +# error "CHECK_REG requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2" # endif #endif /* EFSYS_OPT_CHECK_REG */ @@ -44,15 +45,17 @@ #if 
EFSYS_OPT_DIAG /* Support diagnostic hardware tests */ -# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) -# error "DIAG requires SIENA or HUNTINGTON or MEDFORD" +# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \ + EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2) +# error "DIAG requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2" # endif #endif /* EFSYS_OPT_DIAG */ #if EFSYS_OPT_EV_PREFETCH /* Support optimized EVQ data access */ -# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) -# error "EV_PREFETCH requires SIENA or HUNTINGTON or MEDFORD" +# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \ + EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2) +# error "EV_PREFETCH requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2" # endif #endif /* EFSYS_OPT_EV_PREFETCH */ @@ -62,21 +65,23 @@ #if EFSYS_OPT_FILTER /* Support hardware packet filters */ -# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) -# error "FILTER requires SIENA or HUNTINGTON or MEDFORD" +# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \ + EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2) +# error "FILTER requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2" # endif #endif /* EFSYS_OPT_FILTER */ -#if (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) +#if (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2) # if !EFSYS_OPT_FILTER -# error "HUNTINGTON or MEDFORD requires FILTER" +# error "HUNTINGTON or MEDFORD or MEDFORD2 requires FILTER" # endif #endif /* EFSYS_OPT_HUNTINGTON */ #if EFSYS_OPT_LOOPBACK /* Support hardware loopback modes */ -# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) -# error "LOOPBACK requires SIENA or HUNTINGTON or MEDFORD" +# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \ + EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2) +# error "LOOPBACK requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2" # endif #endif /* EFSYS_OPT_LOOPBACK */ @@ -90,21 +95,24 @@ #if EFSYS_OPT_MAC_STATS /* Support MAC statistics */ -# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) -# error "MAC_STATS requires SIENA or HUNTINGTON or MEDFORD" +# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \ + EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2) +# error "MAC_STATS requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2" # endif #endif /* EFSYS_OPT_MAC_STATS */ #if EFSYS_OPT_MCDI /* Support management controller messages */ -# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) -# error "MCDI requires SIENA or HUNTINGTON or MEDFORD" +# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \ + EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2) +# error "MCDI requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2" # endif #endif /* EFSYS_OPT_MCDI */ -#if (EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) +#if (EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \ + EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2) # if !EFSYS_OPT_MCDI -# error "SIENA or HUNTINGTON or MEDFORD requires MCDI" +# error "SIENA or HUNTINGTON or MEDFORD or MEDFORD2 requires MCDI" # endif #endif @@ -144,15 +152,17 @@ #if EFSYS_OPT_MON_STATS /* Support monitor statistics (voltage/temperature) */ -# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) -# error "MON_STATS requires SIENA or HUNTINGTON or MEDFORD" +# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \ + EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2) +# error "MON_STATS requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2" # endif #endif /* EFSYS_OPT_MON_STATS */ #if EFSYS_OPT_MON_MCDI /* Support Monitor via mcdi */ -# if !(EFSYS_OPT_SIENA || 
EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) -# error "MON_MCDI requires SIENA or HUNTINGTON or MEDFORD" +# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \ + EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2) +# error "MON_MCDI requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2" # endif #endif /* EFSYS_OPT_MON_MCDI*/ @@ -166,11 +176,19 @@ #if EFSYS_OPT_NVRAM /* Support non volatile configuration */ -# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) -# error "NVRAM requires SIENA or HUNTINGTON or MEDFORD" +# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \ + EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2) +# error "NVRAM requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2" # endif #endif /* EFSYS_OPT_NVRAM */ +#if EFSYS_OPT_IMAGE_LAYOUT +/* Support signed image layout handling */ +# if !(EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2) +# error "IMAGE_LAYOUT requires MEDFORD or MEDFORD2" +# endif +#endif /* EFSYS_OPT_IMAGE_LAYOUT */ + #ifdef EFSYS_OPT_NVRAM_FALCON_BOOTROM # error "NVRAM_FALCON_BOOTROM is obsolete and is not supported." #endif @@ -200,8 +218,9 @@ #if EFSYS_OPT_PHY_LED_CONTROL /* Support for PHY LED control */ -# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) -# error "PHY_LED_CONTROL requires SIENA or HUNTINGTON or MEDFORD" +# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \ + EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2) +# error "PHY_LED_CONTROL requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2" # endif #endif /* EFSYS_OPT_PHY_LED_CONTROL */ @@ -246,8 +265,9 @@ #if EFSYS_OPT_QSTATS /* Support EVQ/RXQ/TXQ statistics */ -# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) -# error "QSTATS requires SIENA or HUNTINGTON or MEDFORD" +# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \ + EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2) +# error "QSTATS requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2" # endif #endif /* EFSYS_OPT_QSTATS */ @@ -257,15 +277,17 @@ #if EFSYS_OPT_RX_SCALE /* Support receive scaling (RSS) */ -# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) -# error "RX_SCALE requires SIENA or HUNTINGTON or MEDFORD" +# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \ + EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2) +# error "RX_SCALE requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2" # endif #endif /* EFSYS_OPT_RX_SCALE */ #if EFSYS_OPT_RX_SCATTER /* Support receive scatter DMA */ -# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) -# error "RX_SCATTER requires SIENA or HUNTINGTON or MEDFORD" +# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \ + EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2) +# error "RX_SCATTER requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2" # endif #endif /* EFSYS_OPT_RX_SCATTER */ @@ -275,8 +297,9 @@ #if EFSYS_OPT_VPD /* Support PCI Vital Product Data (VPD) */ -# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) -# error "VPD requires SIENA or HUNTINGTON or MEDFORD" +# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \ + EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2) +# error "VPD requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2" # endif #endif /* EFSYS_OPT_VPD */ @@ -290,8 +313,9 @@ #if EFSYS_OPT_BIST /* Support BIST */ -# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) -# error "BIST requires SIENA or HUNTINGTON or MEDFORD" +# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \ + EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2) +# error "BIST requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2" # endif #endif /* EFSYS_OPT_BIST */ @@ -307,23 +331,37 @@ #if 
EFSYS_OPT_ALLOW_UNCONFIGURED_NIC /* Support adapters with missing static config (for factory use only) */ -# if !EFSYS_OPT_MEDFORD -# error "ALLOW_UNCONFIGURED_NIC requires MEDFORD" +# if !(EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2) +# error "ALLOW_UNCONFIGURED_NIC requires MEDFORD or MEDFORD2" # endif #endif /* EFSYS_OPT_ALLOW_UNCONFIGURED_NIC */ #if EFSYS_OPT_RX_PACKED_STREAM /* Support packed stream mode */ -# if !(EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) -# error "PACKED_STREAM requires HUNTINGTON or MEDFORD" +# if !(EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2) +# error "PACKED_STREAM requires HUNTINGTON or MEDFORD or MEDFORD2" +# endif +#endif + +#if EFSYS_OPT_RX_ES_SUPER_BUFFER +/* Support equal stride super-buffer mode */ +# if !(EFSYS_OPT_MEDFORD2) +# error "ES_SUPER_BUFFER requires MEDFORD2" # endif #endif /* Support hardware assistance for tunnels */ #if EFSYS_OPT_TUNNEL -# if !EFSYS_OPT_MEDFORD -# error "TUNNEL requires MEDFORD" +# if !(EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2) +# error "TUNNEL requires MEDFORD or MEDFORD2" # endif #endif /* EFSYS_OPT_TUNNEL */ +#if EFSYS_OPT_FW_SUBVARIANT_AWARE +/* Advertise that the driver is firmware subvariant aware */ +# if !(EFSYS_OPT_MEDFORD2) +# error "FW_SUBVARIANT_AWARE requires MEDFORD2" +# endif +#endif + #endif /* _SYS_EFX_CHECK_H */ diff --git a/drivers/net/sfc/base/efx_ev.c b/drivers/net/sfc/base/efx_ev.c index 949d352a..1139cc26 100644 --- a/drivers/net/sfc/base/efx_ev.c +++ b/drivers/net/sfc/base/efx_ev.c @@ -91,7 +91,7 @@ static const efx_ev_ops_t __efx_ev_siena_ops = { }; #endif /* EFSYS_OPT_SIENA */ -#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD +#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 static const efx_ev_ops_t __efx_ev_ef10_ops = { ef10_ev_init, /* eevo_init */ ef10_ev_fini, /* eevo_fini */ @@ -104,7 +104,7 @@ static const efx_ev_ops_t __efx_ev_ef10_ops = { ef10_ev_qstats_update, /* eevo_qstats_update */ #endif }; -#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ +#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */ __checkReturn efx_rc_t @@ -141,6 +141,12 @@ efx_ev_init( break; #endif /* EFSYS_OPT_MEDFORD */ +#if EFSYS_OPT_MEDFORD2 + case EFX_FAMILY_MEDFORD2: + eevop = &__efx_ev_ef10_ops; + break; +#endif /* EFSYS_OPT_MEDFORD2 */ + default: EFSYS_ASSERT(0); rc = ENOTSUP; diff --git a/drivers/net/sfc/base/efx_filter.c b/drivers/net/sfc/base/efx_filter.c index b92541aa..412298ac 100644 --- a/drivers/net/sfc/base/efx_filter.c +++ b/drivers/net/sfc/base/efx_filter.c @@ -56,7 +56,7 @@ static const efx_filter_ops_t __efx_filter_siena_ops = { }; #endif /* EFSYS_OPT_SIENA */ -#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD +#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 static const efx_filter_ops_t __efx_filter_ef10_ops = { ef10_filter_init, /* efo_init */ ef10_filter_fini, /* efo_fini */ @@ -66,7 +66,7 @@ static const efx_filter_ops_t __efx_filter_ef10_ops = { ef10_filter_supported_filters, /* efo_supported_filters */ ef10_filter_reconfigure, /* efo_reconfigure */ }; -#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ +#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */ __checkReturn efx_rc_t efx_filter_insert( @@ -74,12 +74,33 @@ efx_filter_insert( __inout efx_filter_spec_t *spec) { const efx_filter_ops_t *efop = enp->en_efop; + efx_nic_cfg_t *encp = &(enp->en_nic_cfg); + efx_rc_t rc; EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_FILTER); EFSYS_ASSERT3P(spec, !=, NULL); EFSYS_ASSERT3U(spec->efs_flags, &, 
EFX_FILTER_FLAG_RX); + if ((spec->efs_flags & EFX_FILTER_FLAG_ACTION_MARK) && + !encp->enc_filter_action_mark_supported) { + rc = ENOTSUP; + goto fail1; + } + + if ((spec->efs_flags & EFX_FILTER_FLAG_ACTION_FLAG) && + !encp->enc_filter_action_flag_supported) { + rc = ENOTSUP; + goto fail2; + } + return (efop->efo_add(enp, spec, B_FALSE)); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); } __checkReturn efx_rc_t @@ -145,6 +166,12 @@ efx_filter_init( break; #endif /* EFSYS_OPT_MEDFORD */ +#if EFSYS_OPT_MEDFORD2 + case EFX_FAMILY_MEDFORD2: + efop = &__efx_filter_ef10_ops; + break; +#endif /* EFSYS_OPT_MEDFORD2 */ + default: EFSYS_ASSERT(0); rc = ENOTSUP; @@ -412,7 +439,7 @@ efx_filter_spec_set_encap_type( __in efx_tunnel_protocol_t encap_type, __in efx_filter_inner_frame_match_t inner_frame_match) { - uint32_t match_flags = 0; + uint32_t match_flags = EFX_FILTER_MATCH_ENCAP_TYPE; uint8_t ip_proto; efx_rc_t rc; @@ -462,6 +489,43 @@ fail1: return (rc); } +/* + * Specify inner and outer Ethernet address and VXLAN ID in filter + * specification. + */ + __checkReturn efx_rc_t +efx_filter_spec_set_vxlan_full( + __inout efx_filter_spec_t *spec, + __in const uint8_t *vxlan_id, + __in const uint8_t *inner_addr, + __in const uint8_t *outer_addr) +{ + EFSYS_ASSERT3P(spec, !=, NULL); + EFSYS_ASSERT3P(vxlan_id, !=, NULL); + EFSYS_ASSERT3P(inner_addr, !=, NULL); + EFSYS_ASSERT3P(outer_addr, !=, NULL); + + if ((inner_addr == NULL) && (outer_addr == NULL)) + return (EINVAL); + + if (vxlan_id != NULL) { + spec->efs_match_flags |= EFX_FILTER_MATCH_VNI_OR_VSID; + memcpy(spec->efs_vni_or_vsid, vxlan_id, EFX_VNI_OR_VSID_LEN); + } + if (outer_addr != NULL) { + spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_MAC; + memcpy(spec->efs_loc_mac, outer_addr, EFX_MAC_ADDR_LEN); + } + if (inner_addr != NULL) { + spec->efs_match_flags |= EFX_FILTER_MATCH_IFRM_LOC_MAC; + memcpy(spec->efs_ifrm_loc_mac, inner_addr, EFX_MAC_ADDR_LEN); + } + spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE; + spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_VXLAN; + + return (0); +} + #if EFSYS_OPT_RX_SCALE __checkReturn efx_rc_t efx_filter_spec_set_rss_context( @@ -953,6 +1017,7 @@ siena_filter_build( default: EFSYS_ASSERT(B_FALSE); + EFX_ZERO_OWORD(*filter); return (0); } diff --git a/drivers/net/sfc/base/efx_impl.h b/drivers/net/sfc/base/efx_impl.h index ed685cba..548834f9 100644 --- a/drivers/net/sfc/base/efx_impl.h +++ b/drivers/net/sfc/base/efx_impl.h @@ -29,9 +29,13 @@ #include "medford_impl.h" #endif /* EFSYS_OPT_MEDFORD */ -#if (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) +#if EFSYS_OPT_MEDFORD2 +#include "medford2_impl.h" +#endif /* EFSYS_OPT_MEDFORD2 */ + +#if (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2) #include "ef10_impl.h" -#endif /* (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) */ +#endif /* (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2) */ #ifdef __cplusplus extern "C" { @@ -61,6 +65,7 @@ typedef enum efx_mac_type_e { EFX_MAC_SIENA, EFX_MAC_HUNTINGTON, EFX_MAC_MEDFORD, + EFX_MAC_MEDFORD2, EFX_MAC_NTYPES } efx_mac_type_t; @@ -112,16 +117,36 @@ typedef struct efx_tx_ops_s { uint32_t, uint8_t, efx_desc_t *); void (*etxo_qdesc_tso2_create)(efx_txq_t *, uint16_t, - uint32_t, uint16_t, + uint16_t, uint32_t, uint16_t, efx_desc_t *, int); void (*etxo_qdesc_vlantci_create)(efx_txq_t *, uint16_t, efx_desc_t *); + void (*etxo_qdesc_checksum_create)(efx_txq_t *, uint16_t, + efx_desc_t *); #if EFSYS_OPT_QSTATS void (*etxo_qstats_update)(efx_txq_t *, efsys_stat_t 
*); #endif } efx_tx_ops_t; +typedef union efx_rxq_type_data_u { + /* Dummy member to have non-empty union if no options are enabled */ + uint32_t ertd_dummy; +#if EFSYS_OPT_RX_PACKED_STREAM + struct { + uint32_t eps_buf_size; + } ertd_packed_stream; +#endif +#if EFSYS_OPT_RX_ES_SUPER_BUFFER + struct { + uint32_t eessb_bufs_per_desc; + uint32_t eessb_max_dma_len; + uint32_t eessb_buf_stride; + uint32_t eessb_hol_block_timeout; + } ertd_es_super_buffer; +#endif +} efx_rxq_type_data_t; + typedef struct efx_rx_ops_s { efx_rc_t (*erxo_init)(efx_nic_t *); void (*erxo_fini)(efx_nic_t *); @@ -158,7 +183,8 @@ typedef struct efx_rx_ops_s { efx_rc_t (*erxo_qflush)(efx_rxq_t *); void (*erxo_qenable)(efx_rxq_t *); efx_rc_t (*erxo_qcreate)(efx_nic_t *enp, unsigned int, - unsigned int, efx_rxq_type_t, uint32_t, + unsigned int, efx_rxq_type_t, + const efx_rxq_type_data_t *, efsys_mem_t *, size_t, uint32_t, unsigned int, efx_evq_t *, efx_rxq_t *); @@ -398,9 +424,9 @@ typedef struct efx_filter_s { #if EFSYS_OPT_SIENA siena_filter_t *ef_siena_filter; #endif /* EFSYS_OPT_SIENA */ -#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD +#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 ef10_filter_table_t *ef_ef10_filter_table; -#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ +#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */ } efx_filter_t; #if EFSYS_OPT_SIENA @@ -640,6 +666,7 @@ struct efx_nic_s { const efx_ev_ops_t *en_eevop; const efx_tx_ops_t *en_etxop; const efx_rx_ops_t *en_erxop; + efx_fw_variant_t efv; #if EFSYS_OPT_FILTER efx_filter_t en_filter; const efx_filter_ops_t *en_efop; @@ -683,7 +710,7 @@ struct efx_nic_s { #endif /* EFSYS_OPT_SIENA */ int enu_unused; } en_u; -#if (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) +#if (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2) union en_arch { struct { int ena_vi_base; @@ -704,7 +731,7 @@ struct efx_nic_s { size_t ena_wc_mem_map_size; } ef10; } en_arch; -#endif /* (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) */ +#endif /* (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2) */ }; @@ -716,9 +743,11 @@ typedef boolean_t (*efx_ev_handler_t)(efx_evq_t *, efx_qword_t *, typedef struct efx_evq_rxq_state_s { unsigned int eers_rx_read_ptr; unsigned int eers_rx_mask; -#if EFSYS_OPT_RX_PACKED_STREAM +#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER unsigned int eers_rx_stream_npackets; boolean_t eers_rx_packed_stream; +#endif +#if EFSYS_OPT_RX_PACKED_STREAM unsigned int eers_rx_packed_stream_credits; #endif } efx_evq_rxq_state_t; @@ -825,6 +854,10 @@ struct efx_txq_s { rev = 'E'; \ break; \ \ + case EFX_FAMILY_MEDFORD2: \ + rev = 'F'; \ + break; \ + \ default: \ rev = '?'; \ break; \ @@ -915,6 +948,15 @@ struct efx_txq_s { _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) +/* + * Accessors for memory BAR non-VI tables. + * + * Code used on EF10 *must* use EFX_BAR_VI_*() macros for per-VI registers, + * to ensure the correct runtime VI window size is used on Medford2. + * + * Siena-only code may continue using EFX_BAR_TBL_*() macros for VI registers. 
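+ *
+ * The practical difference from the EFX_BAR_TBL_*() accessors: the
+ * per-VI stride is no longer a compile-time _STEP constant but the VI
+ * window size discovered at probe time. Sketch of the offset the
+ * EFX_BAR_VI_*() macros below compute (vi_index is illustrative):
+ *
+ *        offset = ER_DZ_TX_DESC_UPD_REG_OFST +
+ *            (vi_index << enp->en_nic_cfg.enc_vi_window_shift);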
+ */ + #define EFX_BAR_TBL_READD(_enp, _reg, _index, _edp, _lock) \ do { \ EFX_CHECK_REG((_enp), (_reg)); \ @@ -941,21 +983,6 @@ struct efx_txq_s { _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) -#define EFX_BAR_TBL_WRITED2(_enp, _reg, _index, _edp, _lock) \ - do { \ - EFX_CHECK_REG((_enp), (_reg)); \ - EFSYS_PROBE4(efx_bar_tbl_writed, const char *, #_reg, \ - uint32_t, (_index), \ - uint32_t, _reg ## _OFST, \ - uint32_t, (_edp)->ed_u32[0]); \ - EFSYS_BAR_WRITED((_enp)->en_esbp, \ - (_reg ## _OFST + \ - (2 * sizeof (efx_dword_t)) + \ - ((_index) * _reg ## _STEP)), \ - (_edp), (_lock)); \ - _NOTE(CONSTANTCONDITION) \ - } while (B_FALSE) - #define EFX_BAR_TBL_WRITED3(_enp, _reg, _index, _edp, _lock) \ do { \ EFX_CHECK_REG((_enp), (_reg)); \ @@ -1032,16 +1059,66 @@ struct efx_txq_s { } while (B_FALSE) /* - * Allow drivers to perform optimised 128-bit doorbell writes. + * Accessors for memory BAR per-VI registers. + * + * The VI window size is 8KB for Medford and all earlier controllers. + * For Medford2, the VI window size can be 8KB, 16KB or 64KB. + */ + +#define EFX_BAR_VI_READD(_enp, _reg, _index, _edp, _lock) \ + do { \ + EFX_CHECK_REG((_enp), (_reg)); \ + EFSYS_BAR_READD((_enp)->en_esbp, \ + ((_reg ## _OFST) + \ + ((_index) << (_enp)->en_nic_cfg.enc_vi_window_shift)), \ + (_edp), (_lock)); \ + EFSYS_PROBE4(efx_bar_vi_readd, const char *, #_reg, \ + uint32_t, (_index), \ + uint32_t, _reg ## _OFST, \ + uint32_t, (_edp)->ed_u32[0]); \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +#define EFX_BAR_VI_WRITED(_enp, _reg, _index, _edp, _lock) \ + do { \ + EFX_CHECK_REG((_enp), (_reg)); \ + EFSYS_PROBE4(efx_bar_vi_writed, const char *, #_reg, \ + uint32_t, (_index), \ + uint32_t, _reg ## _OFST, \ + uint32_t, (_edp)->ed_u32[0]); \ + EFSYS_BAR_WRITED((_enp)->en_esbp, \ + ((_reg ## _OFST) + \ + ((_index) << (_enp)->en_nic_cfg.enc_vi_window_shift)), \ + (_edp), (_lock)); \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +#define EFX_BAR_VI_WRITED2(_enp, _reg, _index, _edp, _lock) \ + do { \ + EFX_CHECK_REG((_enp), (_reg)); \ + EFSYS_PROBE4(efx_bar_vi_writed, const char *, #_reg, \ + uint32_t, (_index), \ + uint32_t, _reg ## _OFST, \ + uint32_t, (_edp)->ed_u32[0]); \ + EFSYS_BAR_WRITED((_enp)->en_esbp, \ + ((_reg ## _OFST) + \ + (2 * sizeof (efx_dword_t)) + \ + ((_index) << (_enp)->en_nic_cfg.enc_vi_window_shift)), \ + (_edp), (_lock)); \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + +/* + * Allow drivers to perform optimised 128-bit VI doorbell writes. * The DMA descriptor pointers (RX_DESC_UPD and TX_DESC_UPD) are * special-cased in the BIU on the Falcon/Siena and EF10 architectures to avoid * the need for locking in the host, and are the only ones known to be safe to * use 128-bites write with. 
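+ *
+ * A hypothetical platform-side sketch (x86/SSE2; the real
+ * EFSYS_BAR_DOORBELL_WRITEO is supplied by each driver's efsys.h, and
+ * bar_base/offset/eop are illustrative):
+ *
+ *        _mm_store_si128((__m128i *)(uintptr_t)(bar_base + offset),
+ *            _mm_loadu_si128((const __m128i *)eop));
+ *
+ * Issuing the whole 16-byte descriptor pointer as one store means the
+ * device can never observe a torn doorbell update, so no host lock is
+ * needed.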
*/ -#define EFX_BAR_TBL_DOORBELL_WRITEO(_enp, _reg, _index, _eop) \ +#define EFX_BAR_VI_DOORBELL_WRITEO(_enp, _reg, _index, _eop) \ do { \ EFX_CHECK_REG((_enp), (_reg)); \ - EFSYS_PROBE7(efx_bar_tbl_doorbell_writeo, \ + EFSYS_PROBE7(efx_bar_vi_doorbell_writeo, \ const char *, #_reg, \ uint32_t, (_index), \ uint32_t, _reg ## _OFST, \ @@ -1050,7 +1127,8 @@ struct efx_txq_s { uint32_t, (_eop)->eo_u32[1], \ uint32_t, (_eop)->eo_u32[0]); \ EFSYS_BAR_DOORBELL_WRITEO((_enp)->en_esbp, \ - (_reg ## _OFST + ((_index) * _reg ## _STEP)), \ + (_reg ## _OFST + \ + ((_index) << (_enp)->en_nic_cfg.enc_vi_window_shift)), \ (_eop)); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) diff --git a/drivers/net/sfc/base/efx_intr.c b/drivers/net/sfc/base/efx_intr.c index 83ca177d..b518916d 100644 --- a/drivers/net/sfc/base/efx_intr.c +++ b/drivers/net/sfc/base/efx_intr.c @@ -75,7 +75,7 @@ static const efx_intr_ops_t __efx_intr_siena_ops = { }; #endif /* EFSYS_OPT_SIENA */ -#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD +#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 static const efx_intr_ops_t __efx_intr_ef10_ops = { ef10_intr_init, /* eio_init */ ef10_intr_enable, /* eio_enable */ @@ -87,7 +87,7 @@ static const efx_intr_ops_t __efx_intr_ef10_ops = { ef10_intr_fatal, /* eio_fatal */ ef10_intr_fini, /* eio_fini */ }; -#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ +#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */ __checkReturn efx_rc_t efx_intr_init( @@ -132,6 +132,12 @@ efx_intr_init( break; #endif /* EFSYS_OPT_MEDFORD */ +#if EFSYS_OPT_MEDFORD2 + case EFX_FAMILY_MEDFORD2: + eiop = &__efx_intr_ef10_ops; + break; +#endif /* EFSYS_OPT_MEDFORD2 */ + default: EFSYS_ASSERT(B_FALSE); rc = ENOTSUP; @@ -283,6 +289,12 @@ siena_intr_init( { efx_intr_t *eip = &(enp->en_intr); efx_oword_t oword; + efx_rc_t rc; + + if ((esmp == NULL) || (EFSYS_MEM_SIZE(esmp) < EFX_INTR_SIZE)) { + rc = EINVAL; + goto fail1; + } /* * bug17213 workaround. @@ -314,6 +326,11 @@ siena_intr_init( EFX_BAR_WRITEO(enp, FR_AZ_INT_ADR_REG_KER, &oword); return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); } static void diff --git a/drivers/net/sfc/base/efx_lic.c b/drivers/net/sfc/base/efx_lic.c index ad4d2211..49c00347 100644 --- a/drivers/net/sfc/base/efx_lic.c +++ b/drivers/net/sfc/base/efx_lic.c @@ -10,6 +10,9 @@ #if EFSYS_OPT_LICENSING #include "ef10_tlv_layout.h" +#if EFSYS_OPT_SIENA +#include "efx_regs_mcdi_aoe.h" +#endif #if EFSYS_OPT_SIENA | EFSYS_OPT_HUNTINGTON @@ -162,7 +165,7 @@ static const efx_lic_ops_t __efx_lic_v2_ops = { #endif /* EFSYS_OPT_HUNTINGTON */ -#if EFSYS_OPT_MEDFORD +#if EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 static __checkReturn efx_rc_t efx_mcdi_licensing_v3_update_licenses( @@ -286,7 +289,7 @@ static const efx_lic_ops_t __efx_lic_v3_ops = { efx_lic_v3_finish_partition, /* elo_finish_partition */ }; -#endif /* EFSYS_OPT_MEDFORD */ +#endif /* EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */ /* V1 Licensing - used in Siena Modena only */ @@ -819,7 +822,7 @@ fail1: /* V3 Licensing - used starting from Medford family. 
See SF-114884-SW */ -#if EFSYS_OPT_MEDFORD +#if EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 static __checkReturn efx_rc_t efx_mcdi_licensing_v3_update_licenses( @@ -829,7 +832,8 @@ efx_mcdi_licensing_v3_update_licenses( uint8_t payload[MC_CMD_LICENSING_V3_IN_LEN]; efx_rc_t rc; - EFSYS_ASSERT(enp->en_family == EFX_FAMILY_MEDFORD); + EFSYS_ASSERT((enp->en_family == EFX_FAMILY_MEDFORD) || + (enp->en_family == EFX_FAMILY_MEDFORD2)); (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_LICENSING_V3; @@ -866,7 +870,8 @@ efx_mcdi_licensing_v3_report_license( MC_CMD_LICENSING_V3_OUT_LEN)]; efx_rc_t rc; - EFSYS_ASSERT(enp->en_family == EFX_FAMILY_MEDFORD); + EFSYS_ASSERT((enp->en_family == EFX_FAMILY_MEDFORD) || + (enp->en_family == EFX_FAMILY_MEDFORD2)); (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_LICENSING_V3; @@ -930,7 +935,8 @@ efx_mcdi_licensing_v3_app_state( uint32_t app_state; efx_rc_t rc; - EFSYS_ASSERT(enp->en_family == EFX_FAMILY_MEDFORD); + EFSYS_ASSERT((enp->en_family == EFX_FAMILY_MEDFORD) || + (enp->en_family == EFX_FAMILY_MEDFORD2)); (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_GET_LICENSED_V3_APP_STATE; @@ -1262,7 +1268,7 @@ fail1: } -#endif /* EFSYS_OPT_MEDFORD */ +#endif /* EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */ __checkReturn efx_rc_t efx_lic_init( @@ -1296,6 +1302,12 @@ efx_lic_init( break; #endif /* EFSYS_OPT_MEDFORD */ +#if EFSYS_OPT_MEDFORD2 + case EFX_FAMILY_MEDFORD2: + elop = &__efx_lic_v3_ops; + break; +#endif /* EFSYS_OPT_MEDFORD2 */ + default: EFSYS_ASSERT(0); rc = ENOTSUP; diff --git a/drivers/net/sfc/base/efx_mac.c b/drivers/net/sfc/base/efx_mac.c index 511f3eb5..57436b95 100644 --- a/drivers/net/sfc/base/efx_mac.c +++ b/drivers/net/sfc/base/efx_mac.c @@ -39,7 +39,7 @@ static const efx_mac_ops_t __efx_mac_siena_ops = { }; #endif /* EFSYS_OPT_SIENA */ -#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD +#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 static const efx_mac_ops_t __efx_mac_ef10_ops = { ef10_mac_poll, /* emo_poll */ ef10_mac_up, /* emo_up */ @@ -62,7 +62,7 @@ static const efx_mac_ops_t __efx_mac_ef10_ops = { ef10_mac_stats_update /* emo_stats_update */ #endif /* EFSYS_OPT_MAC_STATS */ }; -#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ +#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */ __checkReturn efx_rc_t efx_mac_pdu_set( @@ -492,7 +492,7 @@ efx_mac_filter_default_rxq_clear( #if EFSYS_OPT_NAMES -/* START MKCONFIG GENERATED EfxMacStatNamesBlock c11b91b42f922516 */ +/* START MKCONFIG GENERATED EfxMacStatNamesBlock 1a45a82fcfb30c1b */ static const char * const __efx_mac_stat_name[] = { "rx_octets", "rx_pkts", @@ -575,6 +575,31 @@ static const char * const __efx_mac_stat_name[] = { "vadapter_tx_bad_packets", "vadapter_tx_bad_bytes", "vadapter_tx_overflow", + "fec_uncorrected_errors", + "fec_corrected_errors", + "fec_corrected_symbols_lane0", + "fec_corrected_symbols_lane1", + "fec_corrected_symbols_lane2", + "fec_corrected_symbols_lane3", + "ctpio_vi_busy_fallback", + "ctpio_long_write_success", + "ctpio_missing_dbell_fail", + "ctpio_overflow_fail", + "ctpio_underflow_fail", + "ctpio_timeout_fail", + "ctpio_noncontig_wr_fail", + "ctpio_frm_clobber_fail", + "ctpio_invalid_wr_fail", + "ctpio_vi_clobber_fallback", + "ctpio_unqualified_fallback", + "ctpio_runt_fallback", + "ctpio_success", + "ctpio_fallback", + "ctpio_poison", + "ctpio_erase", + "rxdp_scatter_disabled_trunc", + "rxdp_hlb_idle", + "rxdp_hlb_timeout", }; /* END MKCONFIG GENERATED 
EfxMacStatNamesBlock */ @@ -826,6 +851,13 @@ efx_mac_select( break; #endif /* EFSYS_OPT_MEDFORD */ +#if EFSYS_OPT_MEDFORD2 + case EFX_FAMILY_MEDFORD2: + emop = &__efx_mac_ef10_ops; + type = EFX_MAC_MEDFORD2; + break; +#endif /* EFSYS_OPT_MEDFORD2 */ + default: rc = EINVAL; goto fail1; diff --git a/drivers/net/sfc/base/efx_mcdi.c b/drivers/net/sfc/base/efx_mcdi.c index 347a5b35..d4ebcf26 100644 --- a/drivers/net/sfc/base/efx_mcdi.c +++ b/drivers/net/sfc/base/efx_mcdi.c @@ -45,7 +45,7 @@ static const efx_mcdi_ops_t __efx_mcdi_siena_ops = { #endif /* EFSYS_OPT_SIENA */ -#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD +#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 static const efx_mcdi_ops_t __efx_mcdi_ef10_ops = { ef10_mcdi_init, /* emco_init */ @@ -58,7 +58,7 @@ static const efx_mcdi_ops_t __efx_mcdi_ef10_ops = { ef10_mcdi_get_timeout, /* emco_get_timeout */ }; -#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ +#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */ @@ -92,6 +92,12 @@ efx_mcdi_init( break; #endif /* EFSYS_OPT_MEDFORD */ +#if EFSYS_OPT_MEDFORD2 + case EFX_FAMILY_MEDFORD2: + emcop = &__efx_mcdi_ef10_ops; + break; +#endif /* EFSYS_OPT_MEDFORD2 */ + default: EFSYS_ASSERT(0); rc = ENOTSUP; @@ -1258,13 +1264,21 @@ efx_mcdi_drv_attach( req.emr_out_length = MC_CMD_DRV_ATTACH_EXT_OUT_LEN; /* - * Use DONT_CARE for the datapath firmware type to ensure that the - * driver can attach to an unprivileged function. The datapath firmware - * type to use is controlled by the 'sfboot' utility. + * Typically, client drivers use DONT_CARE for the datapath firmware + * type to ensure that the driver can attach to an unprivileged + * function. The datapath firmware type to use is controlled by the + * 'sfboot' utility. + * If a client driver wishes to attach with a specific datapath firmware + * type, that can be passed in the second argument of the efx_nic_probe + * API. One such example is the ESXi native driver, which attempts to + * attach with FULL_FEATURED datapath firmware type first and falls back + * to DONT_CARE datapath firmware type if MC_CMD_DRV_ATTACH fails. */ - MCDI_IN_SET_DWORD(req, DRV_ATTACH_IN_NEW_STATE, attach ? 1 : 0); + MCDI_IN_POPULATE_DWORD_2(req, DRV_ATTACH_IN_NEW_STATE, + DRV_ATTACH_IN_ATTACH, attach ?
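The comment above describes an attach policy with fallback. Using the reworked API (efx_nic_probe() takes the desired firmware variant, as introduced later in this patch), a client could plausibly implement it along these lines; probe_with_fallback is a hypothetical caller, not code from the patch:

static efx_rc_t
probe_with_fallback(efx_nic_t *enp)
{
	efx_rc_t rc;

	/* Ask for a specific datapath firmware variant first... */
	rc = efx_nic_probe(enp, EFX_FW_VARIANT_FULL_FEATURED);
	if (rc != 0) {
		/* ...then retry, accepting whatever variant is running. */
		rc = efx_nic_probe(enp, EFX_FW_VARIANT_DONT_CARE);
	}
	return (rc);
}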
1 : 0, + DRV_ATTACH_IN_SUBVARIANT_AWARE, EFSYS_OPT_FW_SUBVARIANT_AWARE); MCDI_IN_SET_DWORD(req, DRV_ATTACH_IN_UPDATE, 1); - MCDI_IN_SET_DWORD(req, DRV_ATTACH_IN_FIRMWARE_ID, MC_CMD_FW_DONT_CARE); + MCDI_IN_SET_DWORD(req, DRV_ATTACH_IN_FIRMWARE_ID, enp->efv); efx_mcdi_execute(enp, &req); @@ -1426,6 +1440,11 @@ efx_mcdi_get_phy_cfg( efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_GET_PHY_CFG_IN_LEN, MC_CMD_GET_PHY_CFG_OUT_LEN)]; +#if EFSYS_OPT_NAMES + const char *namep; + size_t namelen; +#endif + uint32_t phy_media_type; efx_rc_t rc; (void) memset(payload, 0, sizeof (payload)); @@ -1449,10 +1468,12 @@ efx_mcdi_get_phy_cfg( encp->enc_phy_type = MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_TYPE); #if EFSYS_OPT_NAMES - (void) strncpy(encp->enc_phy_name, - MCDI_OUT2(req, char, GET_PHY_CFG_OUT_NAME), - MIN(sizeof (encp->enc_phy_name) - 1, - MC_CMD_GET_PHY_CFG_OUT_NAME_LEN)); + namep = MCDI_OUT2(req, char, GET_PHY_CFG_OUT_NAME); + namelen = MIN(sizeof (encp->enc_phy_name) - 1, + strnlen(namep, MC_CMD_GET_PHY_CFG_OUT_NAME_LEN)); + (void) memset(encp->enc_phy_name, 0, + sizeof (encp->enc_phy_name)); + memcpy(encp->enc_phy_name, namep, namelen); #endif /* EFSYS_OPT_NAMES */ (void) memset(encp->enc_phy_revision, 0, sizeof (encp->enc_phy_revision)); @@ -1474,8 +1495,8 @@ efx_mcdi_get_phy_cfg( EFX_STATIC_ASSERT(MC_CMD_MEDIA_SFP_PLUS == EFX_PHY_MEDIA_SFP_PLUS); EFX_STATIC_ASSERT(MC_CMD_MEDIA_BASE_T == EFX_PHY_MEDIA_BASE_T); EFX_STATIC_ASSERT(MC_CMD_MEDIA_QSFP_PLUS == EFX_PHY_MEDIA_QSFP_PLUS); - epp->ep_fixed_port_type = - (efx_phy_media_type_t) MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_MEDIA_TYPE); + phy_media_type = MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_MEDIA_TYPE); + epp->ep_fixed_port_type = (efx_phy_media_type_t)phy_media_type; if (epp->ep_fixed_port_type >= EFX_PHY_MEDIA_NTYPES) epp->ep_fixed_port_type = EFX_PHY_MEDIA_INVALID; @@ -1621,7 +1642,7 @@ fail1: #if EFSYS_OPT_BIST -#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD +#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 /* * Enter bist offline mode. This is a fw mode which puts the NIC into a state * where memory BIST tests can be run and not much else can interfere or happen. @@ -1657,7 +1678,7 @@ fail1: return (rc); } -#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ +#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */ __checkReturn efx_rc_t efx_mcdi_bist_start( @@ -1778,7 +1799,7 @@ efx_mcdi_mac_stats( { efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_MAC_STATS_IN_LEN, - MC_CMD_MAC_STATS_OUT_DMA_LEN)]; + MC_CMD_MAC_STATS_V2_OUT_DMA_LEN)]; int clear = (action == EFX_STATS_CLEAR); int upload = (action == EFX_STATS_UPLOAD); int enable = (action == EFX_STATS_ENABLE_NOEVENTS); @@ -1791,7 +1812,7 @@ efx_mcdi_mac_stats( req.emr_in_buf = payload; req.emr_in_length = MC_CMD_MAC_STATS_IN_LEN; req.emr_out_buf = payload; - req.emr_out_length = MC_CMD_MAC_STATS_OUT_DMA_LEN; + req.emr_out_length = MC_CMD_MAC_STATS_V2_OUT_DMA_LEN; MCDI_IN_POPULATE_DWORD_6(req, MAC_STATS_IN_CMD, MAC_STATS_IN_DMA, upload, @@ -1801,19 +1822,35 @@ efx_mcdi_mac_stats( MAC_STATS_IN_PERIODIC_NOEVENT, !events, MAC_STATS_IN_PERIOD_MS, (enable | events) ? 
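The strncpy() replacement above guards against a GET_PHY_CFG name that fills its fixed-width field without a terminating NUL. The same pattern as a standalone sketch (helper name hypothetical; assumes dst_size >= 1):

#include <string.h>

/* Copy a fixed-width, possibly unterminated firmware string into a
 * zeroed buffer so the result is always NUL-terminated. */
static void
fw_string_copy(char *dst, size_t dst_size,
	const char *src, size_t src_field_len)
{
	size_t len = strnlen(src, src_field_len);

	if (len > dst_size - 1)
		len = dst_size - 1;
	(void) memset(dst, 0, dst_size);
	memcpy(dst, src, len);
}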
period_ms : 0); - if (esmp != NULL) { - int bytes = MC_CMD_MAC_NSTATS * sizeof (uint64_t); + if (enable || events || upload) { + const efx_nic_cfg_t *encp = &enp->en_nic_cfg; + uint32_t bytes; + + /* Periodic stats or stats upload require a DMA buffer */ + if (esmp == NULL) { + rc = EINVAL; + goto fail1; + } + + if (encp->enc_mac_stats_nstats < MC_CMD_MAC_NSTATS) { + /* MAC stats count too small for legacy MAC stats */ + rc = ENOSPC; + goto fail2; + } + + bytes = encp->enc_mac_stats_nstats * sizeof (efx_qword_t); - EFX_STATIC_ASSERT(MC_CMD_MAC_NSTATS * sizeof (uint64_t) <= - EFX_MAC_STATS_SIZE); + if (EFSYS_MEM_SIZE(esmp) < bytes) { + /* DMA buffer too small */ + rc = ENOSPC; + goto fail3; + } MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_ADDR_LO, EFSYS_MEM_ADDR(esmp) & 0xffffffff); MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_ADDR_HI, EFSYS_MEM_ADDR(esmp) >> 32); MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_LEN, bytes); - } else { - EFSYS_ASSERT(!upload && !enable && !events); } /* @@ -1831,12 +1868,18 @@ efx_mcdi_mac_stats( if ((req.emr_rc != ENOENT) || (enp->en_rx_qcount + enp->en_tx_qcount != 0)) { rc = req.emr_rc; - goto fail1; + goto fail4; } } return (0); +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); @@ -1921,7 +1964,7 @@ fail1: #endif /* EFSYS_OPT_MAC_STATS */ -#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD +#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 /* * This function returns the pf and vf number of a function. If it is a pf the @@ -2020,7 +2063,7 @@ fail1: return (rc); } -#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ +#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */ __checkReturn efx_rc_t efx_mcdi_set_workaround( diff --git a/drivers/net/sfc/base/efx_mcdi.h b/drivers/net/sfc/base/efx_mcdi.h index 4e69f048..253a9e60 100644 --- a/drivers/net/sfc/base/efx_mcdi.h +++ b/drivers/net/sfc/base/efx_mcdi.h @@ -166,11 +166,11 @@ efx_mcdi_mac_spoofing_supported( #if EFSYS_OPT_BIST -#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD +#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 extern __checkReturn efx_rc_t efx_mcdi_bist_enable_offline( __in efx_nic_t *enp); -#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ +#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */ extern __checkReturn efx_rc_t efx_mcdi_bist_start( __in efx_nic_t *enp, diff --git a/drivers/net/sfc/base/efx_mon.c b/drivers/net/sfc/base/efx_mon.c index 234a4201..9fc268ec 100644 --- a/drivers/net/sfc/base/efx_mon.c +++ b/drivers/net/sfc/base/efx_mon.c @@ -99,7 +99,7 @@ fail1: #if EFSYS_OPT_NAMES -/* START MKCONFIG GENERATED MonitorStatNamesBlock d92af1538001301f */ +/* START MKCONFIG GENERATED MonitorStatNamesBlock 8150a068198c0f96 */ static const char * const __mon_stat_name[] = { "value_2_5v", "value_vccp1", @@ -180,6 +180,10 @@ static const char * const __mon_stat_name[] = { "board_back_temp", "i1v8", "i2v5", + "i3v3", + "i12v0", + "1v3", + "i1v3", }; /* END MKCONFIG GENERATED MonitorStatNamesBlock */ diff --git a/drivers/net/sfc/base/efx_nic.c b/drivers/net/sfc/base/efx_nic.c index e318c17d..6c162e03 100644 --- a/drivers/net/sfc/base/efx_nic.c +++ b/drivers/net/sfc/base/efx_nic.c @@ -7,11 +7,13 @@ #include "efx.h" #include "efx_impl.h" + __checkReturn efx_rc_t efx_family( __in uint16_t venid, __in uint16_t devid, - __out efx_family_t *efp) + __out efx_family_t *efp, + __out unsigned int *membarp) { if (venid == EFX_PCI_VENID_SFC) { switch (devid) { @@ 
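The MAC_STATS changes above replace a compile-time size assertion with run-time checks, since the statistics count now varies per NIC (enc_mac_stats_nstats). The sizing rule they enforce, in isolation (names hypothetical):

#include <errno.h>
#include <stddef.h>
#include <stdint.h>

/* Each MAC statistic is DMAed as one 64-bit counter, so the buffer
 * must hold nstats qwords; otherwise fail as the code above does. */
static int
mac_stats_buf_check(size_t buf_size, uint32_t nstats)
{
	return ((buf_size >= (size_t)nstats * sizeof (uint64_t)) ?
	    0 : ENOSPC);
}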
-21,12 +23,10 @@ efx_family( * Hardware default for PF0 of uninitialised Siena. * manftest must be able to cope with this device id. */ - *efp = EFX_FAMILY_SIENA; - return (0); - case EFX_PCI_DEVID_BETHPAGE: case EFX_PCI_DEVID_SIENA: *efp = EFX_FAMILY_SIENA; + *membarp = EFX_MEM_BAR_SIENA; return (0); #endif /* EFSYS_OPT_SIENA */ @@ -36,17 +36,16 @@ efx_family( * Hardware default for PF0 of uninitialised Huntington. * manftest must be able to cope with this device id. */ - *efp = EFX_FAMILY_HUNTINGTON; - return (0); - case EFX_PCI_DEVID_FARMINGDALE: case EFX_PCI_DEVID_GREENPORT: *efp = EFX_FAMILY_HUNTINGTON; + *membarp = EFX_MEM_BAR_HUNTINGTON_PF; return (0); case EFX_PCI_DEVID_FARMINGDALE_VF: case EFX_PCI_DEVID_GREENPORT_VF: *efp = EFX_FAMILY_HUNTINGTON; + *membarp = EFX_MEM_BAR_HUNTINGTON_VF; return (0); #endif /* EFSYS_OPT_HUNTINGTON */ @@ -56,18 +55,30 @@ efx_family( * Hardware default for PF0 of uninitialised Medford. * manftest must be able to cope with this device id. */ - *efp = EFX_FAMILY_MEDFORD; - return (0); - case EFX_PCI_DEVID_MEDFORD: *efp = EFX_FAMILY_MEDFORD; + *membarp = EFX_MEM_BAR_MEDFORD_PF; return (0); case EFX_PCI_DEVID_MEDFORD_VF: *efp = EFX_FAMILY_MEDFORD; + *membarp = EFX_MEM_BAR_MEDFORD_VF; return (0); #endif /* EFSYS_OPT_MEDFORD */ +#if EFSYS_OPT_MEDFORD2 + case EFX_PCI_DEVID_MEDFORD2_PF_UNINIT: + /* + * Hardware default for PF0 of uninitialised Medford2. + * manftest must be able to cope with this device id. + */ + case EFX_PCI_DEVID_MEDFORD2: + case EFX_PCI_DEVID_MEDFORD2_VF: + *efp = EFX_FAMILY_MEDFORD2; + *membarp = EFX_MEM_BAR_MEDFORD2; + return (0); +#endif /* EFSYS_OPT_MEDFORD2 */ + case EFX_PCI_DEVID_FALCON: /* Obsolete, not supported */ default: break; @@ -78,6 +89,7 @@ efx_family( return (ENOTSUP); } + #if EFSYS_OPT_SIENA static const efx_nic_ops_t __efx_nic_siena_ops = { @@ -135,6 +147,25 @@ static const efx_nic_ops_t __efx_nic_medford_ops = { #endif /* EFSYS_OPT_MEDFORD */ +#if EFSYS_OPT_MEDFORD2 + +static const efx_nic_ops_t __efx_nic_medford2_ops = { + ef10_nic_probe, /* eno_probe */ + medford2_board_cfg, /* eno_board_cfg */ + ef10_nic_set_drv_limits, /* eno_set_drv_limits */ + ef10_nic_reset, /* eno_reset */ + ef10_nic_init, /* eno_init */ + ef10_nic_get_vi_pool, /* eno_get_vi_pool */ + ef10_nic_get_bar_region, /* eno_get_bar_region */ +#if EFSYS_OPT_DIAG + ef10_nic_register_test, /* eno_register_test */ +#endif /* EFSYS_OPT_DIAG */ + ef10_nic_fini, /* eno_fini */ + ef10_nic_unprobe, /* eno_unprobe */ +}; + +#endif /* EFSYS_OPT_MEDFORD2 */ + __checkReturn efx_rc_t efx_nic_create( @@ -213,6 +244,22 @@ efx_nic_create( break; #endif /* EFSYS_OPT_MEDFORD */ +#if EFSYS_OPT_MEDFORD2 + case EFX_FAMILY_MEDFORD2: + enp->en_enop = &__efx_nic_medford2_ops; + enp->en_features = + EFX_FEATURE_IPV6 | + EFX_FEATURE_LINK_EVENTS | + EFX_FEATURE_PERIODIC_MAC_STATS | + EFX_FEATURE_MCDI | + EFX_FEATURE_MAC_HEADER_FILTERS | + EFX_FEATURE_MCDI_DMA | + EFX_FEATURE_PIO_BUFFERS | + EFX_FEATURE_FW_ASSISTED_TSO_V2 | + EFX_FEATURE_PACKED_STREAM; + break; +#endif /* EFSYS_OPT_MEDFORD2 */ + default: rc = ENOTSUP; goto fail2; @@ -243,7 +290,8 @@ fail1: __checkReturn efx_rc_t efx_nic_probe( - __in efx_nic_t *enp) + __in efx_nic_t *enp, + __in efx_fw_variant_t efv) { const efx_nic_ops_t *enop; efx_rc_t rc; @@ -254,7 +302,27 @@ efx_nic_probe( #endif /* EFSYS_OPT_MCDI */ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_PROBE)); + /* Ensure FW variant codes match with MC_CMD_FW codes */ + EFX_STATIC_ASSERT(EFX_FW_VARIANT_FULL_FEATURED == + MC_CMD_FW_FULL_FEATURED); + 
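With the widened efx_family() signature above, device identification and register BAR selection happen in one call. A plausible caller, sketched against the patch's efx.h with error handling trimmed:

#include <errno.h>
#include <stdint.h>

#include "efx.h"

static int
identify_device(uint16_t venid, uint16_t devid)	/* hypothetical caller */
{
	efx_family_t family;
	unsigned int membar;

	if (efx_family(venid, devid, &family, &membar) != 0)
		return (ENOTSUP);	/* not a supported SFC device */

	/* Map PCI BAR 'membar' and hand the mapping to efx_nic_create(). */
	return (0);
}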
EFX_STATIC_ASSERT(EFX_FW_VARIANT_LOW_LATENCY == + MC_CMD_FW_LOW_LATENCY); + EFX_STATIC_ASSERT(EFX_FW_VARIANT_PACKED_STREAM == + MC_CMD_FW_PACKED_STREAM); + EFX_STATIC_ASSERT(EFX_FW_VARIANT_HIGH_TX_RATE == + MC_CMD_FW_HIGH_TX_RATE); + EFX_STATIC_ASSERT(EFX_FW_VARIANT_PACKED_STREAM_HASH_MODE_1 == + MC_CMD_FW_PACKED_STREAM_HASH_MODE_1); + EFX_STATIC_ASSERT(EFX_FW_VARIANT_RULES_ENGINE == + MC_CMD_FW_RULES_ENGINE); + EFX_STATIC_ASSERT(EFX_FW_VARIANT_DPDK == + MC_CMD_FW_DPDK); + EFX_STATIC_ASSERT(EFX_FW_VARIANT_DONT_CARE == + (int)MC_CMD_FW_DONT_CARE); + enop = enp->en_enop; + enp->efv = efv; + if ((rc = enop->eno_probe(enp)) != 0) goto fail1; @@ -536,6 +604,18 @@ efx_nic_get_fw_version( EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI); EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI); + /* Ensure RXDP_FW_ID codes match with MC_CMD_GET_CAPABILITIES codes */ + EFX_STATIC_ASSERT(EFX_RXDP_FULL_FEATURED_FW_ID == + MC_CMD_GET_CAPABILITIES_OUT_RXDP); + EFX_STATIC_ASSERT(EFX_RXDP_LOW_LATENCY_FW_ID == + MC_CMD_GET_CAPABILITIES_OUT_RXDP_LOW_LATENCY); + EFX_STATIC_ASSERT(EFX_RXDP_PACKED_STREAM_FW_ID == + MC_CMD_GET_CAPABILITIES_OUT_RXDP_PACKED_STREAM); + EFX_STATIC_ASSERT(EFX_RXDP_RULES_ENGINE_FW_ID == + MC_CMD_GET_CAPABILITIES_OUT_RXDP_RULES_ENGINE); + EFX_STATIC_ASSERT(EFX_RXDP_DPDK_FW_ID == + MC_CMD_GET_CAPABILITIES_OUT_RXDP_DPDK); + rc = efx_mcdi_version(enp, mc_fw_version, NULL, NULL); if (rc != 0) goto fail2; @@ -607,48 +687,49 @@ efx_loopback_mask( EFSYS_ASSERT3U(loopback_kind, <, EFX_LOOPBACK_NKINDS); EFSYS_ASSERT(maskp != NULL); - /* Assert the MC_CMD_LOOPBACK and EFX_LOOPBACK namespace agree */ - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_NONE == EFX_LOOPBACK_OFF); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_DATA == EFX_LOOPBACK_DATA); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMAC == EFX_LOOPBACK_GMAC); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGMII == EFX_LOOPBACK_XGMII); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGXS == EFX_LOOPBACK_XGXS); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI == EFX_LOOPBACK_XAUI); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII == EFX_LOOPBACK_GMII); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII == EFX_LOOPBACK_SGMII); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGBR == EFX_LOOPBACK_XGBR); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI == EFX_LOOPBACK_XFI); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI_FAR == EFX_LOOPBACK_XAUI_FAR); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII_FAR == EFX_LOOPBACK_GMII_FAR); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII_FAR == EFX_LOOPBACK_SGMII_FAR); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI_FAR == EFX_LOOPBACK_XFI_FAR); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GPHY == EFX_LOOPBACK_GPHY); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PHYXS == EFX_LOOPBACK_PHY_XS); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PCS == EFX_LOOPBACK_PCS); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PMAPMD == EFX_LOOPBACK_PMA_PMD); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XPORT == EFX_LOOPBACK_XPORT); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGMII_WS == EFX_LOOPBACK_XGMII_WS); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI_WS == EFX_LOOPBACK_XAUI_WS); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI_WS_FAR == - EFX_LOOPBACK_XAUI_WS_FAR); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI_WS_NEAR == - EFX_LOOPBACK_XAUI_WS_NEAR); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII_WS == EFX_LOOPBACK_GMII_WS); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI_WS == EFX_LOOPBACK_XFI_WS); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI_WS_FAR == - EFX_LOOPBACK_XFI_WS_FAR); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PHYXS_WS == EFX_LOOPBACK_PHYXS_WS); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PMA_INT == EFX_LOOPBACK_PMA_INT); - 
EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SD_NEAR == EFX_LOOPBACK_SD_NEAR); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SD_FAR == EFX_LOOPBACK_SD_FAR); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PMA_INT_WS == - EFX_LOOPBACK_PMA_INT_WS); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SD_FEP2_WS == - EFX_LOOPBACK_SD_FEP2_WS); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SD_FEP1_5_WS == - EFX_LOOPBACK_SD_FEP1_5_WS); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SD_FEP_WS == EFX_LOOPBACK_SD_FEP_WS); - EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SD_FES_WS == EFX_LOOPBACK_SD_FES_WS); + /* Assert the MC_CMD_LOOPBACK and EFX_LOOPBACK namespaces agree */ +#define LOOPBACK_CHECK(_mcdi, _efx) \ + EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_##_mcdi == EFX_LOOPBACK_##_efx) + + LOOPBACK_CHECK(NONE, OFF); + LOOPBACK_CHECK(DATA, DATA); + LOOPBACK_CHECK(GMAC, GMAC); + LOOPBACK_CHECK(XGMII, XGMII); + LOOPBACK_CHECK(XGXS, XGXS); + LOOPBACK_CHECK(XAUI, XAUI); + LOOPBACK_CHECK(GMII, GMII); + LOOPBACK_CHECK(SGMII, SGMII); + LOOPBACK_CHECK(XGBR, XGBR); + LOOPBACK_CHECK(XFI, XFI); + LOOPBACK_CHECK(XAUI_FAR, XAUI_FAR); + LOOPBACK_CHECK(GMII_FAR, GMII_FAR); + LOOPBACK_CHECK(SGMII_FAR, SGMII_FAR); + LOOPBACK_CHECK(XFI_FAR, XFI_FAR); + LOOPBACK_CHECK(GPHY, GPHY); + LOOPBACK_CHECK(PHYXS, PHY_XS); + LOOPBACK_CHECK(PCS, PCS); + LOOPBACK_CHECK(PMAPMD, PMA_PMD); + LOOPBACK_CHECK(XPORT, XPORT); + LOOPBACK_CHECK(XGMII_WS, XGMII_WS); + LOOPBACK_CHECK(XAUI_WS, XAUI_WS); + LOOPBACK_CHECK(XAUI_WS_FAR, XAUI_WS_FAR); + LOOPBACK_CHECK(XAUI_WS_NEAR, XAUI_WS_NEAR); + LOOPBACK_CHECK(GMII_WS, GMII_WS); + LOOPBACK_CHECK(XFI_WS, XFI_WS); + LOOPBACK_CHECK(XFI_WS_FAR, XFI_WS_FAR); + LOOPBACK_CHECK(PHYXS_WS, PHYXS_WS); + LOOPBACK_CHECK(PMA_INT, PMA_INT); + LOOPBACK_CHECK(SD_NEAR, SD_NEAR); + LOOPBACK_CHECK(SD_FAR, SD_FAR); + LOOPBACK_CHECK(PMA_INT_WS, PMA_INT_WS); + LOOPBACK_CHECK(SD_FEP2_WS, SD_FEP2_WS); + LOOPBACK_CHECK(SD_FEP1_5_WS, SD_FEP1_5_WS); + LOOPBACK_CHECK(SD_FEP_WS, SD_FEP_WS); + LOOPBACK_CHECK(SD_FES_WS, SD_FES_WS); + LOOPBACK_CHECK(AOE_INT_NEAR, AOE_INT_NEAR); + LOOPBACK_CHECK(DATA_WS, DATA_WS); + LOOPBACK_CHECK(FORCE_EXT_LINK, FORCE_EXT_LINK); +#undef LOOPBACK_CHECK /* Build bitmask of possible loopback types */ EFX_ZERO_QWORD(mask); @@ -706,7 +787,7 @@ efx_mcdi_get_loopback_modes( efx_nic_cfg_t *encp = &(enp->en_nic_cfg); efx_mcdi_req_t req; uint8_t payload[MAX(MC_CMD_GET_LOOPBACK_MODES_IN_LEN, - MC_CMD_GET_LOOPBACK_MODES_OUT_LEN)]; + MC_CMD_GET_LOOPBACK_MODES_OUT_V2_LEN)]; efx_qword_t mask; efx_qword_t modes; efx_rc_t rc; @@ -716,7 +797,7 @@ efx_mcdi_get_loopback_modes( req.emr_in_buf = payload; req.emr_in_length = MC_CMD_GET_LOOPBACK_MODES_IN_LEN; req.emr_out_buf = payload; - req.emr_out_length = MC_CMD_GET_LOOPBACK_MODES_OUT_LEN; + req.emr_out_length = MC_CMD_GET_LOOPBACK_MODES_OUT_V2_LEN; efx_mcdi_execute(enp, &req); @@ -757,18 +838,51 @@ efx_mcdi_get_loopback_modes( MC_CMD_GET_LOOPBACK_MODES_OUT_40G_OFST + MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LEN) { /* Response includes 40G loopback modes */ - modes = - *MCDI_OUT2(req, efx_qword_t, GET_LOOPBACK_MODES_OUT_40G); + modes = *MCDI_OUT2(req, efx_qword_t, + GET_LOOPBACK_MODES_OUT_40G); EFX_AND_QWORD(modes, mask); encp->enc_loopback_types[EFX_LINK_40000FDX] = modes; } + if (req.emr_out_length_used >= + MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_OFST + + MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LEN) { + /* Response includes 25G loopback modes */ + modes = *MCDI_OUT2(req, efx_qword_t, + GET_LOOPBACK_MODES_OUT_V2_25G); + EFX_AND_QWORD(modes, mask); + encp->enc_loopback_types[EFX_LINK_25000FDX] = modes; + } + + if (req.emr_out_length_used >= + 
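The LOOPBACK_CHECK macro above uses token pasting so that each namespace check stays on one line. The same trick in a self-contained C11 sketch (the enum values here are illustrative, not the real loopback codes):

#include <assert.h>

enum { MC_CMD_LB_OFF = 0, MC_CMD_LB_DATA = 1 };	/* firmware namespace */
enum { EFX_LB_OFF = 0, EFX_LB_DATA = 1 };	/* driver namespace */

#define LB_CHECK(_mcdi, _efx) \
	static_assert(MC_CMD_LB_##_mcdi == EFX_LB_##_efx, #_mcdi)

LB_CHECK(OFF, OFF);
LB_CHECK(DATA, DATA);
#undef LB_CHECK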
MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_OFST + + MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_LEN) { + /* Response includes 50G loopback modes */ + modes = *MCDI_OUT2(req, efx_qword_t, + GET_LOOPBACK_MODES_OUT_V2_50G); + EFX_AND_QWORD(modes, mask); + encp->enc_loopback_types[EFX_LINK_50000FDX] = modes; + } + + if (req.emr_out_length_used >= + MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_OFST + + MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_LEN) { + /* Response includes 100G loopback modes */ + modes = *MCDI_OUT2(req, efx_qword_t, + GET_LOOPBACK_MODES_OUT_V2_100G); + EFX_AND_QWORD(modes, mask); + encp->enc_loopback_types[EFX_LINK_100000FDX] = modes; + } + EFX_ZERO_QWORD(modes); EFX_SET_QWORD_BIT(modes, EFX_LOOPBACK_OFF); EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_100FDX]); EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_1000FDX]); EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_10000FDX]); EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_40000FDX]); + EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_25000FDX]); + EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_50000FDX]); + EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_100000FDX]); encp->enc_loopback_types[EFX_LINK_UNKNOWN] = modes; return (0); @@ -830,6 +944,82 @@ fail1: return (rc); } +#if EFSYS_OPT_FW_SUBVARIANT_AWARE + + __checkReturn efx_rc_t +efx_nic_get_fw_subvariant( + __in efx_nic_t *enp, + __out efx_nic_fw_subvariant_t *subvariantp) +{ + efx_rc_t rc; + uint32_t value; + + rc = efx_mcdi_get_nic_global(enp, + MC_CMD_SET_NIC_GLOBAL_IN_FIRMWARE_SUBVARIANT, &value); + if (rc != 0) + goto fail1; + + /* Mapping is not required since values match MCDI */ + EFX_STATIC_ASSERT(EFX_NIC_FW_SUBVARIANT_DEFAULT == + MC_CMD_SET_NIC_GLOBAL_IN_FW_SUBVARIANT_DEFAULT); + EFX_STATIC_ASSERT(EFX_NIC_FW_SUBVARIANT_NO_TX_CSUM == + MC_CMD_SET_NIC_GLOBAL_IN_FW_SUBVARIANT_NO_TX_CSUM); + + switch (value) { + case MC_CMD_SET_NIC_GLOBAL_IN_FW_SUBVARIANT_DEFAULT: + case MC_CMD_SET_NIC_GLOBAL_IN_FW_SUBVARIANT_NO_TX_CSUM: + *subvariantp = value; + break; + default: + rc = EINVAL; + goto fail2; + } + + return (0); + +fail2: + EFSYS_PROBE(fail2); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +efx_nic_set_fw_subvariant( + __in efx_nic_t *enp, + __in efx_nic_fw_subvariant_t subvariant) +{ + efx_rc_t rc; + + switch (subvariant) { + case EFX_NIC_FW_SUBVARIANT_DEFAULT: + case EFX_NIC_FW_SUBVARIANT_NO_TX_CSUM: + /* Mapping is not required since values match MCDI */ + break; + default: + rc = EINVAL; + goto fail1; + } + + rc = efx_mcdi_set_nic_global(enp, + MC_CMD_SET_NIC_GLOBAL_IN_FIRMWARE_SUBVARIANT, subvariant); + if (rc != 0) + goto fail2; + + return (0); + +fail2: + EFSYS_PROBE(fail2); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +#endif /* EFSYS_OPT_FW_SUBVARIANT_AWARE */ __checkReturn efx_rc_t efx_nic_check_pcie_link_speed( diff --git a/drivers/net/sfc/base/efx_nvram.c b/drivers/net/sfc/base/efx_nvram.c index c2cc9ad3..be409c3a 100644 --- a/drivers/net/sfc/base/efx_nvram.c +++ b/drivers/net/sfc/base/efx_nvram.c @@ -30,7 +30,7 @@ static const efx_nvram_ops_t __efx_nvram_siena_ops = { #endif /* EFSYS_OPT_SIENA */ -#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD +#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 static const efx_nvram_ops_t __efx_nvram_ef10_ops = { #if EFSYS_OPT_DIAG @@ -49,7 +49,7 @@ static const efx_nvram_ops_t __efx_nvram_ef10_ops = { ef10_nvram_buffer_validate, /* envo_buffer_validate */ }; -#endif /* EFSYS_OPT_HUNTINGTON || 
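The 25G/50G/100G blocks above all follow one idiom: an optional response field is read only if emr_out_length_used shows the firmware actually returned it, so older firmware producing the shorter V1 response still works. That guard, factored out as a sketch:

#include <stddef.h>

/* An optional MCDI response field is present only if the response is
 * long enough to cover it entirely. */
static int
mcdi_field_present(size_t out_len_used, size_t field_ofst, size_t field_len)
{
	return (out_len_used >= field_ofst + field_len);
}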
EFSYS_OPT_MEDFORD */ +#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */ __checkReturn efx_rc_t efx_nvram_init( @@ -81,6 +81,12 @@ efx_nvram_init( break; #endif /* EFSYS_OPT_MEDFORD */ +#if EFSYS_OPT_MEDFORD2 + case EFX_FAMILY_MEDFORD2: + envop = &__efx_nvram_ef10_ops; + break; +#endif /* EFSYS_OPT_MEDFORD2 */ + default: EFSYS_ASSERT(0); rc = ENOTSUP; diff --git a/drivers/net/sfc/base/efx_phy.c b/drivers/net/sfc/base/efx_phy.c index 069c2836..ba2f51c1 100644 --- a/drivers/net/sfc/base/efx_phy.c +++ b/drivers/net/sfc/base/efx_phy.c @@ -27,7 +27,7 @@ static const efx_phy_ops_t __efx_phy_siena_ops = { }; #endif /* EFSYS_OPT_SIENA */ -#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD +#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 static const efx_phy_ops_t __efx_phy_ef10_ops = { ef10_phy_power, /* epo_power */ NULL, /* epo_reset */ @@ -44,7 +44,7 @@ static const efx_phy_ops_t __efx_phy_ef10_ops = { ef10_bist_stop, /* epo_bist_stop */ #endif /* EFSYS_OPT_BIST */ }; -#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ +#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */ __checkReturn efx_rc_t efx_phy_probe( @@ -67,16 +67,25 @@ efx_phy_probe( epop = &__efx_phy_siena_ops; break; #endif /* EFSYS_OPT_SIENA */ + #if EFSYS_OPT_HUNTINGTON case EFX_FAMILY_HUNTINGTON: epop = &__efx_phy_ef10_ops; break; #endif /* EFSYS_OPT_HUNTINGTON */ + #if EFSYS_OPT_MEDFORD case EFX_FAMILY_MEDFORD: epop = &__efx_phy_ef10_ops; break; #endif /* EFSYS_OPT_MEDFORD */ + +#if EFSYS_OPT_MEDFORD2 + case EFX_FAMILY_MEDFORD2: + epop = &__efx_phy_ef10_ops; + break; +#endif /* EFSYS_OPT_MEDFORD2 */ + default: rc = ENOTSUP; goto fail1; @@ -176,6 +185,7 @@ efx_phy_adv_cap_get( break; default: EFSYS_ASSERT(B_FALSE); + *maskp = 0; break; } } diff --git a/drivers/net/sfc/base/efx_port.c b/drivers/net/sfc/base/efx_port.c index a792a9ef..33a1a084 100644 --- a/drivers/net/sfc/base/efx_port.c +++ b/drivers/net/sfc/base/efx_port.c @@ -120,7 +120,7 @@ efx_port_loopback_set( EFSYS_ASSERT(link_mode < EFX_LINK_NMODES); if (EFX_TEST_QWORD_BIT(encp->enc_loopback_types[link_mode], - loopback_type) == 0) { + (int)loopback_type) == 0) { rc = ENOTSUP; goto fail1; } @@ -180,6 +180,9 @@ static const char * const __efx_loopback_type_name[] = { "SD_FEP1_5_WS", "SD_FEP_WS", "SD_FES_WS", + "AOE_INT_NEAR", + "DATA_WS", + "FORCE_EXT_LINK", }; __checkReturn const char * diff --git a/drivers/net/sfc/base/efx_regs_ef10.h b/drivers/net/sfc/base/efx_regs_ef10.h index 5f978305..968aaaca 100644 --- a/drivers/net/sfc/base/efx_regs_ef10.h +++ b/drivers/net/sfc/base/efx_regs_ef10.h @@ -24,7 +24,7 @@ extern "C" { */ #define ER_DZ_BIU_HW_REV_ID_REG_OFST 0x00000000 -/* hunta0,medforda0=pcie_pf_bar2 */ +/* hunta0,medforda0,medford2a0=pf_dbell_bar */ #define ER_DZ_BIU_HW_REV_ID_REG_RESET 0xeb14face @@ -38,7 +38,7 @@ extern "C" { */ #define ER_DZ_BIU_MC_SFT_STATUS_REG_OFST 0x00000010 -/* hunta0,medforda0=pcie_pf_bar2 */ +/* hunta0,medforda0,medford2a0=pf_dbell_bar */ #define ER_DZ_BIU_MC_SFT_STATUS_REG_STEP 4 #define ER_DZ_BIU_MC_SFT_STATUS_REG_ROWS 8 #define ER_DZ_BIU_MC_SFT_STATUS_REG_RESET 0x1111face @@ -54,7 +54,7 @@ extern "C" { */ #define ER_DZ_BIU_INT_ISR_REG_OFST 0x00000090 -/* hunta0,medforda0=pcie_pf_bar2 */ +/* hunta0,medforda0,medford2a0=pf_dbell_bar */ #define ER_DZ_BIU_INT_ISR_REG_RESET 0x0 @@ -68,7 +68,7 @@ extern "C" { */ #define ER_DZ_MC_DB_LWRD_REG_OFST 0x00000200 -/* hunta0,medforda0=pcie_pf_bar2 */ +/* hunta0,medforda0,medford2a0=pf_dbell_bar */ #define ER_DZ_MC_DB_LWRD_REG_RESET 0x0 
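Returning to the EFSYS_OPT_FW_SUBVARIANT_AWARE helpers added in efx_nic.c above: a client that wants the checksum-less datapath sub-variant might use them as below. This is a sketch under the patch's API; per the error-code comments later in this patch, the firmware may refuse the switch (e.g. FILTERS_PRESENT or VIS_PRESENT) while filters or VIs are still allocated:

static efx_rc_t
select_no_tx_csum(efx_nic_t *enp)	/* hypothetical caller */
{
	efx_nic_fw_subvariant_t cur;
	efx_rc_t rc;

	rc = efx_nic_get_fw_subvariant(enp, &cur);
	if (rc != 0 || cur == EFX_NIC_FW_SUBVARIANT_NO_TX_CSUM)
		return (rc);

	return (efx_nic_set_fw_subvariant(enp,
	    EFX_NIC_FW_SUBVARIANT_NO_TX_CSUM));
}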
@@ -82,7 +82,7 @@ extern "C" { */ #define ER_DZ_MC_DB_HWRD_REG_OFST 0x00000204 -/* hunta0,medforda0=pcie_pf_bar2 */ +/* hunta0,medforda0,medford2a0=pf_dbell_bar */ #define ER_DZ_MC_DB_HWRD_REG_RESET 0x0 @@ -96,7 +96,7 @@ extern "C" { */ #define ER_DZ_EVQ_RPTR_REG_OFST 0x00000400 -/* hunta0,medforda0=pcie_pf_bar2 */ +/* hunta0,medforda0,medford2a0=pf_dbell_bar */ #define ER_DZ_EVQ_RPTR_REG_STEP 8192 #define ER_DZ_EVQ_RPTR_REG_ROWS 2048 #define ER_DZ_EVQ_RPTR_REG_RESET 0x0 @@ -108,31 +108,125 @@ extern "C" { #define ERF_DZ_EVQ_RPTR_WIDTH 15 +/* + * EVQ_RPTR_REG_64K(32bit): + * + */ + +#define ER_FZ_EVQ_RPTR_REG_64K_OFST 0x00000400 +/* medford2a0=pf_dbell_bar */ +#define ER_FZ_EVQ_RPTR_REG_64K_STEP 65536 +#define ER_FZ_EVQ_RPTR_REG_64K_ROWS 2048 +#define ER_FZ_EVQ_RPTR_REG_64K_RESET 0x0 + + +#define ERF_FZ_EVQ_RPTR_VLD_LBN 15 +#define ERF_FZ_EVQ_RPTR_VLD_WIDTH 1 +#define ERF_FZ_EVQ_RPTR_LBN 0 +#define ERF_FZ_EVQ_RPTR_WIDTH 15 + + +/* + * EVQ_RPTR_REG_16K(32bit): + * + */ + +#define ER_FZ_EVQ_RPTR_REG_16K_OFST 0x00000400 +/* medford2a0=pf_dbell_bar */ +#define ER_FZ_EVQ_RPTR_REG_16K_STEP 16384 +#define ER_FZ_EVQ_RPTR_REG_16K_ROWS 2048 +#define ER_FZ_EVQ_RPTR_REG_16K_RESET 0x0 + + +/* defined as ERF_FZ_EVQ_RPTR_VLD_LBN 15; */ +/* defined as ERF_FZ_EVQ_RPTR_VLD_WIDTH 1 */ +/* defined as ERF_FZ_EVQ_RPTR_LBN 0; */ +/* defined as ERF_FZ_EVQ_RPTR_WIDTH 15 */ + + +/* + * EVQ_TMR_REG_64K(32bit): + * + */ + +#define ER_FZ_EVQ_TMR_REG_64K_OFST 0x00000420 +/* medford2a0=pf_dbell_bar */ +#define ER_FZ_EVQ_TMR_REG_64K_STEP 65536 +#define ER_FZ_EVQ_TMR_REG_64K_ROWS 2048 +#define ER_FZ_EVQ_TMR_REG_64K_RESET 0x0 + + +#define ERF_FZ_TC_TMR_REL_VAL_LBN 16 +#define ERF_FZ_TC_TMR_REL_VAL_WIDTH 14 +#define ERF_FZ_TC_TIMER_MODE_LBN 14 +#define ERF_FZ_TC_TIMER_MODE_WIDTH 2 +#define ERF_FZ_TC_TIMER_VAL_LBN 0 +#define ERF_FZ_TC_TIMER_VAL_WIDTH 14 + + +/* + * EVQ_TMR_REG_16K(32bit): + * + */ + +#define ER_FZ_EVQ_TMR_REG_16K_OFST 0x00000420 +/* medford2a0=pf_dbell_bar */ +#define ER_FZ_EVQ_TMR_REG_16K_STEP 16384 +#define ER_FZ_EVQ_TMR_REG_16K_ROWS 2048 +#define ER_FZ_EVQ_TMR_REG_16K_RESET 0x0 + + +/* defined as ERF_FZ_TC_TMR_REL_VAL_LBN 16; */ +/* defined as ERF_FZ_TC_TMR_REL_VAL_WIDTH 14 */ +/* defined as ERF_FZ_TC_TIMER_MODE_LBN 14; */ +/* defined as ERF_FZ_TC_TIMER_MODE_WIDTH 2 */ +/* defined as ERF_FZ_TC_TIMER_VAL_LBN 0; */ +/* defined as ERF_FZ_TC_TIMER_VAL_WIDTH 14 */ + + /* * EVQ_TMR_REG(32bit): * */ #define ER_DZ_EVQ_TMR_REG_OFST 0x00000420 -/* hunta0,medforda0=pcie_pf_bar2 */ +/* hunta0,medforda0,medford2a0=pf_dbell_bar */ #define ER_DZ_EVQ_TMR_REG_STEP 8192 #define ER_DZ_EVQ_TMR_REG_ROWS 2048 #define ER_DZ_EVQ_TMR_REG_RESET 0x0 +/* defined as ERF_FZ_TC_TMR_REL_VAL_LBN 16; */ +/* defined as ERF_FZ_TC_TMR_REL_VAL_WIDTH 14 */ #define ERF_DZ_TC_TIMER_MODE_LBN 14 #define ERF_DZ_TC_TIMER_MODE_WIDTH 2 #define ERF_DZ_TC_TIMER_VAL_LBN 0 #define ERF_DZ_TC_TIMER_VAL_WIDTH 14 +/* + * RX_DESC_UPD_REG_16K(32bit): + * + */ + +#define ER_FZ_RX_DESC_UPD_REG_16K_OFST 0x00000830 +/* medford2a0=pf_dbell_bar */ +#define ER_FZ_RX_DESC_UPD_REG_16K_STEP 16384 +#define ER_FZ_RX_DESC_UPD_REG_16K_ROWS 2048 +#define ER_FZ_RX_DESC_UPD_REG_16K_RESET 0x0 + + +#define ERF_FZ_RX_DESC_WPTR_LBN 0 +#define ERF_FZ_RX_DESC_WPTR_WIDTH 12 + + /* * RX_DESC_UPD_REG(32bit): * */ #define ER_DZ_RX_DESC_UPD_REG_OFST 0x00000830 -/* hunta0,medforda0=pcie_pf_bar2 */ +/* hunta0,medforda0,medford2a0=pf_dbell_bar */ #define ER_DZ_RX_DESC_UPD_REG_STEP 8192 #define ER_DZ_RX_DESC_UPD_REG_ROWS 2048 #define ER_DZ_RX_DESC_UPD_REG_RESET 0x0 @@ -141,13 +235,74 @@ 
extern "C" { #define ERF_DZ_RX_DESC_WPTR_LBN 0 #define ERF_DZ_RX_DESC_WPTR_WIDTH 12 + +/* + * RX_DESC_UPD_REG_64K(32bit): + * + */ + +#define ER_FZ_RX_DESC_UPD_REG_64K_OFST 0x00000830 +/* medford2a0=pf_dbell_bar */ +#define ER_FZ_RX_DESC_UPD_REG_64K_STEP 65536 +#define ER_FZ_RX_DESC_UPD_REG_64K_ROWS 2048 +#define ER_FZ_RX_DESC_UPD_REG_64K_RESET 0x0 + + +/* defined as ERF_FZ_RX_DESC_WPTR_LBN 0; */ +/* defined as ERF_FZ_RX_DESC_WPTR_WIDTH 12 */ + + +/* + * TX_DESC_UPD_REG_64K(96bit): + * + */ + +#define ER_FZ_TX_DESC_UPD_REG_64K_OFST 0x00000a10 +/* medford2a0=pf_dbell_bar */ +#define ER_FZ_TX_DESC_UPD_REG_64K_STEP 65536 +#define ER_FZ_TX_DESC_UPD_REG_64K_ROWS 2048 +#define ER_FZ_TX_DESC_UPD_REG_64K_RESET 0x0 + + +#define ERF_FZ_RSVD_LBN 76 +#define ERF_FZ_RSVD_WIDTH 20 +#define ERF_FZ_TX_DESC_WPTR_LBN 64 +#define ERF_FZ_TX_DESC_WPTR_WIDTH 12 +#define ERF_FZ_TX_DESC_HWORD_LBN 32 +#define ERF_FZ_TX_DESC_HWORD_WIDTH 32 +#define ERF_FZ_TX_DESC_LWORD_LBN 0 +#define ERF_FZ_TX_DESC_LWORD_WIDTH 32 + + +/* + * TX_DESC_UPD_REG_16K(96bit): + * + */ + +#define ER_FZ_TX_DESC_UPD_REG_16K_OFST 0x00000a10 +/* medford2a0=pf_dbell_bar */ +#define ER_FZ_TX_DESC_UPD_REG_16K_STEP 16384 +#define ER_FZ_TX_DESC_UPD_REG_16K_ROWS 2048 +#define ER_FZ_TX_DESC_UPD_REG_16K_RESET 0x0 + + +/* defined as ERF_FZ_RSVD_LBN 76; */ +/* defined as ERF_FZ_RSVD_WIDTH 20 */ +/* defined as ERF_FZ_TX_DESC_WPTR_LBN 64; */ +/* defined as ERF_FZ_TX_DESC_WPTR_WIDTH 12 */ +/* defined as ERF_FZ_TX_DESC_HWORD_LBN 32; */ +/* defined as ERF_FZ_TX_DESC_HWORD_WIDTH 32 */ +/* defined as ERF_FZ_TX_DESC_LWORD_LBN 0; */ +/* defined as ERF_FZ_TX_DESC_LWORD_WIDTH 32 */ + + /* * TX_DESC_UPD_REG(96bit): * */ #define ER_DZ_TX_DESC_UPD_REG_OFST 0x00000a10 -/* hunta0,medforda0=pcie_pf_bar2 */ +/* hunta0,medforda0,medford2a0=pf_dbell_bar */ #define ER_DZ_TX_DESC_UPD_REG_STEP 8192 #define ER_DZ_TX_DESC_UPD_REG_ROWS 2048 #define ER_DZ_TX_DESC_UPD_REG_RESET 0x0 @@ -233,16 +388,24 @@ extern "C" { #define ESF_DZ_RX_EV_SOFT2_WIDTH 2 #define ESF_DZ_RX_DSC_PTR_LBITS_LBN 48 #define ESF_DZ_RX_DSC_PTR_LBITS_WIDTH 4 -#define ESF_DZ_RX_L4_CLASS_LBN 45 -#define ESF_DZ_RX_L4_CLASS_WIDTH 3 -#define ESE_DZ_L4_CLASS_RSVD7 7 -#define ESE_DZ_L4_CLASS_RSVD6 6 -#define ESE_DZ_L4_CLASS_RSVD5 5 -#define ESE_DZ_L4_CLASS_RSVD4 4 -#define ESE_DZ_L4_CLASS_RSVD3 3 -#define ESE_DZ_L4_CLASS_UDP 2 -#define ESE_DZ_L4_CLASS_TCP 1 -#define ESE_DZ_L4_CLASS_UNKNOWN 0 +#define ESF_DE_RX_L4_CLASS_LBN 45 +#define ESF_DE_RX_L4_CLASS_WIDTH 3 +#define ESE_DE_L4_CLASS_RSVD7 7 +#define ESE_DE_L4_CLASS_RSVD6 6 +#define ESE_DE_L4_CLASS_RSVD5 5 +#define ESE_DE_L4_CLASS_RSVD4 4 +#define ESE_DE_L4_CLASS_RSVD3 3 +#define ESE_DE_L4_CLASS_UDP 2 +#define ESE_DE_L4_CLASS_TCP 1 +#define ESE_DE_L4_CLASS_UNKNOWN 0 +#define ESF_FZ_RX_FASTPD_INDCTR_LBN 47 +#define ESF_FZ_RX_FASTPD_INDCTR_WIDTH 1 +#define ESF_FZ_RX_L4_CLASS_LBN 45 +#define ESF_FZ_RX_L4_CLASS_WIDTH 2 +#define ESE_FZ_L4_CLASS_RSVD3 3 +#define ESE_FZ_L4_CLASS_UDP 2 +#define ESE_FZ_L4_CLASS_TCP 1 +#define ESE_FZ_L4_CLASS_UNKNOWN 0 #define ESF_DZ_RX_L3_CLASS_LBN 42 #define ESF_DZ_RX_L3_CLASS_WIDTH 3 #define ESE_DZ_L3_CLASS_RSVD7 7 @@ -289,6 +452,8 @@ extern "C" { #define ESF_EZ_RX_ABORT_WIDTH 1 #define ESF_DZ_RX_ECC_ERR_LBN 29 #define ESF_DZ_RX_ECC_ERR_WIDTH 1 +#define ESF_DZ_RX_TRUNC_ERR_LBN 29 +#define ESF_DZ_RX_TRUNC_ERR_WIDTH 1 #define ESF_DZ_RX_CRC1_ERR_LBN 28 #define ESF_DZ_RX_CRC1_ERR_WIDTH 1 #define ESF_DZ_RX_CRC0_ERR_LBN 27 @@ -419,6 +584,8 @@ extern "C" { #define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0 #define ESF_DZ_TX_TSO_OPTION_TYPE_LBN 56 #define 
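Every ESF_*_LBN/_WIDTH pair above locates a bitfield inside an event or descriptor; the narrowed 2-bit ESF_FZ_RX_L4_CLASS field is decoded exactly like the old 3-bit ESF_DE_RX_L4_CLASS one. Generic extraction looks like this (a sketch; libefx itself goes through its EFX_*_FIELD macros, and widths here are assumed below 64):

#include <stdint.h>

/* Extract the bitfield described by an (LBN, WIDTH) pair from a 64-bit
 * event word. */
static inline uint64_t
ev_field(uint64_t ev, unsigned int lbn, unsigned int width)
{
	return ((ev >> lbn) & ((UINT64_C(1) << width) - 1));
}

/* e.g. ev_field(rx_ev, 45, 2) reads ESF_FZ_RX_L4_CLASS. */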
ESF_DZ_TX_TSO_OPTION_TYPE_WIDTH 4 +#define ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B 3 +#define ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A 2 #define ESE_DZ_TX_TSO_OPTION_DESC_ENCAP 1 #define ESE_DZ_TX_TSO_OPTION_DESC_NORMAL 0 #define ESF_DZ_TX_TSO_TCP_FLAGS_LBN 48 @@ -429,7 +596,7 @@ extern "C" { #define ESF_DZ_TX_TSO_TCP_SEQNO_WIDTH 32 -/* TX_TSO_FATSO2A_DESC */ +/* ES_TX_TSO_V2_DESC_A */ #define ESF_DZ_TX_DESC_IS_OPT_LBN 63 #define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1 #define ESF_DZ_TX_OPTION_TYPE_LBN 60 @@ -449,7 +616,7 @@ extern "C" { #define ESF_DZ_TX_TSO_TCP_SEQNO_WIDTH 32 -/* TX_TSO_FATSO2B_DESC */ +/* ES_TX_TSO_V2_DESC_B */ #define ESF_DZ_TX_DESC_IS_OPT_LBN 63 #define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1 #define ESF_DZ_TX_OPTION_TYPE_LBN 60 @@ -463,12 +630,10 @@ extern "C" { #define ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A 2 #define ESE_DZ_TX_TSO_OPTION_DESC_ENCAP 1 #define ESE_DZ_TX_TSO_OPTION_DESC_NORMAL 0 -#define ESF_DZ_TX_TSO_OUTER_IP_ID_LBN 16 -#define ESF_DZ_TX_TSO_OUTER_IP_ID_WIDTH 16 #define ESF_DZ_TX_TSO_TCP_MSS_LBN 32 #define ESF_DZ_TX_TSO_TCP_MSS_WIDTH 16 -#define ESF_DZ_TX_TSO_INNER_PE_CSUM_LBN 0 -#define ESF_DZ_TX_TSO_INNER_PE_CSUM_WIDTH 16 +#define ESF_DZ_TX_TSO_OUTER_IPID_LBN 0 +#define ESF_DZ_TX_TSO_OUTER_IPID_WIDTH 16 /* ES_TX_VLAN_DESC */ @@ -533,6 +698,21 @@ extern "C" { #define ES_DZ_PS_RX_PREFIX_ORIG_LEN_LBN 48 #define ES_DZ_PS_RX_PREFIX_ORIG_LEN_WIDTH 16 +/* Equal stride super-buffer RX packet prefix (see SF-119419-TC) */ +#define ES_EZ_ESSB_RX_PREFIX_LEN 8 +#define ES_EZ_ESSB_RX_PREFIX_DATA_LEN_LBN 0 +#define ES_EZ_ESSB_RX_PREFIX_DATA_LEN_WIDTH 16 +#define ES_EZ_ESSB_RX_PREFIX_MARK_LBN 16 +#define ES_EZ_ESSB_RX_PREFIX_MARK_WIDTH 8 +#define ES_EZ_ESSB_RX_PREFIX_HASH_VALID_LBN 28 +#define ES_EZ_ESSB_RX_PREFIX_HASH_VALID_WIDTH 1 +#define ES_EZ_ESSB_RX_PREFIX_MARK_VALID_LBN 29 +#define ES_EZ_ESSB_RX_PREFIX_MARK_VALID_WIDTH 1 +#define ES_EZ_ESSB_RX_PREFIX_MATCH_FLAG_LBN 30 +#define ES_EZ_ESSB_RX_PREFIX_MATCH_FLAG_WIDTH 1 +#define ES_EZ_ESSB_RX_PREFIX_HASH_LBN 32 +#define ES_EZ_ESSB_RX_PREFIX_HASH_WIDTH 32 + /* * An extra flag for the packed stream mode, * signalling the start of a new buffer diff --git a/drivers/net/sfc/base/efx_regs_mcdi.h b/drivers/net/sfc/base/efx_regs_mcdi.h index 7389877a..cf8a7936 100644 --- a/drivers/net/sfc/base/efx_regs_mcdi.h +++ b/drivers/net/sfc/base/efx_regs_mcdi.h @@ -280,7 +280,8 @@ #define MC_CMD_ERR_NO_PRIVILEGE 0x1013 /* Workaround 26807 could not be turned on/off because some functions * have already installed filters. See the comment at - * MC_CMD_WORKAROUND_BUG26807. */ + * MC_CMD_WORKAROUND_BUG26807. + * May also be returned for other operations such as sub-variant switching. */ #define MC_CMD_ERR_FILTERS_PRESENT 0x1014 /* The clock whose frequency you've attempted to set set * doesn't exist on this NIC */ @@ -291,6 +292,18 @@ /* This command needs to be processed in the background but there were no * resources to do so. Send it again after a command has completed. */ #define MC_CMD_ERR_QUEUE_FULL 0x1017 +/* The operation could not be completed because the PCIe link has gone + * away. This error code is never expected to be returned over the TLP + * transport. */ +#define MC_CMD_ERR_NO_PCIE 0x1018 +/* The operation could not be completed because the datapath has gone + * away.
This is distinct from MC_CMD_ERR_DATAPATH_DISABLED in that the + * datapath absence may be temporary. */ +#define MC_CMD_ERR_NO_DATAPATH 0x1019 +/* The operation could not complete because some VIs are allocated */ +#define MC_CMD_ERR_VIS_PRESENT 0x101a +/* The operation could not complete because some PIO buffers are allocated */ +#define MC_CMD_ERR_PIOBUFS_PRESENT 0x101b #define MC_CMD_ERR_CODE_OFST 0 @@ -311,10 +324,17 @@ #define SIENA_MC_BOOTROM_COPYCODE_VEC (0x800 - 3 * 0x4) #define HUNT_MC_BOOTROM_COPYCODE_VEC (0x8000 - 3 * 0x4) #define MEDFORD_MC_BOOTROM_COPYCODE_VEC (0x10000 - 3 * 0x4) -/* Points to the recovery mode entry point. */ +/* Points to the recovery mode entry point. Misnamed but kept for compatibility. */ #define SIENA_MC_BOOTROM_NOFLASH_VEC (0x800 - 2 * 0x4) #define HUNT_MC_BOOTROM_NOFLASH_VEC (0x8000 - 2 * 0x4) #define MEDFORD_MC_BOOTROM_NOFLASH_VEC (0x10000 - 2 * 0x4) +/* Points to the recovery mode entry point. Same as above, but the right name. */ +#define SIENA_MC_BOOTROM_RECOVERY_VEC (0x800 - 2 * 0x4) +#define HUNT_MC_BOOTROM_RECOVERY_VEC (0x8000 - 2 * 0x4) +#define MEDFORD_MC_BOOTROM_RECOVERY_VEC (0x10000 - 2 * 0x4) + +/* Points to noflash mode entry point. */ +#define MEDFORD_MC_BOOTROM_REAL_NOFLASH_VEC (0x10000 - 4 * 0x4) /* The command set exported by the boot ROM (MCDI v0) */ #define MC_CMD_GET_VERSION_V0_SUPPORTED_FUNCS { \ @@ -368,7 +388,7 @@ #define MCDI_EVENT_LEVEL_LBN 33 #define MCDI_EVENT_LEVEL_WIDTH 3 /* enum: Info. */ -#define MCDI_EVENT_LEVEL_INFO 0x0 +#define MCDI_EVENT_LEVEL_INFO 0x0 /* enum: Warning. */ #define MCDI_EVENT_LEVEL_WARN 0x1 /* enum: Error. */ @@ -376,6 +396,7 @@ /* enum: Fatal. */ #define MCDI_EVENT_LEVEL_FATAL 0x3 #define MCDI_EVENT_DATA_OFST 0 +#define MCDI_EVENT_DATA_LEN 4 #define MCDI_EVENT_CMDDONE_SEQ_LBN 0 #define MCDI_EVENT_CMDDONE_SEQ_WIDTH 8 #define MCDI_EVENT_CMDDONE_DATALEN_LBN 8 @@ -386,14 +407,22 @@ #define MCDI_EVENT_LINKCHANGE_LP_CAP_WIDTH 16 #define MCDI_EVENT_LINKCHANGE_SPEED_LBN 16 #define MCDI_EVENT_LINKCHANGE_SPEED_WIDTH 4 +/* enum: Link is down or link speed could not be determined */ +#define MCDI_EVENT_LINKCHANGE_SPEED_UNKNOWN 0x0 /* enum: 100Mbs */ -#define MCDI_EVENT_LINKCHANGE_SPEED_100M 0x1 +#define MCDI_EVENT_LINKCHANGE_SPEED_100M 0x1 /* enum: 1Gbs */ -#define MCDI_EVENT_LINKCHANGE_SPEED_1G 0x2 +#define MCDI_EVENT_LINKCHANGE_SPEED_1G 0x2 /* enum: 10Gbs */ -#define MCDI_EVENT_LINKCHANGE_SPEED_10G 0x3 +#define MCDI_EVENT_LINKCHANGE_SPEED_10G 0x3 /* enum: 40Gbs */ -#define MCDI_EVENT_LINKCHANGE_SPEED_40G 0x4 +#define MCDI_EVENT_LINKCHANGE_SPEED_40G 0x4 +/* enum: 25Gbs */ +#define MCDI_EVENT_LINKCHANGE_SPEED_25G 0x5 +/* enum: 50Gbs */ +#define MCDI_EVENT_LINKCHANGE_SPEED_50G 0x6 +/* enum: 100Gbs */ +#define MCDI_EVENT_LINKCHANGE_SPEED_100G 0x7 #define MCDI_EVENT_LINKCHANGE_FCNTL_LBN 20 #define MCDI_EVENT_LINKCHANGE_FCNTL_WIDTH 4 #define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_LBN 24 @@ -482,8 +511,23 @@ #define MCDI_EVENT_AOE_INVALID_FPGA_FLASH_TYPE 0xf /* enum: Notify that the attempt to run FPGA Controller firmware timedout */ #define MCDI_EVENT_AOE_FC_RUN_TIMEDOUT 0x10 +/* enum: Failure to probe one or more FPGA boot flash chips */ +#define MCDI_EVENT_AOE_FPGA_BOOT_FLASH_INVALID 0x11 +/* enum: FPGA boot-flash contains an invalid image header */ +#define MCDI_EVENT_AOE_FPGA_BOOT_FLASH_HDR_INVALID 0x12 +/* enum: Failed to program clocks required by the FPGA */ +#define MCDI_EVENT_AOE_FPGA_CLOCKS_PROGRAM_FAILED 0x13 +/* enum: Notify that FPGA Controller is alive to serve MCDI requests */ +#define
MCDI_EVENT_AOE_FC_RUNNING 0x14 #define MCDI_EVENT_AOE_ERR_DATA_LBN 8 #define MCDI_EVENT_AOE_ERR_DATA_WIDTH 8 +#define MCDI_EVENT_AOE_ERR_FC_ASSERT_INFO_LBN 8 +#define MCDI_EVENT_AOE_ERR_FC_ASSERT_INFO_WIDTH 8 +/* enum: FC Assert happened, but the register information is not available */ +#define MCDI_EVENT_AOE_ERR_FC_ASSERT_SEEN 0x0 +/* enum: The register information for FC Assert is ready for reading by driver + */ +#define MCDI_EVENT_AOE_ERR_FC_ASSERT_DATA_READY 0x1 #define MCDI_EVENT_AOE_ERR_CODE_FPGA_HEADER_VERIFY_FAILED_LBN 8 #define MCDI_EVENT_AOE_ERR_CODE_FPGA_HEADER_VERIFY_FAILED_WIDTH 8 /* enum: Reading from NV failed */ @@ -536,6 +580,22 @@ #define MCDI_EVENT_MUM_WATCHDOG 0x3 #define MCDI_EVENT_MUM_ERR_DATA_LBN 8 #define MCDI_EVENT_MUM_ERR_DATA_WIDTH 8 +#define MCDI_EVENT_DBRET_SEQ_LBN 0 +#define MCDI_EVENT_DBRET_SEQ_WIDTH 8 +#define MCDI_EVENT_SUC_ERR_TYPE_LBN 0 +#define MCDI_EVENT_SUC_ERR_TYPE_WIDTH 8 +/* enum: Corrupted or bad SUC application. */ +#define MCDI_EVENT_SUC_BAD_APP 0x1 +/* enum: SUC application reported an assert. */ +#define MCDI_EVENT_SUC_ASSERT 0x2 +/* enum: SUC application reported an exception. */ +#define MCDI_EVENT_SUC_EXCEPTION 0x3 +/* enum: SUC watchdog timer expired. */ +#define MCDI_EVENT_SUC_WATCHDOG 0x4 +#define MCDI_EVENT_SUC_ERR_ADDRESS_LBN 8 +#define MCDI_EVENT_SUC_ERR_ADDRESS_WIDTH 24 +#define MCDI_EVENT_SUC_ERR_DATA_LBN 8 +#define MCDI_EVENT_SUC_ERR_DATA_WIDTH 24 #define MCDI_EVENT_DATA_LBN 0 #define MCDI_EVENT_DATA_WIDTH 32 #define MCDI_EVENT_SRC_LBN 36 @@ -569,23 +629,23 @@ /* enum: Transmit error */ #define MCDI_EVENT_CODE_TX_ERR 0xb /* enum: Tx flush has completed */ -#define MCDI_EVENT_CODE_TX_FLUSH 0xc +#define MCDI_EVENT_CODE_TX_FLUSH 0xc /* enum: PTP packet received timestamp */ -#define MCDI_EVENT_CODE_PTP_RX 0xd +#define MCDI_EVENT_CODE_PTP_RX 0xd /* enum: PTP NIC failure */ -#define MCDI_EVENT_CODE_PTP_FAULT 0xe +#define MCDI_EVENT_CODE_PTP_FAULT 0xe /* enum: PTP PPS event */ -#define MCDI_EVENT_CODE_PTP_PPS 0xf +#define MCDI_EVENT_CODE_PTP_PPS 0xf /* enum: Rx flush has completed */ -#define MCDI_EVENT_CODE_RX_FLUSH 0x10 +#define MCDI_EVENT_CODE_RX_FLUSH 0x10 /* enum: Receive error */ #define MCDI_EVENT_CODE_RX_ERR 0x11 /* enum: AOE fault */ -#define MCDI_EVENT_CODE_AOE 0x12 +#define MCDI_EVENT_CODE_AOE 0x12 /* enum: Network port calibration failed (VCAL). */ -#define MCDI_EVENT_CODE_VCAL_FAIL 0x13 +#define MCDI_EVENT_CODE_VCAL_FAIL 0x13 /* enum: HW PPS event */ -#define MCDI_EVENT_CODE_HW_PPS 0x14 +#define MCDI_EVENT_CODE_HW_PPS 0x14 /* enum: The MC has rebooted (huntington and later, siena uses CODE_REBOOT and * a different format) */ @@ -608,73 +668,99 @@ * been processed and it may now resend the command */ #define MCDI_EVENT_CODE_PROXY_RESPONSE 0x1d +/* enum: MCDI command accepted. New commands can be issued but this command is + * not done yet. + */ +#define MCDI_EVENT_CODE_DBRET 0x1e +/* enum: The MC has detected a fault on the SUC */ +#define MCDI_EVENT_CODE_SUC 0x1f /* enum: Artificial event generated by host and posted via MC for test * purposes.
*/ -#define MCDI_EVENT_CODE_TESTGEN 0xfa +#define MCDI_EVENT_CODE_TESTGEN 0xfa #define MCDI_EVENT_CMDDONE_DATA_OFST 0 +#define MCDI_EVENT_CMDDONE_DATA_LEN 4 #define MCDI_EVENT_CMDDONE_DATA_LBN 0 #define MCDI_EVENT_CMDDONE_DATA_WIDTH 32 #define MCDI_EVENT_LINKCHANGE_DATA_OFST 0 +#define MCDI_EVENT_LINKCHANGE_DATA_LEN 4 #define MCDI_EVENT_LINKCHANGE_DATA_LBN 0 #define MCDI_EVENT_LINKCHANGE_DATA_WIDTH 32 #define MCDI_EVENT_SENSOREVT_DATA_OFST 0 +#define MCDI_EVENT_SENSOREVT_DATA_LEN 4 #define MCDI_EVENT_SENSOREVT_DATA_LBN 0 #define MCDI_EVENT_SENSOREVT_DATA_WIDTH 32 #define MCDI_EVENT_MAC_STATS_DMA_GENERATION_OFST 0 +#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_LEN 4 #define MCDI_EVENT_MAC_STATS_DMA_GENERATION_LBN 0 #define MCDI_EVENT_MAC_STATS_DMA_GENERATION_WIDTH 32 #define MCDI_EVENT_TX_ERR_DATA_OFST 0 +#define MCDI_EVENT_TX_ERR_DATA_LEN 4 #define MCDI_EVENT_TX_ERR_DATA_LBN 0 #define MCDI_EVENT_TX_ERR_DATA_WIDTH 32 /* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the seconds field of * timestamp */ #define MCDI_EVENT_PTP_SECONDS_OFST 0 +#define MCDI_EVENT_PTP_SECONDS_LEN 4 #define MCDI_EVENT_PTP_SECONDS_LBN 0 #define MCDI_EVENT_PTP_SECONDS_WIDTH 32 /* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the major field of * timestamp */ #define MCDI_EVENT_PTP_MAJOR_OFST 0 +#define MCDI_EVENT_PTP_MAJOR_LEN 4 #define MCDI_EVENT_PTP_MAJOR_LBN 0 #define MCDI_EVENT_PTP_MAJOR_WIDTH 32 /* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the nanoseconds field * of timestamp */ #define MCDI_EVENT_PTP_NANOSECONDS_OFST 0 +#define MCDI_EVENT_PTP_NANOSECONDS_LEN 4 #define MCDI_EVENT_PTP_NANOSECONDS_LBN 0 #define MCDI_EVENT_PTP_NANOSECONDS_WIDTH 32 /* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the minor field of * timestamp */ #define MCDI_EVENT_PTP_MINOR_OFST 0 +#define MCDI_EVENT_PTP_MINOR_LEN 4 #define MCDI_EVENT_PTP_MINOR_LBN 0 #define MCDI_EVENT_PTP_MINOR_WIDTH 32 /* For CODE_PTP_RX events, the lowest four bytes of sourceUUID from PTP packet */ #define MCDI_EVENT_PTP_UUID_OFST 0 +#define MCDI_EVENT_PTP_UUID_LEN 4 #define MCDI_EVENT_PTP_UUID_LBN 0 #define MCDI_EVENT_PTP_UUID_WIDTH 32 #define MCDI_EVENT_RX_ERR_DATA_OFST 0 +#define MCDI_EVENT_RX_ERR_DATA_LEN 4 #define MCDI_EVENT_RX_ERR_DATA_LBN 0 #define MCDI_EVENT_RX_ERR_DATA_WIDTH 32 #define MCDI_EVENT_PAR_ERR_DATA_OFST 0 +#define MCDI_EVENT_PAR_ERR_DATA_LEN 4 #define MCDI_EVENT_PAR_ERR_DATA_LBN 0 #define MCDI_EVENT_PAR_ERR_DATA_WIDTH 32 #define MCDI_EVENT_ECC_CORR_ERR_DATA_OFST 0 +#define MCDI_EVENT_ECC_CORR_ERR_DATA_LEN 4 #define MCDI_EVENT_ECC_CORR_ERR_DATA_LBN 0 #define MCDI_EVENT_ECC_CORR_ERR_DATA_WIDTH 32 #define MCDI_EVENT_ECC_FATAL_ERR_DATA_OFST 0 +#define MCDI_EVENT_ECC_FATAL_ERR_DATA_LEN 4 #define MCDI_EVENT_ECC_FATAL_ERR_DATA_LBN 0 #define MCDI_EVENT_ECC_FATAL_ERR_DATA_WIDTH 32 /* For CODE_PTP_TIME events, the major value of the PTP clock */ #define MCDI_EVENT_PTP_TIME_MAJOR_OFST 0 +#define MCDI_EVENT_PTP_TIME_MAJOR_LEN 4 #define MCDI_EVENT_PTP_TIME_MAJOR_LBN 0 #define MCDI_EVENT_PTP_TIME_MAJOR_WIDTH 32 /* For CODE_PTP_TIME events, bits 19-26 of the minor value of the PTP clock */ #define MCDI_EVENT_PTP_TIME_MINOR_26_19_LBN 36 #define MCDI_EVENT_PTP_TIME_MINOR_26_19_WIDTH 8 +/* For CODE_PTP_TIME events, most significant bits of the minor value of the + * PTP clock. This is a more generic equivalent of PTP_TIME_MINOR_26_19. 
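The long run of _LEN 4 additions above gives every field an explicit byte length next to its byte offset. Consuming such an OFST/LEN pair from a raw payload is a bounded, alignment-safe read (a sketch; MCDI payload fields on these NICs are little-endian, so big-endian hosts would byte-swap):

#include <stdint.h>
#include <string.h>

/* Read a 4-byte field at byte offset 'ofst' from an MCDI payload. */
static uint32_t
mcdi_read_dword(const uint8_t *payload, size_t ofst)
{
	uint32_t v;

	memcpy(&v, payload + ofst, sizeof (v));	/* no unaligned access */
	return (v);	/* byte-swap here on big-endian hosts */
}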
+ */ +#define MCDI_EVENT_PTP_TIME_MINOR_MS_8BITS_LBN 36 +#define MCDI_EVENT_PTP_TIME_MINOR_MS_8BITS_WIDTH 8 /* For CODE_PTP_TIME events where report sync status is enabled, indicates * whether the NIC clock has ever been set */ @@ -690,10 +776,17 @@ */ #define MCDI_EVENT_PTP_TIME_MINOR_26_21_LBN 38 #define MCDI_EVENT_PTP_TIME_MINOR_26_21_WIDTH 6 +/* For CODE_PTP_TIME events, most significant bits of the minor value of the + * PTP clock. This is a more generic equivalent of PTP_TIME_MINOR_26_21. + */ +#define MCDI_EVENT_PTP_TIME_MINOR_MS_6BITS_LBN 38 +#define MCDI_EVENT_PTP_TIME_MINOR_MS_6BITS_WIDTH 6 #define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_OFST 0 +#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_LEN 4 #define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_LBN 0 #define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_WIDTH 32 #define MCDI_EVENT_PROXY_RESPONSE_HANDLE_OFST 0 +#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_LEN 4 #define MCDI_EVENT_PROXY_RESPONSE_HANDLE_LBN 0 #define MCDI_EVENT_PROXY_RESPONSE_HANDLE_WIDTH 32 /* Zero means that the request has been completed or authorized, and the driver @@ -702,6 +795,10 @@ */ #define MCDI_EVENT_PROXY_RESPONSE_RC_LBN 36 #define MCDI_EVENT_PROXY_RESPONSE_RC_WIDTH 8 +#define MCDI_EVENT_DBRET_DATA_OFST 0 +#define MCDI_EVENT_DBRET_DATA_LEN 4 +#define MCDI_EVENT_DBRET_DATA_LBN 0 +#define MCDI_EVENT_DBRET_DATA_WIDTH 32 /* FCDI_EVENT structuredef */ #define FCDI_EVENT_LEN 8 @@ -710,7 +807,7 @@ #define FCDI_EVENT_LEVEL_LBN 33 #define FCDI_EVENT_LEVEL_WIDTH 3 /* enum: Info. */ -#define FCDI_EVENT_LEVEL_INFO 0x0 +#define FCDI_EVENT_LEVEL_INFO 0x0 /* enum: Warning. */ #define FCDI_EVENT_LEVEL_WARN 0x1 /* enum: Error. */ @@ -718,6 +815,7 @@ /* enum: Fatal. */ #define FCDI_EVENT_LEVEL_FATAL 0x3 #define FCDI_EVENT_DATA_OFST 0 +#define FCDI_EVENT_DATA_LEN 4 #define FCDI_EVENT_LINK_STATE_STATUS_LBN 0 #define FCDI_EVENT_LINK_STATE_STATUS_WIDTH 1 #define FCDI_EVENT_LINK_DOWN 0x0 /* enum */ @@ -757,6 +855,7 @@ #define FCDI_EVENT_REBOOT_FC_FW 0x0 /* enum */ #define FCDI_EVENT_REBOOT_FC_BOOTLOADER 0x1 /* enum */ #define FCDI_EVENT_ASSERT_INSTR_ADDRESS_OFST 0 +#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LEN 4 #define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LBN 0 #define FCDI_EVENT_ASSERT_INSTR_ADDRESS_WIDTH 32 #define FCDI_EVENT_ASSERT_TYPE_LBN 36 @@ -764,12 +863,15 @@ #define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_LBN 36 #define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_WIDTH 8 #define FCDI_EVENT_DDR_TEST_RESULT_RESULT_OFST 0 +#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_LEN 4 #define FCDI_EVENT_DDR_TEST_RESULT_RESULT_LBN 0 #define FCDI_EVENT_DDR_TEST_RESULT_RESULT_WIDTH 32 #define FCDI_EVENT_LINK_STATE_DATA_OFST 0 +#define FCDI_EVENT_LINK_STATE_DATA_LEN 4 #define FCDI_EVENT_LINK_STATE_DATA_LBN 0 #define FCDI_EVENT_LINK_STATE_DATA_WIDTH 32 #define FCDI_EVENT_PTP_STATE_OFST 0 +#define FCDI_EVENT_PTP_STATE_LEN 4 #define FCDI_EVENT_PTP_UNDEFINED 0x0 /* enum */ #define FCDI_EVENT_PTP_SETUP_FAILED 0x1 /* enum */ #define FCDI_EVENT_PTP_OPERATIONAL 0x2 /* enum */ @@ -778,6 +880,7 @@ #define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_LBN 36 #define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_WIDTH 8 #define FCDI_EVENT_DDR_ECC_STATUS_STATUS_OFST 0 +#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_LEN 4 #define FCDI_EVENT_DDR_ECC_STATUS_STATUS_LBN 0 #define FCDI_EVENT_DDR_ECC_STATUS_STATUS_WIDTH 32 /* Index of MC port being referred to */ @@ -785,9 +888,11 @@ #define FCDI_EVENT_PORT_CONFIG_SRC_WIDTH 8 /* FC Port index that matches the MC port index in SRC */ #define FCDI_EVENT_PORT_CONFIG_DATA_OFST 0 +#define FCDI_EVENT_PORT_CONFIG_DATA_LEN 4 
#define FCDI_EVENT_PORT_CONFIG_DATA_LBN 0 #define FCDI_EVENT_PORT_CONFIG_DATA_WIDTH 32 #define FCDI_EVENT_BOOT_RESULT_OFST 0 +#define FCDI_EVENT_BOOT_RESULT_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_AOE/MC_CMD_AOE_OUT_INFO/FC_BOOT_RESULT */ #define FCDI_EVENT_BOOT_RESULT_LBN 0 @@ -804,14 +909,17 @@ #define FCDI_EXTENDED_EVENT_PPS_LEN(num) (8+8*(num)) /* Number of timestamps following */ #define FCDI_EXTENDED_EVENT_PPS_COUNT_OFST 0 +#define FCDI_EXTENDED_EVENT_PPS_COUNT_LEN 4 #define FCDI_EXTENDED_EVENT_PPS_COUNT_LBN 0 #define FCDI_EXTENDED_EVENT_PPS_COUNT_WIDTH 32 /* Seconds field of a timestamp record */ #define FCDI_EXTENDED_EVENT_PPS_SECONDS_OFST 8 +#define FCDI_EXTENDED_EVENT_PPS_SECONDS_LEN 4 #define FCDI_EXTENDED_EVENT_PPS_SECONDS_LBN 64 #define FCDI_EXTENDED_EVENT_PPS_SECONDS_WIDTH 32 /* Nanoseconds field of a timestamp record */ #define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_OFST 12 +#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LEN 4 #define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LBN 96 #define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_WIDTH 32 /* Timestamp records comprising the event */ @@ -831,7 +939,7 @@ #define MUM_EVENT_LEVEL_LBN 33 #define MUM_EVENT_LEVEL_WIDTH 3 /* enum: Info. */ -#define MUM_EVENT_LEVEL_INFO 0x0 +#define MUM_EVENT_LEVEL_INFO 0x0 /* enum: Warning. */ #define MUM_EVENT_LEVEL_WARN 0x1 /* enum: Error. */ @@ -839,6 +947,7 @@ /* enum: Fatal. */ #define MUM_EVENT_LEVEL_FATAL 0x3 #define MUM_EVENT_DATA_OFST 0 +#define MUM_EVENT_DATA_LEN 4 #define MUM_EVENT_SENSOR_ID_LBN 0 #define MUM_EVENT_SENSOR_ID_WIDTH 8 /* Enum values, see field(s): */ @@ -876,18 +985,23 @@ /* enum: Link fault has been asserted, or has cleared. */ #define MUM_EVENT_CODE_QSFP_LASI_INTERRUPT 0x4 #define MUM_EVENT_SENSOR_DATA_OFST 0 +#define MUM_EVENT_SENSOR_DATA_LEN 4 #define MUM_EVENT_SENSOR_DATA_LBN 0 #define MUM_EVENT_SENSOR_DATA_WIDTH 32 #define MUM_EVENT_PORT_PHY_FLAGS_OFST 0 +#define MUM_EVENT_PORT_PHY_FLAGS_LEN 4 #define MUM_EVENT_PORT_PHY_FLAGS_LBN 0 #define MUM_EVENT_PORT_PHY_FLAGS_WIDTH 32 #define MUM_EVENT_PORT_PHY_COPPER_LEN_OFST 0 +#define MUM_EVENT_PORT_PHY_COPPER_LEN_LEN 4 #define MUM_EVENT_PORT_PHY_COPPER_LEN_LBN 0 #define MUM_EVENT_PORT_PHY_COPPER_LEN_WIDTH 32 #define MUM_EVENT_PORT_PHY_CAPS_OFST 0 +#define MUM_EVENT_PORT_PHY_CAPS_LEN 4 #define MUM_EVENT_PORT_PHY_CAPS_LBN 0 #define MUM_EVENT_PORT_PHY_CAPS_WIDTH 32 #define MUM_EVENT_PORT_PHY_TECH_OFST 0 +#define MUM_EVENT_PORT_PHY_TECH_LEN 4 #define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_UNKNOWN 0x0 /* enum */ #define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_OPTICAL 0x1 /* enum */ #define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE 0x2 /* enum */ @@ -911,7 +1025,9 @@ /***********************************/ /* MC_CMD_READ32 - * Read multiple 32byte words from MC memory. + * Read multiple 32byte words from MC memory. Note - this command really + * belongs to INSECURE category but is required by shmboot. The command handler + * has additional checks to reject insecure calls. 
*/ #define MC_CMD_READ32 0x1 #undef MC_CMD_0x1_PRIVILEGE_CTG @@ -921,7 +1037,9 @@ /* MC_CMD_READ32_IN msgrequest */ #define MC_CMD_READ32_IN_LEN 8 #define MC_CMD_READ32_IN_ADDR_OFST 0 +#define MC_CMD_READ32_IN_ADDR_LEN 4 #define MC_CMD_READ32_IN_NUMWORDS_OFST 4 +#define MC_CMD_READ32_IN_NUMWORDS_LEN 4 /* MC_CMD_READ32_OUT msgresponse */ #define MC_CMD_READ32_OUT_LENMIN 4 @@ -940,13 +1058,14 @@ #define MC_CMD_WRITE32 0x2 #undef MC_CMD_0x2_PRIVILEGE_CTG -#define MC_CMD_0x2_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x2_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_WRITE32_IN msgrequest */ #define MC_CMD_WRITE32_IN_LENMIN 8 #define MC_CMD_WRITE32_IN_LENMAX 252 #define MC_CMD_WRITE32_IN_LEN(num) (4+4*(num)) #define MC_CMD_WRITE32_IN_ADDR_OFST 0 +#define MC_CMD_WRITE32_IN_ADDR_LEN 4 #define MC_CMD_WRITE32_IN_BUFFER_OFST 4 #define MC_CMD_WRITE32_IN_BUFFER_LEN 4 #define MC_CMD_WRITE32_IN_BUFFER_MINNUM 1 @@ -958,12 +1077,14 @@ /***********************************/ /* MC_CMD_COPYCODE - * Copy MC code between two locations and jump. + * Copy MC code between two locations and jump. Note - this command really + * belongs to INSECURE category but is required by shmboot. The command handler + * has additional checks to reject insecure calls. */ #define MC_CMD_COPYCODE 0x3 #undef MC_CMD_0x3_PRIVILEGE_CTG -#define MC_CMD_0x3_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x3_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND /* MC_CMD_COPYCODE_IN msgrequest */ #define MC_CMD_COPYCODE_IN_LEN 16 @@ -974,6 +1095,7 @@ * is a bitfield, with each bit as documented below. */ #define MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0 +#define MC_CMD_COPYCODE_IN_SRC_ADDR_LEN 4 /* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT (see below) */ #define MC_CMD_COPYCODE_HUNT_NO_MAGIC_ADDR 0x10000 /* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT and @@ -999,9 +1121,12 @@ #define MC_CMD_COPYCODE_IN_BOOT_MAGIC_DISABLE_XIP_WIDTH 1 /* Destination address */ #define MC_CMD_COPYCODE_IN_DEST_ADDR_OFST 4 +#define MC_CMD_COPYCODE_IN_DEST_ADDR_LEN 4 #define MC_CMD_COPYCODE_IN_NUMWORDS_OFST 8 +#define MC_CMD_COPYCODE_IN_NUMWORDS_LEN 4 /* Address of where to jump after copy. */ #define MC_CMD_COPYCODE_IN_JUMP_OFST 12 +#define MC_CMD_COPYCODE_IN_JUMP_LEN 4 /* enum: Control should return to the caller rather than jumping */ #define MC_CMD_COPYCODE_JUMP_NONE 0x1 @@ -1016,12 +1141,13 @@ #define MC_CMD_SET_FUNC 0x4 #undef MC_CMD_0x4_PRIVILEGE_CTG -#define MC_CMD_0x4_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x4_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_SET_FUNC_IN msgrequest */ #define MC_CMD_SET_FUNC_IN_LEN 4 /* Set function */ #define MC_CMD_SET_FUNC_IN_FUNC_OFST 0 +#define MC_CMD_SET_FUNC_IN_FUNC_LEN 4 /* MC_CMD_SET_FUNC_OUT msgresponse */ #define MC_CMD_SET_FUNC_OUT_LEN 0 @@ -1034,7 +1160,7 @@ #define MC_CMD_GET_BOOT_STATUS 0x5 #undef MC_CMD_0x5_PRIVILEGE_CTG -#define MC_CMD_0x5_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x5_PRIVILEGE_CTG SRIOV_CTG_GENERAL /* MC_CMD_GET_BOOT_STATUS_IN msgrequest */ #define MC_CMD_GET_BOOT_STATUS_IN_LEN 0 @@ -1043,9 +1169,11 @@ #define MC_CMD_GET_BOOT_STATUS_OUT_LEN 8 /* ?? 
*/ #define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_OFST 0 +#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_LEN 4 /* enum: indicates that the MC wasn't flash booted */ -#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_NULL 0xdeadbeef +#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_NULL 0xdeadbeef #define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_OFST 4 +#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_LEN 4 #define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_LBN 0 #define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_WIDTH 1 #define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_PRIMARY_LBN 1 @@ -1069,11 +1197,13 @@ #define MC_CMD_GET_ASSERTS_IN_LEN 4 /* Set to clear assertion */ #define MC_CMD_GET_ASSERTS_IN_CLEAR_OFST 0 +#define MC_CMD_GET_ASSERTS_IN_CLEAR_LEN 4 /* MC_CMD_GET_ASSERTS_OUT msgresponse */ #define MC_CMD_GET_ASSERTS_OUT_LEN 140 /* Assertion status flag. */ #define MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_OFST 0 +#define MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_LEN 4 /* enum: No assertions have failed. */ #define MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 0x1 /* enum: A system-level assertion has failed. */ @@ -1086,6 +1216,7 @@ #define MC_CMD_GET_ASSERTS_FLAGS_ADDR_TRAP 0x5 /* Failing PC value */ #define MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_OFST 4 +#define MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_LEN 4 /* Saved GP regs */ #define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST 8 #define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_LEN 4 @@ -1096,7 +1227,9 @@ #define MC_CMD_GET_ASSERTS_REG_NO_DATA 0xda7a1057 /* Failing thread address */ #define MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_OFST 132 +#define MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_LEN 4 #define MC_CMD_GET_ASSERTS_OUT_RESERVED_OFST 136 +#define MC_CMD_GET_ASSERTS_OUT_RESERVED_LEN 4 /***********************************/ @@ -1113,12 +1246,14 @@ #define MC_CMD_LOG_CTRL_IN_LEN 8 /* Log destination */ #define MC_CMD_LOG_CTRL_IN_LOG_DEST_OFST 0 +#define MC_CMD_LOG_CTRL_IN_LOG_DEST_LEN 4 /* enum: UART. */ #define MC_CMD_LOG_CTRL_IN_LOG_DEST_UART 0x1 /* enum: Event queue. */ #define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ 0x2 /* Legacy argument. Must be zero. */ #define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_OFST 4 +#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_LEN 4 /* MC_CMD_LOG_CTRL_OUT msgresponse */ #define MC_CMD_LOG_CTRL_OUT_LEN 0 @@ -1140,23 +1275,29 @@ #define MC_CMD_GET_VERSION_EXT_IN_LEN 4 /* placeholder, set to 0 */ #define MC_CMD_GET_VERSION_EXT_IN_EXT_FLAGS_OFST 0 +#define MC_CMD_GET_VERSION_EXT_IN_EXT_FLAGS_LEN 4 /* MC_CMD_GET_VERSION_V0_OUT msgresponse: deprecated version format */ #define MC_CMD_GET_VERSION_V0_OUT_LEN 4 #define MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 +#define MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 /* enum: Reserved version number to indicate "any" version. */ #define MC_CMD_GET_VERSION_OUT_FIRMWARE_ANY 0xffffffff /* enum: Bootrom version value for Siena. */ #define MC_CMD_GET_VERSION_OUT_FIRMWARE_SIENA_BOOTROM 0xb0070000 /* enum: Bootrom version value for Huntington. */ #define MC_CMD_GET_VERSION_OUT_FIRMWARE_HUNT_BOOTROM 0xb0070001 +/* enum: Bootrom version value for Medford2. 
*/ +#define MC_CMD_GET_VERSION_OUT_FIRMWARE_MEDFORD2_BOOTROM 0xb0070002 /* MC_CMD_GET_VERSION_OUT msgresponse */ #define MC_CMD_GET_VERSION_OUT_LEN 32 /* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */ +/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */ /* Enum values, see field(s): */ /* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */ #define MC_CMD_GET_VERSION_OUT_PCOL_OFST 4 +#define MC_CMD_GET_VERSION_OUT_PCOL_LEN 4 /* 128bit mask of functions supported by the current firmware */ #define MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_OFST 8 #define MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_LEN 16 @@ -1168,9 +1309,11 @@ /* MC_CMD_GET_VERSION_EXT_OUT msgresponse */ #define MC_CMD_GET_VERSION_EXT_OUT_LEN 48 /* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */ +/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */ /* Enum values, see field(s): */ /* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */ #define MC_CMD_GET_VERSION_EXT_OUT_PCOL_OFST 4 +#define MC_CMD_GET_VERSION_EXT_OUT_PCOL_LEN 4 /* 128bit mask of functions supported by the current firmware */ #define MC_CMD_GET_VERSION_EXT_OUT_SUPPORTED_FUNCS_OFST 8 #define MC_CMD_GET_VERSION_EXT_OUT_SUPPORTED_FUNCS_LEN 16 @@ -1183,2421 +1326,6 @@ #define MC_CMD_GET_VERSION_EXT_OUT_EXTRA_LEN 16 -/***********************************/ -/* MC_CMD_FC - * Perform an FC operation - */ -#define MC_CMD_FC 0x9 - -/* MC_CMD_FC_IN msgrequest */ -#define MC_CMD_FC_IN_LEN 4 -#define MC_CMD_FC_IN_OP_HDR_OFST 0 -#define MC_CMD_FC_IN_OP_LBN 0 -#define MC_CMD_FC_IN_OP_WIDTH 8 -/* enum: NULL MCDI command to FC. */ -#define MC_CMD_FC_OP_NULL 0x1 -/* enum: Unused opcode */ -#define MC_CMD_FC_OP_UNUSED 0x2 -/* enum: MAC driver commands */ -#define MC_CMD_FC_OP_MAC 0x3 -/* enum: Read FC memory */ -#define MC_CMD_FC_OP_READ32 0x4 -/* enum: Write to FC memory */ -#define MC_CMD_FC_OP_WRITE32 0x5 -/* enum: Read FC memory */ -#define MC_CMD_FC_OP_TRC_READ 0x6 -/* enum: Write to FC memory */ -#define MC_CMD_FC_OP_TRC_WRITE 0x7 -/* enum: FC firmware Version */ -#define MC_CMD_FC_OP_GET_VERSION 0x8 -/* enum: Read FC memory */ -#define MC_CMD_FC_OP_TRC_RX_READ 0x9 -/* enum: Write to FC memory */ -#define MC_CMD_FC_OP_TRC_RX_WRITE 0xa -/* enum: SFP parameters */ -#define MC_CMD_FC_OP_SFP 0xb -/* enum: DDR3 test */ -#define MC_CMD_FC_OP_DDR_TEST 0xc -/* enum: Get Crash context from FC */ -#define MC_CMD_FC_OP_GET_ASSERT 0xd -/* enum: Get FPGA Build registers */ -#define MC_CMD_FC_OP_FPGA_BUILD 0xe -/* enum: Read map support commands */ -#define MC_CMD_FC_OP_READ_MAP 0xf -/* enum: FC Capabilities */ -#define MC_CMD_FC_OP_CAPABILITIES 0x10 -/* enum: FC Global flags */ -#define MC_CMD_FC_OP_GLOBAL_FLAGS 0x11 -/* enum: FC IO using relative addressing modes */ -#define MC_CMD_FC_OP_IO_REL 0x12 -/* enum: FPGA link information */ -#define MC_CMD_FC_OP_UHLINK 0x13 -/* enum: Configure loopbacks and link on FPGA ports */ -#define MC_CMD_FC_OP_SET_LINK 0x14 -/* enum: Licensing operations relating to AOE */ -#define MC_CMD_FC_OP_LICENSE 0x15 -/* enum: Startup information to the FC */ -#define MC_CMD_FC_OP_STARTUP 0x16 -/* enum: Configure a DMA read */ -#define MC_CMD_FC_OP_DMA 0x17 -/* enum: Configure a timed read */ -#define MC_CMD_FC_OP_TIMED_READ 0x18 -/* enum: Control UART logging */ -#define MC_CMD_FC_OP_LOG 0x19 -/* enum: Get the value of a given clock_id */ -#define MC_CMD_FC_OP_CLOCK 0x1a -/* enum: DDR3/QDR3 parameters */ -#define MC_CMD_FC_OP_DDR 0x1b -/* enum: PTP and timestamp control */ -#define MC_CMD_FC_OP_TIMESTAMP 0x1c -/* enum: Commands for SPI Flash interface */ -#define MC_CMD_FC_OP_SPI 
0x1d -/* enum: Commands for diagnostic components */ -#define MC_CMD_FC_OP_DIAG 0x1e -/* enum: External AOE port. */ -#define MC_CMD_FC_IN_PORT_EXT_OFST 0x0 -/* enum: Internal AOE port. */ -#define MC_CMD_FC_IN_PORT_INT_OFST 0x40 - -/* MC_CMD_FC_IN_NULL msgrequest */ -#define MC_CMD_FC_IN_NULL_LEN 4 -#define MC_CMD_FC_IN_CMD_OFST 0 - -/* MC_CMD_FC_IN_PHY msgrequest */ -#define MC_CMD_FC_IN_PHY_LEN 5 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* FC PHY driver operation code */ -#define MC_CMD_FC_IN_PHY_OP_OFST 4 -#define MC_CMD_FC_IN_PHY_OP_LEN 1 -/* enum: PHY init handler */ -#define MC_CMD_FC_OP_PHY_OP_INIT 0x1 -/* enum: PHY reconfigure handler */ -#define MC_CMD_FC_OP_PHY_OP_RECONFIGURE 0x2 -/* enum: PHY reboot handler */ -#define MC_CMD_FC_OP_PHY_OP_REBOOT 0x3 -/* enum: PHY get_supported_cap handler */ -#define MC_CMD_FC_OP_PHY_OP_GET_SUPPORTED_CAP 0x4 -/* enum: PHY get_config handler */ -#define MC_CMD_FC_OP_PHY_OP_GET_CONFIG 0x5 -/* enum: PHY get_media_info handler */ -#define MC_CMD_FC_OP_PHY_OP_GET_MEDIA_INFO 0x6 -/* enum: PHY set_led handler */ -#define MC_CMD_FC_OP_PHY_OP_SET_LED 0x7 -/* enum: PHY lasi_interrupt handler */ -#define MC_CMD_FC_OP_PHY_OP_LASI_INTERRUPT 0x8 -/* enum: PHY check_link handler */ -#define MC_CMD_FC_OP_PHY_OP_CHECK_LINK 0x9 -/* enum: PHY fill_stats handler */ -#define MC_CMD_FC_OP_PHY_OP_FILL_STATS 0xa -/* enum: PHY bpx_link_state_changed handler */ -#define MC_CMD_FC_OP_PHY_OP_BPX_LINK_STATE_CHANGED 0xb -/* enum: PHY get_state handler */ -#define MC_CMD_FC_OP_PHY_OP_GET_STATE 0xc -/* enum: PHY start_bist handler */ -#define MC_CMD_FC_OP_PHY_OP_START_BIST 0xd -/* enum: PHY poll_bist handler */ -#define MC_CMD_FC_OP_PHY_OP_POLL_BIST 0xe -/* enum: PHY nvram_test handler */ -#define MC_CMD_FC_OP_PHY_OP_NVRAM_TEST 0xf -/* enum: PHY relinquish handler */ -#define MC_CMD_FC_OP_PHY_OP_RELINQUISH_SPI 0x10 -/* enum: PHY read connection from FC - may be not required */ -#define MC_CMD_FC_OP_PHY_OP_GET_CONNECTION 0x11 -/* enum: PHY read flags from FC - may be not required */ -#define MC_CMD_FC_OP_PHY_OP_GET_FLAGS 0x12 - -/* MC_CMD_FC_IN_PHY_INIT msgrequest */ -#define MC_CMD_FC_IN_PHY_INIT_LEN 4 -#define MC_CMD_FC_IN_PHY_CMD_OFST 0 - -/* MC_CMD_FC_IN_MAC msgrequest */ -#define MC_CMD_FC_IN_MAC_LEN 8 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -#define MC_CMD_FC_IN_MAC_HEADER_OFST 4 -#define MC_CMD_FC_IN_MAC_OP_LBN 0 -#define MC_CMD_FC_IN_MAC_OP_WIDTH 8 -/* enum: MAC reconfigure handler */ -#define MC_CMD_FC_OP_MAC_OP_RECONFIGURE 0x1 -/* enum: MAC Set command - same as MC_CMD_SET_MAC */ -#define MC_CMD_FC_OP_MAC_OP_SET_LINK 0x2 -/* enum: MAC statistics */ -#define MC_CMD_FC_OP_MAC_OP_GET_STATS 0x3 -/* enum: MAC RX statistics */ -#define MC_CMD_FC_OP_MAC_OP_GET_RX_STATS 0x6 -/* enum: MAC TX statistics */ -#define MC_CMD_FC_OP_MAC_OP_GET_TX_STATS 0x7 -/* enum: MAC Read status */ -#define MC_CMD_FC_OP_MAC_OP_READ_STATUS 0x8 -#define MC_CMD_FC_IN_MAC_PORT_TYPE_LBN 8 -#define MC_CMD_FC_IN_MAC_PORT_TYPE_WIDTH 8 -/* enum: External FPGA port. */ -#define MC_CMD_FC_PORT_EXT 0x0 -/* enum: Internal Siena-facing FPGA ports. */ -#define MC_CMD_FC_PORT_INT 0x1 -#define MC_CMD_FC_IN_MAC_PORT_IDX_LBN 16 -#define MC_CMD_FC_IN_MAC_PORT_IDX_WIDTH 8 -#define MC_CMD_FC_IN_MAC_CMD_FORMAT_LBN 24 -#define MC_CMD_FC_IN_MAC_CMD_FORMAT_WIDTH 8 -/* enum: Default FC command format; the fields PORT_TYPE and PORT_IDX are - * irrelevant. Port number is derived from pci_fn; passed in FC header. - */ -#define MC_CMD_FC_OP_MAC_CMD_FORMAT_DEFAULT 0x0 -/* enum: Override default port number. 
Port number determined by fields - * PORT_TYPE and PORT_IDX. - */ -#define MC_CMD_FC_OP_MAC_CMD_FORMAT_PORT_OVERRIDE 0x1 - -/* MC_CMD_FC_IN_MAC_RECONFIGURE msgrequest */ -#define MC_CMD_FC_IN_MAC_RECONFIGURE_LEN 8 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */ - -/* MC_CMD_FC_IN_MAC_SET_LINK msgrequest */ -#define MC_CMD_FC_IN_MAC_SET_LINK_LEN 32 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */ -/* MTU size */ -#define MC_CMD_FC_IN_MAC_SET_LINK_MTU_OFST 8 -/* Drain Tx FIFO */ -#define MC_CMD_FC_IN_MAC_SET_LINK_DRAIN_OFST 12 -#define MC_CMD_FC_IN_MAC_SET_LINK_ADDR_OFST 16 -#define MC_CMD_FC_IN_MAC_SET_LINK_ADDR_LEN 8 -#define MC_CMD_FC_IN_MAC_SET_LINK_ADDR_LO_OFST 16 -#define MC_CMD_FC_IN_MAC_SET_LINK_ADDR_HI_OFST 20 -#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_OFST 24 -#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_UNICAST_LBN 0 -#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_UNICAST_WIDTH 1 -#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_BRDCAST_LBN 1 -#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_BRDCAST_WIDTH 1 -#define MC_CMD_FC_IN_MAC_SET_LINK_FCNTL_OFST 28 - -/* MC_CMD_FC_IN_MAC_READ_STATUS msgrequest */ -#define MC_CMD_FC_IN_MAC_READ_STATUS_LEN 8 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */ - -/* MC_CMD_FC_IN_MAC_GET_RX_STATS msgrequest */ -#define MC_CMD_FC_IN_MAC_GET_RX_STATS_LEN 8 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */ - -/* MC_CMD_FC_IN_MAC_GET_TX_STATS msgrequest */ -#define MC_CMD_FC_IN_MAC_GET_TX_STATS_LEN 8 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */ - -/* MC_CMD_FC_IN_MAC_GET_STATS msgrequest */ -#define MC_CMD_FC_IN_MAC_GET_STATS_LEN 20 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */ -/* MC Statistics index */ -#define MC_CMD_FC_IN_MAC_GET_STATS_STATS_INDEX_OFST 8 -#define MC_CMD_FC_IN_MAC_GET_STATS_FLAGS_OFST 12 -#define MC_CMD_FC_IN_MAC_GET_STATS_CLEAR_ALL_LBN 0 -#define MC_CMD_FC_IN_MAC_GET_STATS_CLEAR_ALL_WIDTH 1 -#define MC_CMD_FC_IN_MAC_GET_STATS_CLEAR_LBN 1 -#define MC_CMD_FC_IN_MAC_GET_STATS_CLEAR_WIDTH 1 -#define MC_CMD_FC_IN_MAC_GET_STATS_UPDATE_LBN 2 -#define MC_CMD_FC_IN_MAC_GET_STATS_UPDATE_WIDTH 1 -/* Number of statistics to read */ -#define MC_CMD_FC_IN_MAC_GET_STATS_NUM_OFST 16 -#define MC_CMD_FC_MAC_NSTATS_PER_BLOCK 0x1e /* enum */ -#define MC_CMD_FC_MAC_NBYTES_PER_STAT 0x8 /* enum */ - -/* MC_CMD_FC_IN_READ32 msgrequest */ -#define MC_CMD_FC_IN_READ32_LEN 16 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -#define MC_CMD_FC_IN_READ32_ADDR_HI_OFST 4 -#define MC_CMD_FC_IN_READ32_ADDR_LO_OFST 8 -#define MC_CMD_FC_IN_READ32_NUMWORDS_OFST 12 - -/* MC_CMD_FC_IN_WRITE32 msgrequest */ -#define MC_CMD_FC_IN_WRITE32_LENMIN 16 -#define MC_CMD_FC_IN_WRITE32_LENMAX 252 -#define MC_CMD_FC_IN_WRITE32_LEN(num) (12+4*(num)) -/* MC_CMD_FC_IN_CMD_OFST 0 */ -#define MC_CMD_FC_IN_WRITE32_ADDR_HI_OFST 4 -#define MC_CMD_FC_IN_WRITE32_ADDR_LO_OFST 8 -#define MC_CMD_FC_IN_WRITE32_BUFFER_OFST 12 -#define MC_CMD_FC_IN_WRITE32_BUFFER_LEN 4 -#define MC_CMD_FC_IN_WRITE32_BUFFER_MINNUM 1 -#define MC_CMD_FC_IN_WRITE32_BUFFER_MAXNUM 60 - -/* MC_CMD_FC_IN_TRC_READ msgrequest */ -#define MC_CMD_FC_IN_TRC_READ_LEN 12 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -#define MC_CMD_FC_IN_TRC_READ_TRC_OFST 4 -#define MC_CMD_FC_IN_TRC_READ_CHANNEL_OFST 8 - -/* MC_CMD_FC_IN_TRC_WRITE msgrequest */ -#define MC_CMD_FC_IN_TRC_WRITE_LEN 28 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -#define MC_CMD_FC_IN_TRC_WRITE_TRC_OFST 4 -#define MC_CMD_FC_IN_TRC_WRITE_CHANNEL_OFST 8 -#define MC_CMD_FC_IN_TRC_WRITE_DATA_OFST 12 -#define 
MC_CMD_FC_IN_TRC_WRITE_DATA_LEN 4 -#define MC_CMD_FC_IN_TRC_WRITE_DATA_NUM 4 - -/* MC_CMD_FC_IN_GET_VERSION msgrequest */ -#define MC_CMD_FC_IN_GET_VERSION_LEN 4 -/* MC_CMD_FC_IN_CMD_OFST 0 */ - -/* MC_CMD_FC_IN_TRC_RX_READ msgrequest */ -#define MC_CMD_FC_IN_TRC_RX_READ_LEN 12 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -#define MC_CMD_FC_IN_TRC_RX_READ_TRC_OFST 4 -#define MC_CMD_FC_IN_TRC_RX_READ_CHANNEL_OFST 8 - -/* MC_CMD_FC_IN_TRC_RX_WRITE msgrequest */ -#define MC_CMD_FC_IN_TRC_RX_WRITE_LEN 20 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -#define MC_CMD_FC_IN_TRC_RX_WRITE_TRC_OFST 4 -#define MC_CMD_FC_IN_TRC_RX_WRITE_CHANNEL_OFST 8 -#define MC_CMD_FC_IN_TRC_RX_WRITE_DATA_OFST 12 -#define MC_CMD_FC_IN_TRC_RX_WRITE_DATA_LEN 4 -#define MC_CMD_FC_IN_TRC_RX_WRITE_DATA_NUM 2 - -/* MC_CMD_FC_IN_SFP msgrequest */ -#define MC_CMD_FC_IN_SFP_LEN 28 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* Link speed is 100, 1000, 10000, 40000 */ -#define MC_CMD_FC_IN_SFP_SPEED_OFST 4 -/* Length of copper cable - zero when not relevant (e.g. if cable is fibre) */ -#define MC_CMD_FC_IN_SFP_COPPER_LEN_OFST 8 -/* Not relevant for cards with QSFP modules. For older cards, true if module is - * a dual speed SFP+ module. - */ -#define MC_CMD_FC_IN_SFP_DUAL_SPEED_OFST 12 -/* True if an SFP Module is present (other fields valid when true) */ -#define MC_CMD_FC_IN_SFP_PRESENT_OFST 16 -/* The type of the SFP+ Module. For later cards with QSFP modules, this field - * is unused and the type is communicated by other means. - */ -#define MC_CMD_FC_IN_SFP_TYPE_OFST 20 -/* Capabilities corresponding to 1 bits. */ -#define MC_CMD_FC_IN_SFP_CAPS_OFST 24 - -/* MC_CMD_FC_IN_DDR_TEST msgrequest */ -#define MC_CMD_FC_IN_DDR_TEST_LEN 8 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -#define MC_CMD_FC_IN_DDR_TEST_HEADER_OFST 4 -#define MC_CMD_FC_IN_DDR_TEST_OP_LBN 0 -#define MC_CMD_FC_IN_DDR_TEST_OP_WIDTH 8 -/* enum: DRAM Test Start */ -#define MC_CMD_FC_OP_DDR_TEST_START 0x1 -/* enum: DRAM Test Poll */ -#define MC_CMD_FC_OP_DDR_TEST_POLL 0x2 - -/* MC_CMD_FC_IN_DDR_TEST_START msgrequest */ -#define MC_CMD_FC_IN_DDR_TEST_START_LEN 12 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* MC_CMD_FC_IN_DDR_TEST_HEADER_OFST 4 */ -#define MC_CMD_FC_IN_DDR_TEST_START_MASK_OFST 8 -#define MC_CMD_FC_IN_DDR_TEST_START_T0_LBN 0 -#define MC_CMD_FC_IN_DDR_TEST_START_T0_WIDTH 1 -#define MC_CMD_FC_IN_DDR_TEST_START_T1_LBN 1 -#define MC_CMD_FC_IN_DDR_TEST_START_T1_WIDTH 1 -#define MC_CMD_FC_IN_DDR_TEST_START_B0_LBN 2 -#define MC_CMD_FC_IN_DDR_TEST_START_B0_WIDTH 1 -#define MC_CMD_FC_IN_DDR_TEST_START_B1_LBN 3 -#define MC_CMD_FC_IN_DDR_TEST_START_B1_WIDTH 1 - -/* MC_CMD_FC_IN_DDR_TEST_POLL msgrequest */ -#define MC_CMD_FC_IN_DDR_TEST_POLL_LEN 12 -#define MC_CMD_FC_IN_DDR_TEST_CMD_OFST 0 -/* MC_CMD_FC_IN_DDR_TEST_HEADER_OFST 4 */ -/* Clear previous test result and prepare for restarting DDR test */ -#define MC_CMD_FC_IN_DDR_TEST_POLL_CLEAR_RESULT_FOR_DDR_TEST_OFST 8 - -/* MC_CMD_FC_IN_GET_ASSERT msgrequest */ -#define MC_CMD_FC_IN_GET_ASSERT_LEN 4 -/* MC_CMD_FC_IN_CMD_OFST 0 */ - -/* MC_CMD_FC_IN_FPGA_BUILD msgrequest */ -#define MC_CMD_FC_IN_FPGA_BUILD_LEN 8 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* FPGA build info operation code */ -#define MC_CMD_FC_IN_FPGA_BUILD_OP_OFST 4 -/* enum: Get the build registers */ -#define MC_CMD_FC_IN_FPGA_BUILD_BUILD 0x1 -/* enum: Get the services registers */ -#define MC_CMD_FC_IN_FPGA_BUILD_SERVICES 0x2 -/* enum: Get the BSP version */ -#define MC_CMD_FC_IN_FPGA_BUILD_BSP_VERSION 0x3 -/* enum: Get build register for V2 (SFA974X) */ -#define MC_CMD_FC_IN_FPGA_BUILD_BUILD_V2 0x4 -/* enum: GEt 
the services register for V2 (SFA974X) */ -#define MC_CMD_FC_IN_FPGA_BUILD_SERVICES_V2 0x5 - -/* MC_CMD_FC_IN_READ_MAP msgrequest */ -#define MC_CMD_FC_IN_READ_MAP_LEN 8 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -#define MC_CMD_FC_IN_READ_MAP_HEADER_OFST 4 -#define MC_CMD_FC_IN_READ_MAP_OP_LBN 0 -#define MC_CMD_FC_IN_READ_MAP_OP_WIDTH 8 -/* enum: Get the number of map regions */ -#define MC_CMD_FC_OP_READ_MAP_COUNT 0x1 -/* enum: Get the specified map */ -#define MC_CMD_FC_OP_READ_MAP_INDEX 0x2 - -/* MC_CMD_FC_IN_READ_MAP_COUNT msgrequest */ -#define MC_CMD_FC_IN_READ_MAP_COUNT_LEN 8 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* MC_CMD_FC_IN_READ_MAP_HEADER_OFST 4 */ - -/* MC_CMD_FC_IN_READ_MAP_INDEX msgrequest */ -#define MC_CMD_FC_IN_READ_MAP_INDEX_LEN 12 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* MC_CMD_FC_IN_READ_MAP_HEADER_OFST 4 */ -#define MC_CMD_FC_IN_MAP_INDEX_OFST 8 - -/* MC_CMD_FC_IN_CAPABILITIES msgrequest */ -#define MC_CMD_FC_IN_CAPABILITIES_LEN 4 -/* MC_CMD_FC_IN_CMD_OFST 0 */ - -/* MC_CMD_FC_IN_GLOBAL_FLAGS msgrequest */ -#define MC_CMD_FC_IN_GLOBAL_FLAGS_LEN 8 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -#define MC_CMD_FC_IN_GLOBAL_FLAGS_FLAGS_OFST 4 -#define MC_CMD_FC_IN_GLOBAL_FLAGS_RX_TUNING_CABLE_PLUGGED_IN_LBN 0 -#define MC_CMD_FC_IN_GLOBAL_FLAGS_RX_TUNING_CABLE_PLUGGED_IN_WIDTH 1 -#define MC_CMD_FC_IN_GLOBAL_FLAGS_RX_TUNING_LINK_MONITORING_LBN 1 -#define MC_CMD_FC_IN_GLOBAL_FLAGS_RX_TUNING_LINK_MONITORING_WIDTH 1 -#define MC_CMD_FC_IN_GLOBAL_FLAGS_DFE_ENABLE_LBN 2 -#define MC_CMD_FC_IN_GLOBAL_FLAGS_DFE_ENABLE_WIDTH 1 -#define MC_CMD_FC_IN_GLOBAL_FLAGS_1D_EYE_ENABLE_LBN 3 -#define MC_CMD_FC_IN_GLOBAL_FLAGS_1D_EYE_ENABLE_WIDTH 1 -#define MC_CMD_FC_IN_GLOBAL_FLAGS_1D_TUNING_ENABLE_LBN 4 -#define MC_CMD_FC_IN_GLOBAL_FLAGS_1D_TUNING_ENABLE_WIDTH 1 -#define MC_CMD_FC_IN_GLOBAL_FLAGS_OFFCAL_ENABLE_LBN 5 -#define MC_CMD_FC_IN_GLOBAL_FLAGS_OFFCAL_ENABLE_WIDTH 1 - -/* MC_CMD_FC_IN_IO_REL msgrequest */ -#define MC_CMD_FC_IN_IO_REL_LEN 8 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -#define MC_CMD_FC_IN_IO_REL_HEADER_OFST 4 -#define MC_CMD_FC_IN_IO_REL_OP_LBN 0 -#define MC_CMD_FC_IN_IO_REL_OP_WIDTH 8 -/* enum: Get the base address that the FC applies to relative commands */ -#define MC_CMD_FC_IN_IO_REL_GET_ADDR 0x1 -/* enum: Read data */ -#define MC_CMD_FC_IN_IO_REL_READ32 0x2 -/* enum: Write data */ -#define MC_CMD_FC_IN_IO_REL_WRITE32 0x3 -#define MC_CMD_FC_IN_IO_REL_COMP_TYPE_LBN 8 -#define MC_CMD_FC_IN_IO_REL_COMP_TYPE_WIDTH 8 -/* enum: Application address space */ -#define MC_CMD_FC_COMP_TYPE_APP_ADDR_SPACE 0x1 -/* enum: Flash address space */ -#define MC_CMD_FC_COMP_TYPE_FLASH 0x2 - -/* MC_CMD_FC_IN_IO_REL_GET_ADDR msgrequest */ -#define MC_CMD_FC_IN_IO_REL_GET_ADDR_LEN 8 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* MC_CMD_FC_IN_IO_REL_HEADER_OFST 4 */ - -/* MC_CMD_FC_IN_IO_REL_READ32 msgrequest */ -#define MC_CMD_FC_IN_IO_REL_READ32_LEN 20 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* MC_CMD_FC_IN_IO_REL_HEADER_OFST 4 */ -#define MC_CMD_FC_IN_IO_REL_READ32_ADDR_HI_OFST 8 -#define MC_CMD_FC_IN_IO_REL_READ32_ADDR_LO_OFST 12 -#define MC_CMD_FC_IN_IO_REL_READ32_NUMWORDS_OFST 16 - -/* MC_CMD_FC_IN_IO_REL_WRITE32 msgrequest */ -#define MC_CMD_FC_IN_IO_REL_WRITE32_LENMIN 20 -#define MC_CMD_FC_IN_IO_REL_WRITE32_LENMAX 252 -#define MC_CMD_FC_IN_IO_REL_WRITE32_LEN(num) (16+4*(num)) -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* MC_CMD_FC_IN_IO_REL_HEADER_OFST 4 */ -#define MC_CMD_FC_IN_IO_REL_WRITE32_ADDR_HI_OFST 8 -#define MC_CMD_FC_IN_IO_REL_WRITE32_ADDR_LO_OFST 12 -#define MC_CMD_FC_IN_IO_REL_WRITE32_BUFFER_OFST 16 -#define MC_CMD_FC_IN_IO_REL_WRITE32_BUFFER_LEN 4 
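Aside (illustrative sketch, not part of the patch): every fixed 32-bit MCDI field in this header is described by a byte-offset _OFST macro, now paired with a _LEN macro giving the field size in bytes. A caller encodes a request by writing each field, little-endian, at its offset within the payload. A minimal encoder for the fixed-length MC_CMD_READ32 request might look like the following; write_le32() and encode_read32() are hypothetical helpers for illustration, not the driver's actual MCDI emit API.

/* Sketch: encode MC_CMD_READ32_IN using the _OFST/_LEN convention.
 * The offsets and total length match the definitions earlier in this
 * header; the helpers themselves are illustrative only.
 */
#include <stdint.h>
#include <stddef.h>

#define MC_CMD_READ32_IN_LEN 8
#define MC_CMD_READ32_IN_ADDR_OFST 0
#define MC_CMD_READ32_IN_NUMWORDS_OFST 4

static void write_le32(uint8_t *buf, size_t ofst, uint32_t value)
{
	buf[ofst + 0] = (uint8_t)(value & 0xff);
	buf[ofst + 1] = (uint8_t)((value >> 8) & 0xff);
	buf[ofst + 2] = (uint8_t)((value >> 16) & 0xff);
	buf[ofst + 3] = (uint8_t)((value >> 24) & 0xff);
}

static void encode_read32(uint8_t payload[MC_CMD_READ32_IN_LEN],
			  uint32_t addr, uint32_t numwords)
{
	write_le32(payload, MC_CMD_READ32_IN_ADDR_OFST, addr);
	write_le32(payload, MC_CMD_READ32_IN_NUMWORDS_OFST, numwords);
}

Variable-length messages follow the same pattern, sized with their LEN(num) macro (e.g. MC_CMD_WRITE32_IN_LEN(num) is 4+4*(num)) and bounded by the corresponding BUFFER _MINNUM/_MAXNUM limits.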
-#define MC_CMD_FC_IN_IO_REL_WRITE32_BUFFER_MINNUM 1 -#define MC_CMD_FC_IN_IO_REL_WRITE32_BUFFER_MAXNUM 59 - -/* MC_CMD_FC_IN_UHLINK msgrequest */ -#define MC_CMD_FC_IN_UHLINK_LEN 8 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -#define MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 -#define MC_CMD_FC_IN_UHLINK_OP_LBN 0 -#define MC_CMD_FC_IN_UHLINK_OP_WIDTH 8 -/* enum: Get PHY configuration info */ -#define MC_CMD_FC_OP_UHLINK_PHY 0x1 -/* enum: Get MAC configuration info */ -#define MC_CMD_FC_OP_UHLINK_MAC 0x2 -/* enum: Get Rx eye table */ -#define MC_CMD_FC_OP_UHLINK_RX_EYE 0x3 -/* enum: Get Rx eye plot */ -#define MC_CMD_FC_OP_UHLINK_DUMP_RX_EYE_PLOT 0x4 -/* enum: Get Rx eye plot */ -#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT 0x5 -/* enum: Retune Rx settings */ -#define MC_CMD_FC_OP_UHLINK_RX_TUNE 0x6 -/* enum: Set loopback mode on fpga port */ -#define MC_CMD_FC_OP_UHLINK_LOOPBACK_SET 0x7 -/* enum: Get loopback mode config state on fpga port */ -#define MC_CMD_FC_OP_UHLINK_LOOPBACK_GET 0x8 -#define MC_CMD_FC_IN_UHLINK_PORT_TYPE_LBN 8 -#define MC_CMD_FC_IN_UHLINK_PORT_TYPE_WIDTH 8 -#define MC_CMD_FC_IN_UHLINK_PORT_IDX_LBN 16 -#define MC_CMD_FC_IN_UHLINK_PORT_IDX_WIDTH 8 -#define MC_CMD_FC_IN_UHLINK_CMD_FORMAT_LBN 24 -#define MC_CMD_FC_IN_UHLINK_CMD_FORMAT_WIDTH 8 -/* enum: Default FC command format; the fields PORT_TYPE and PORT_IDX are - * irrelevant. Port number is derived from pci_fn; passed in FC header. - */ -#define MC_CMD_FC_OP_UHLINK_CMD_FORMAT_DEFAULT 0x0 -/* enum: Override default port number. Port number determined by fields - * PORT_TYPE and PORT_IDX. - */ -#define MC_CMD_FC_OP_UHLINK_CMD_FORMAT_PORT_OVERRIDE 0x1 - -/* MC_CMD_FC_OP_UHLINK_PHY msgrequest */ -#define MC_CMD_FC_OP_UHLINK_PHY_LEN 8 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */ - -/* MC_CMD_FC_OP_UHLINK_MAC msgrequest */ -#define MC_CMD_FC_OP_UHLINK_MAC_LEN 8 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */ - -/* MC_CMD_FC_OP_UHLINK_RX_EYE msgrequest */ -#define MC_CMD_FC_OP_UHLINK_RX_EYE_LEN 12 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */ -#define MC_CMD_FC_OP_UHLINK_RX_EYE_INDEX_OFST 8 -#define MC_CMD_FC_UHLINK_RX_EYE_PER_BLOCK 0x30 /* enum */ - -/* MC_CMD_FC_OP_UHLINK_DUMP_RX_EYE_PLOT msgrequest */ -#define MC_CMD_FC_OP_UHLINK_DUMP_RX_EYE_PLOT_LEN 8 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */ - -/* MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT msgrequest */ -#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_LEN 20 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */ -#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_DC_GAIN_OFST 8 -#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_EQ_CONTROL_OFST 12 -#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_INDEX_OFST 16 -#define MC_CMD_FC_UHLINK_RX_EYE_PLOT_ROWS_PER_BLOCK 0x1e /* enum */ - -/* MC_CMD_FC_OP_UHLINK_RX_TUNE msgrequest */ -#define MC_CMD_FC_OP_UHLINK_RX_TUNE_LEN 8 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */ - -/* MC_CMD_FC_OP_UHLINK_LOOPBACK_SET msgrequest */ -#define MC_CMD_FC_OP_UHLINK_LOOPBACK_SET_LEN 16 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */ -#define MC_CMD_FC_OP_UHLINK_LOOPBACK_SET_TYPE_OFST 8 -#define MC_CMD_FC_UHLINK_LOOPBACK_TYPE_PCS_SERIAL 0x0 /* enum */ -#define MC_CMD_FC_UHLINK_LOOPBACK_TYPE_PMA_PRE_CDR 0x1 /* enum */ -#define MC_CMD_FC_UHLINK_LOOPBACK_TYPE_PMA_POST_CDR 0x2 /* enum */ -#define MC_CMD_FC_OP_UHLINK_LOOPBACK_SET_STATE_OFST 12 -#define MC_CMD_FC_UHLINK_LOOPBACK_STATE_OFF 0x0 /* enum */ -#define 
MC_CMD_FC_UHLINK_LOOPBACK_STATE_ON 0x1 /* enum */ - -/* MC_CMD_FC_OP_UHLINK_LOOPBACK_GET msgrequest */ -#define MC_CMD_FC_OP_UHLINK_LOOPBACK_GET_LEN 12 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */ -#define MC_CMD_FC_OP_UHLINK_LOOPBACK_GET_TYPE_OFST 8 - -/* MC_CMD_FC_IN_SET_LINK msgrequest */ -#define MC_CMD_FC_IN_SET_LINK_LEN 16 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* See MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */ -#define MC_CMD_FC_IN_SET_LINK_MODE_OFST 4 -#define MC_CMD_FC_IN_SET_LINK_SPEED_OFST 8 -#define MC_CMD_FC_IN_SET_LINK_FLAGS_OFST 12 -#define MC_CMD_FC_IN_SET_LINK_LOWPOWER_LBN 0 -#define MC_CMD_FC_IN_SET_LINK_LOWPOWER_WIDTH 1 -#define MC_CMD_FC_IN_SET_LINK_POWEROFF_LBN 1 -#define MC_CMD_FC_IN_SET_LINK_POWEROFF_WIDTH 1 -#define MC_CMD_FC_IN_SET_LINK_TXDIS_LBN 2 -#define MC_CMD_FC_IN_SET_LINK_TXDIS_WIDTH 1 - -/* MC_CMD_FC_IN_LICENSE msgrequest */ -#define MC_CMD_FC_IN_LICENSE_LEN 8 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -#define MC_CMD_FC_IN_LICENSE_OP_OFST 4 -#define MC_CMD_FC_IN_LICENSE_UPDATE_LICENSE 0x0 /* enum */ -#define MC_CMD_FC_IN_LICENSE_GET_KEY_STATS 0x1 /* enum */ - -/* MC_CMD_FC_IN_STARTUP msgrequest */ -#define MC_CMD_FC_IN_STARTUP_LEN 40 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -#define MC_CMD_FC_IN_STARTUP_BASE_OFST 4 -#define MC_CMD_FC_IN_STARTUP_LENGTH_OFST 8 -/* Length of identifier */ -#define MC_CMD_FC_IN_STARTUP_IDLENGTH_OFST 12 -/* Identifier for AOE FPGA */ -#define MC_CMD_FC_IN_STARTUP_ID_OFST 16 -#define MC_CMD_FC_IN_STARTUP_ID_LEN 1 -#define MC_CMD_FC_IN_STARTUP_ID_NUM 24 - -/* MC_CMD_FC_IN_DMA msgrequest */ -#define MC_CMD_FC_IN_DMA_LEN 8 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -#define MC_CMD_FC_IN_DMA_OP_OFST 4 -#define MC_CMD_FC_IN_DMA_STOP 0x0 /* enum */ -#define MC_CMD_FC_IN_DMA_READ 0x1 /* enum */ - -/* MC_CMD_FC_IN_DMA_STOP msgrequest */ -#define MC_CMD_FC_IN_DMA_STOP_LEN 12 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* MC_CMD_FC_IN_DMA_OP_OFST 4 */ -/* FC supplied handle */ -#define MC_CMD_FC_IN_DMA_STOP_FC_HANDLE_OFST 8 - -/* MC_CMD_FC_IN_DMA_READ msgrequest */ -#define MC_CMD_FC_IN_DMA_READ_LEN 16 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* MC_CMD_FC_IN_DMA_OP_OFST 4 */ -#define MC_CMD_FC_IN_DMA_READ_OFFSET_OFST 8 -#define MC_CMD_FC_IN_DMA_READ_LENGTH_OFST 12 - -/* MC_CMD_FC_IN_TIMED_READ msgrequest */ -#define MC_CMD_FC_IN_TIMED_READ_LEN 8 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -#define MC_CMD_FC_IN_TIMED_READ_OP_OFST 4 -#define MC_CMD_FC_IN_TIMED_READ_SET 0x0 /* enum */ -#define MC_CMD_FC_IN_TIMED_READ_GET 0x1 /* enum */ -#define MC_CMD_FC_IN_TIMED_READ_CLEAR 0x2 /* enum */ - -/* MC_CMD_FC_IN_TIMED_READ_SET msgrequest */ -#define MC_CMD_FC_IN_TIMED_READ_SET_LEN 52 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* MC_CMD_FC_IN_TIMED_READ_OP_OFST 4 */ -/* Host supplied handle (unique) */ -#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_HANDLE_OFST 8 -/* Address into which to transfer data in host */ -#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_DMA_ADDRESS_OFST 12 -#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_DMA_ADDRESS_LEN 8 -#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_DMA_ADDRESS_LO_OFST 12 -#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_DMA_ADDRESS_HI_OFST 16 -/* AOE address from which to transfer data */ -#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_ADDRESS_OFST 20 -#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_ADDRESS_LEN 8 -#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_ADDRESS_LO_OFST 20 -#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_ADDRESS_HI_OFST 24 -/* Length of AOE transfer (total) */ -#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_LENGTH_OFST 28 -/* Length of host transfer (total) */ -#define 
MC_CMD_FC_IN_TIMED_READ_SET_HOST_LENGTH_OFST 32 -/* Offset back from aoe_address to apply operation to */ -#define MC_CMD_FC_IN_TIMED_READ_SET_OFFSET_OFST 36 -/* Data to apply at offset */ -#define MC_CMD_FC_IN_TIMED_READ_SET_DATA_OFST 40 -#define MC_CMD_FC_IN_TIMED_READ_SET_FLAGS_OFST 44 -#define MC_CMD_FC_IN_TIMED_READ_SET_INDIRECT_LBN 0 -#define MC_CMD_FC_IN_TIMED_READ_SET_INDIRECT_WIDTH 1 -#define MC_CMD_FC_IN_TIMED_READ_SET_DOUBLE_LBN 1 -#define MC_CMD_FC_IN_TIMED_READ_SET_DOUBLE_WIDTH 1 -#define MC_CMD_FC_IN_TIMED_READ_SET_EVENT_LBN 2 -#define MC_CMD_FC_IN_TIMED_READ_SET_EVENT_WIDTH 1 -#define MC_CMD_FC_IN_TIMED_READ_SET_PREREAD_LBN 3 -#define MC_CMD_FC_IN_TIMED_READ_SET_PREREAD_WIDTH 2 -#define MC_CMD_FC_IN_TIMED_READ_SET_NONE 0x0 /* enum */ -#define MC_CMD_FC_IN_TIMED_READ_SET_READ 0x1 /* enum */ -#define MC_CMD_FC_IN_TIMED_READ_SET_WRITE 0x2 /* enum */ -#define MC_CMD_FC_IN_TIMED_READ_SET_READWRITE 0x3 /* enum */ -/* Period at which reads are performed (100ms units) */ -#define MC_CMD_FC_IN_TIMED_READ_SET_PERIOD_OFST 48 - -/* MC_CMD_FC_IN_TIMED_READ_GET msgrequest */ -#define MC_CMD_FC_IN_TIMED_READ_GET_LEN 12 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* MC_CMD_FC_IN_TIMED_READ_OP_OFST 4 */ -/* FC supplied handle */ -#define MC_CMD_FC_IN_TIMED_READ_GET_FC_HANDLE_OFST 8 - -/* MC_CMD_FC_IN_TIMED_READ_CLEAR msgrequest */ -#define MC_CMD_FC_IN_TIMED_READ_CLEAR_LEN 12 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* MC_CMD_FC_IN_TIMED_READ_OP_OFST 4 */ -/* FC supplied handle */ -#define MC_CMD_FC_IN_TIMED_READ_CLEAR_FC_HANDLE_OFST 8 - -/* MC_CMD_FC_IN_LOG msgrequest */ -#define MC_CMD_FC_IN_LOG_LEN 8 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -#define MC_CMD_FC_IN_LOG_OP_OFST 4 -#define MC_CMD_FC_IN_LOG_ADDR_RANGE 0x0 /* enum */ -#define MC_CMD_FC_IN_LOG_JTAG_UART 0x1 /* enum */ - -/* MC_CMD_FC_IN_LOG_ADDR_RANGE msgrequest */ -#define MC_CMD_FC_IN_LOG_ADDR_RANGE_LEN 20 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* MC_CMD_FC_IN_LOG_OP_OFST 4 */ -/* Partition offset into flash */ -#define MC_CMD_FC_IN_LOG_ADDR_RANGE_OFFSET_OFST 8 -/* Partition length */ -#define MC_CMD_FC_IN_LOG_ADDR_RANGE_LENGTH_OFST 12 -/* Partition erase size */ -#define MC_CMD_FC_IN_LOG_ADDR_RANGE_ERASE_SIZE_OFST 16 - -/* MC_CMD_FC_IN_LOG_JTAG_UART msgrequest */ -#define MC_CMD_FC_IN_LOG_JTAG_UART_LEN 12 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* MC_CMD_FC_IN_LOG_OP_OFST 4 */ -/* Enable/disable printing to JTAG UART */ -#define MC_CMD_FC_IN_LOG_JTAG_UART_ENABLE_OFST 8 - -/* MC_CMD_FC_IN_CLOCK msgrequest */ -#define MC_CMD_FC_IN_CLOCK_LEN 12 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -#define MC_CMD_FC_IN_CLOCK_OP_OFST 4 -#define MC_CMD_FC_IN_CLOCK_GET_TIME 0x0 /* enum */ -#define MC_CMD_FC_IN_CLOCK_SET_TIME 0x1 /* enum */ -/* Perform a clock operation */ -#define MC_CMD_FC_IN_CLOCK_ID_OFST 8 -#define MC_CMD_FC_IN_CLOCK_STATS 0x0 /* enum */ -#define MC_CMD_FC_IN_CLOCK_MAC 0x1 /* enum */ - -/* MC_CMD_FC_IN_CLOCK_GET_TIME msgrequest */ -#define MC_CMD_FC_IN_CLOCK_GET_TIME_LEN 12 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* MC_CMD_FC_IN_CLOCK_OP_OFST 4 */ -/* Retrieve the clock value of the specified clock */ -/* MC_CMD_FC_IN_CLOCK_ID_OFST 8 */ - -/* MC_CMD_FC_IN_CLOCK_SET_TIME msgrequest */ -#define MC_CMD_FC_IN_CLOCK_SET_TIME_LEN 24 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* MC_CMD_FC_IN_CLOCK_OP_OFST 4 */ -/* MC_CMD_FC_IN_CLOCK_ID_OFST 8 */ -#define MC_CMD_FC_IN_CLOCK_SET_TIME_SECONDS_OFST 12 -#define MC_CMD_FC_IN_CLOCK_SET_TIME_SECONDS_LEN 8 -#define MC_CMD_FC_IN_CLOCK_SET_TIME_SECONDS_LO_OFST 12 -#define MC_CMD_FC_IN_CLOCK_SET_TIME_SECONDS_HI_OFST 16 -/* Set the clock value of the specified clock */ 
-#define MC_CMD_FC_IN_CLOCK_SET_TIME_NANOSECONDS_OFST 20 - -/* MC_CMD_FC_IN_DDR msgrequest */ -#define MC_CMD_FC_IN_DDR_LEN 12 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -#define MC_CMD_FC_IN_DDR_OP_OFST 4 -#define MC_CMD_FC_IN_DDR_SET_SPD 0x0 /* enum */ -#define MC_CMD_FC_IN_DDR_GET_STATUS 0x1 /* enum */ -#define MC_CMD_FC_IN_DDR_SET_INFO 0x2 /* enum */ -#define MC_CMD_FC_IN_DDR_BANK_OFST 8 -#define MC_CMD_FC_IN_DDR_BANK_B0 0x0 /* enum */ -#define MC_CMD_FC_IN_DDR_BANK_B1 0x1 /* enum */ -#define MC_CMD_FC_IN_DDR_BANK_T0 0x2 /* enum */ -#define MC_CMD_FC_IN_DDR_BANK_T1 0x3 /* enum */ -#define MC_CMD_FC_IN_DDR_NUM_BANKS 0x4 /* enum */ - -/* MC_CMD_FC_IN_DDR_SET_SPD msgrequest */ -#define MC_CMD_FC_IN_DDR_SET_SPD_LEN 148 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* MC_CMD_FC_IN_DDR_OP_OFST 4 */ -/* Affected bank */ -/* MC_CMD_FC_IN_DDR_BANK_OFST 8 */ -/* Flags */ -#define MC_CMD_FC_IN_DDR_FLAGS_OFST 12 -#define MC_CMD_FC_IN_DDR_SET_SPD_ACTIVE 0x1 /* enum */ -/* 128-byte page of serial presence detect data read from module's EEPROM */ -#define MC_CMD_FC_IN_DDR_SPD_OFST 16 -#define MC_CMD_FC_IN_DDR_SPD_LEN 1 -#define MC_CMD_FC_IN_DDR_SPD_NUM 128 -/* Page index of the spd data copied into MC_CMD_FC_IN_DDR_SPD */ -#define MC_CMD_FC_IN_DDR_SPD_PAGE_ID_OFST 144 - -/* MC_CMD_FC_IN_DDR_SET_INFO msgrequest */ -#define MC_CMD_FC_IN_DDR_SET_INFO_LEN 16 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* MC_CMD_FC_IN_DDR_OP_OFST 4 */ -/* Affected bank */ -/* MC_CMD_FC_IN_DDR_BANK_OFST 8 */ -/* Size of DDR */ -#define MC_CMD_FC_IN_DDR_SIZE_OFST 12 - -/* MC_CMD_FC_IN_DDR_GET_STATUS msgrequest */ -#define MC_CMD_FC_IN_DDR_GET_STATUS_LEN 12 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* MC_CMD_FC_IN_DDR_OP_OFST 4 */ -/* Affected bank */ -/* MC_CMD_FC_IN_DDR_BANK_OFST 8 */ - -/* MC_CMD_FC_IN_TIMESTAMP msgrequest */ -#define MC_CMD_FC_IN_TIMESTAMP_LEN 8 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* FC timestamp operation code */ -#define MC_CMD_FC_IN_TIMESTAMP_OP_OFST 4 -/* enum: Read transmit timestamp(s) */ -#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT 0x0 -/* enum: Read snapshot timestamps */ -#define MC_CMD_FC_IN_TIMESTAMP_READ_SNAPSHOT 0x1 -/* enum: Clear all transmit timestamps */ -#define MC_CMD_FC_IN_TIMESTAMP_CLEAR_TRANSMIT 0x2 - -/* MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT msgrequest */ -#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_LEN 28 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_OP_OFST 4 -/* Control filtering of the returned timestamp and sequence number specified - * here - */ -#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_FILTER_OFST 8 -/* enum: Return most recent timestamp. 
No filtering */ -#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_LATEST 0x0 -/* enum: Match timestamp against the PTP clock ID, port number and sequence - * number specified - */ -#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_MATCH 0x1 -/* Clock identity of PTP packet for which timestamp required */ -#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_CLOCK_ID_OFST 12 -#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_CLOCK_ID_LEN 8 -#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_CLOCK_ID_LO_OFST 12 -#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_CLOCK_ID_HI_OFST 16 -/* Port number of PTP packet for which timestamp required */ -#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_PORT_NUM_OFST 20 -/* Sequence number of PTP packet for which timestamp required */ -#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_SEQ_NUM_OFST 24 - -/* MC_CMD_FC_IN_TIMESTAMP_READ_SNAPSHOT msgrequest */ -#define MC_CMD_FC_IN_TIMESTAMP_READ_SNAPSHOT_LEN 8 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -#define MC_CMD_FC_IN_TIMESTAMP_READ_SNAPSHOT_OP_OFST 4 - -/* MC_CMD_FC_IN_TIMESTAMP_CLEAR_TRANSMIT msgrequest */ -#define MC_CMD_FC_IN_TIMESTAMP_CLEAR_TRANSMIT_LEN 8 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -#define MC_CMD_FC_IN_TIMESTAMP_CLEAR_TRANSMIT_OP_OFST 4 - -/* MC_CMD_FC_IN_SPI msgrequest */ -#define MC_CMD_FC_IN_SPI_LEN 8 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* Basic commands for SPI Flash. */ -#define MC_CMD_FC_IN_SPI_OP_OFST 4 -/* enum: SPI Flash read */ -#define MC_CMD_FC_IN_SPI_READ 0x0 -/* enum: SPI Flash write */ -#define MC_CMD_FC_IN_SPI_WRITE 0x1 -/* enum: SPI Flash erase */ -#define MC_CMD_FC_IN_SPI_ERASE 0x2 - -/* MC_CMD_FC_IN_SPI_READ msgrequest */ -#define MC_CMD_FC_IN_SPI_READ_LEN 16 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -#define MC_CMD_FC_IN_SPI_READ_OP_OFST 4 -#define MC_CMD_FC_IN_SPI_READ_ADDR_OFST 8 -#define MC_CMD_FC_IN_SPI_READ_NUMBYTES_OFST 12 - -/* MC_CMD_FC_IN_SPI_WRITE msgrequest */ -#define MC_CMD_FC_IN_SPI_WRITE_LENMIN 16 -#define MC_CMD_FC_IN_SPI_WRITE_LENMAX 252 -#define MC_CMD_FC_IN_SPI_WRITE_LEN(num) (12+4*(num)) -/* MC_CMD_FC_IN_CMD_OFST 0 */ -#define MC_CMD_FC_IN_SPI_WRITE_OP_OFST 4 -#define MC_CMD_FC_IN_SPI_WRITE_ADDR_OFST 8 -#define MC_CMD_FC_IN_SPI_WRITE_BUFFER_OFST 12 -#define MC_CMD_FC_IN_SPI_WRITE_BUFFER_LEN 4 -#define MC_CMD_FC_IN_SPI_WRITE_BUFFER_MINNUM 1 -#define MC_CMD_FC_IN_SPI_WRITE_BUFFER_MAXNUM 60 - -/* MC_CMD_FC_IN_SPI_ERASE msgrequest */ -#define MC_CMD_FC_IN_SPI_ERASE_LEN 16 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -#define MC_CMD_FC_IN_SPI_ERASE_OP_OFST 4 -#define MC_CMD_FC_IN_SPI_ERASE_ADDR_OFST 8 -#define MC_CMD_FC_IN_SPI_ERASE_NUMBYTES_OFST 12 - -/* MC_CMD_FC_IN_DIAG msgrequest */ -#define MC_CMD_FC_IN_DIAG_LEN 8 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -/* Operation code indicating component type */ -#define MC_CMD_FC_IN_DIAG_OP_OFST 4 -/* enum: Power noise generator. */ -#define MC_CMD_FC_IN_DIAG_POWER_NOISE 0x0 -/* enum: DDR soak test component. */ -#define MC_CMD_FC_IN_DIAG_DDR_SOAK 0x1 -/* enum: Diagnostics datapath control component. 
*/ -#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL 0x2 - -/* MC_CMD_FC_IN_DIAG_POWER_NOISE msgrequest */ -#define MC_CMD_FC_IN_DIAG_POWER_NOISE_LEN 12 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -#define MC_CMD_FC_IN_DIAG_POWER_NOISE_OP_OFST 4 -/* Sub-opcode describing the operation to be carried out */ -#define MC_CMD_FC_IN_DIAG_POWER_NOISE_SUB_OP_OFST 8 -/* enum: Read the configuration (the 32-bit values in each of the clock enable - * count and toggle count registers) - */ -#define MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG 0x0 -/* enum: Write a new configuration to the clock enable count and toggle count - * registers - */ -#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG 0x1 - -/* MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG msgrequest */ -#define MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG_LEN 12 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -#define MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG_OP_OFST 4 -#define MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG_SUB_OP_OFST 8 - -/* MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG msgrequest */ -#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_LEN 20 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_OP_OFST 4 -#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_SUB_OP_OFST 8 -/* The 32-bit value to be written to the toggle count register */ -#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_TOGGLE_COUNT_OFST 12 -/* The 32-bit value to be written to the clock enable count register */ -#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_CLKEN_COUNT_OFST 16 - -/* MC_CMD_FC_IN_DIAG_DDR_SOAK msgrequest */ -#define MC_CMD_FC_IN_DIAG_DDR_SOAK_LEN 12 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -#define MC_CMD_FC_IN_DIAG_DDR_SOAK_OP_OFST 4 -/* Sub-opcode describing the operation to be carried out */ -#define MC_CMD_FC_IN_DIAG_DDR_SOAK_SUB_OP_OFST 8 -/* enum: Starts DDR soak test on selected banks */ -#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START 0x0 -/* enum: Read status of DDR soak test */ -#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT 0x1 -/* enum: Stop test */ -#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP 0x2 -/* enum: Set or clear bit that triggers fake errors. These cause subsequent - * tests to fail until the bit is cleared. 
- */ -#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR 0x3 - -/* MC_CMD_FC_IN_DIAG_DDR_SOAK_START msgrequest */ -#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_LEN 24 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_OP_OFST 4 -#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_SUB_OP_OFST 8 -/* Mask of DDR banks to be tested */ -#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_BANK_MASK_OFST 12 -/* Pattern to use in the soak test */ -#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_TEST_PATTERN_OFST 16 -#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_ZEROS 0x0 /* enum */ -#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_ONES 0x1 /* enum */ -/* Either multiple automatic tests until a STOP command is issued, or one - * single test - */ -#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_TEST_TYPE_OFST 20 -#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_ONGOING_TEST 0x0 /* enum */ -#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_SINGLE_TEST 0x1 /* enum */ - -/* MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT msgrequest */ -#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_LEN 16 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_OP_OFST 4 -#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_SUB_OP_OFST 8 -/* DDR bank to read status from */ -#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_BANK_ID_OFST 12 -#define MC_CMD_FC_DDR_BANK0 0x0 /* enum */ -#define MC_CMD_FC_DDR_BANK1 0x1 /* enum */ -#define MC_CMD_FC_DDR_BANK2 0x2 /* enum */ -#define MC_CMD_FC_DDR_BANK3 0x3 /* enum */ -#define MC_CMD_FC_DDR_AOEMEM_MAX_BANKS 0x4 /* enum */ - -/* MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP msgrequest */ -#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_LEN 16 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_OP_OFST 4 -#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_SUB_OP_OFST 8 -/* Mask of DDR banks to be tested */ -#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_BANK_MASK_OFST 12 - -/* MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR msgrequest */ -#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_LEN 20 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_OP_OFST 4 -#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_SUB_OP_OFST 8 -/* Mask of DDR banks to set/clear error flag on */ -#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_BANK_MASK_OFST 12 -#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_FLAG_ACTION_OFST 16 -#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_CLEAR 0x0 /* enum */ -#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_SET 0x1 /* enum */ - -/* MC_CMD_FC_IN_DIAG_DATAPATH_CTRL msgrequest */ -#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_LEN 12 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_OP_OFST 4 -/* Sub-opcode describing the operation to be carried out */ -#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SUB_OP_OFST 8 -/* enum: Set a known datapath configuration */ -#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE 0x0 -/* enum: Apply raw config to datapath control registers */ -#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG 0x1 - -/* MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE msgrequest */ -#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_LEN 16 -/* MC_CMD_FC_IN_CMD_OFST 0 */ -#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_OP_OFST 4 -#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_SUB_OP_OFST 8 -/* Datapath configuration identifier */ -#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_MODE_OFST 12 -#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_PASSTHROUGH 0x0 /* enum */ -#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_SNAKE 0x1 /* enum */ - -/* MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG msgrequest */ -#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_LEN 24 -/* MC_CMD_FC_IN_CMD_OFST 0 */ 
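Aside (illustrative sketch, not part of the patch): sub-dword fields throughout this header are described by _LBN (least-significant bit number) and _WIDTH macros rather than byte offsets. A minimal extractor is sketched below using the WATCHDOG flag of MC_CMD_GET_BOOT_STATUS_OUT defined earlier in this section; mcdi_field() and boot_watchdog_fired() are hypothetical helpers, not the driver's real accessors.

/* Sketch: read a bitfield from a 32-bit MCDI dword via _LBN/_WIDTH.
 * The LBN/WIDTH values match MC_CMD_GET_BOOT_STATUS_OUT above; the
 * helper is illustrative only.
 */
#include <stdint.h>

#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_LBN 0
#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_WIDTH 1

static uint32_t mcdi_field(uint32_t dword, unsigned int lbn, unsigned int width)
{
	/* Guard width >= 32: shifting a 32-bit value by 32 is undefined. */
	uint32_t mask = (width >= 32) ? 0xffffffffu : ((1u << width) - 1u);

	return (dword >> lbn) & mask;
}

static int boot_watchdog_fired(uint32_t flags_dword)
{
	return mcdi_field(flags_dword,
			  MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_LBN,
			  MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_WIDTH) != 0;
}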
-#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_OP_OFST 4 -#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_SUB_OP_OFST 8 -/* Value to write into control register 1 */ -#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_CONTROL1_OFST 12 -/* Value to write into control register 2 */ -#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_CONTROL2_OFST 16 -/* Value to write into control register 3 */ -#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_CONTROL3_OFST 20 - -/* MC_CMD_FC_OUT msgresponse */ -#define MC_CMD_FC_OUT_LEN 0 - -/* MC_CMD_FC_OUT_NULL msgresponse */ -#define MC_CMD_FC_OUT_NULL_LEN 0 - -/* MC_CMD_FC_OUT_READ32 msgresponse */ -#define MC_CMD_FC_OUT_READ32_LENMIN 4 -#define MC_CMD_FC_OUT_READ32_LENMAX 252 -#define MC_CMD_FC_OUT_READ32_LEN(num) (0+4*(num)) -#define MC_CMD_FC_OUT_READ32_BUFFER_OFST 0 -#define MC_CMD_FC_OUT_READ32_BUFFER_LEN 4 -#define MC_CMD_FC_OUT_READ32_BUFFER_MINNUM 1 -#define MC_CMD_FC_OUT_READ32_BUFFER_MAXNUM 63 - -/* MC_CMD_FC_OUT_WRITE32 msgresponse */ -#define MC_CMD_FC_OUT_WRITE32_LEN 0 - -/* MC_CMD_FC_OUT_TRC_READ msgresponse */ -#define MC_CMD_FC_OUT_TRC_READ_LEN 16 -#define MC_CMD_FC_OUT_TRC_READ_DATA_OFST 0 -#define MC_CMD_FC_OUT_TRC_READ_DATA_LEN 4 -#define MC_CMD_FC_OUT_TRC_READ_DATA_NUM 4 - -/* MC_CMD_FC_OUT_TRC_WRITE msgresponse */ -#define MC_CMD_FC_OUT_TRC_WRITE_LEN 0 - -/* MC_CMD_FC_OUT_GET_VERSION msgresponse */ -#define MC_CMD_FC_OUT_GET_VERSION_LEN 12 -#define MC_CMD_FC_OUT_GET_VERSION_FIRMWARE_OFST 0 -#define MC_CMD_FC_OUT_GET_VERSION_VERSION_OFST 4 -#define MC_CMD_FC_OUT_GET_VERSION_VERSION_LEN 8 -#define MC_CMD_FC_OUT_GET_VERSION_VERSION_LO_OFST 4 -#define MC_CMD_FC_OUT_GET_VERSION_VERSION_HI_OFST 8 - -/* MC_CMD_FC_OUT_TRC_RX_READ msgresponse */ -#define MC_CMD_FC_OUT_TRC_RX_READ_LEN 8 -#define MC_CMD_FC_OUT_TRC_RX_READ_DATA_OFST 0 -#define MC_CMD_FC_OUT_TRC_RX_READ_DATA_LEN 4 -#define MC_CMD_FC_OUT_TRC_RX_READ_DATA_NUM 2 - -/* MC_CMD_FC_OUT_TRC_RX_WRITE msgresponse */ -#define MC_CMD_FC_OUT_TRC_RX_WRITE_LEN 0 - -/* MC_CMD_FC_OUT_MAC_RECONFIGURE msgresponse */ -#define MC_CMD_FC_OUT_MAC_RECONFIGURE_LEN 0 - -/* MC_CMD_FC_OUT_MAC_SET_LINK msgresponse */ -#define MC_CMD_FC_OUT_MAC_SET_LINK_LEN 0 - -/* MC_CMD_FC_OUT_MAC_READ_STATUS msgresponse */ -#define MC_CMD_FC_OUT_MAC_READ_STATUS_LEN 4 -#define MC_CMD_FC_OUT_MAC_READ_STATUS_STATUS_OFST 0 - -/* MC_CMD_FC_OUT_MAC_GET_RX_STATS msgresponse */ -#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_LEN ((((0-1+(64*MC_CMD_FC_MAC_RX_NSTATS))+1))>>3) -#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_OFST 0 -#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_LEN 8 -#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_LO_OFST 0 -#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_HI_OFST 4 -#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_NUM MC_CMD_FC_MAC_RX_NSTATS -#define MC_CMD_FC_MAC_RX_STATS_OCTETS 0x0 /* enum */ -#define MC_CMD_FC_MAC_RX_OCTETS_OK 0x1 /* enum */ -#define MC_CMD_FC_MAC_RX_ALIGNMENT_ERRORS 0x2 /* enum */ -#define MC_CMD_FC_MAC_RX_PAUSE_MAC_CTRL_FRAMES 0x3 /* enum */ -#define MC_CMD_FC_MAC_RX_FRAMES_OK 0x4 /* enum */ -#define MC_CMD_FC_MAC_RX_CRC_ERRORS 0x5 /* enum */ -#define MC_CMD_FC_MAC_RX_VLAN_OK 0x6 /* enum */ -#define MC_CMD_FC_MAC_RX_ERRORS 0x7 /* enum */ -#define MC_CMD_FC_MAC_RX_UCAST_PKTS 0x8 /* enum */ -#define MC_CMD_FC_MAC_RX_MULTICAST_PKTS 0x9 /* enum */ -#define MC_CMD_FC_MAC_RX_BROADCAST_PKTS 0xa /* enum */ -#define MC_CMD_FC_MAC_RX_STATS_DROP_EVENTS 0xb /* enum */ -#define MC_CMD_FC_MAC_RX_STATS_PKTS 0xc /* enum */ -#define MC_CMD_FC_MAC_RX_STATS_UNDERSIZE_PKTS 0xd /* 
enum */ -#define MC_CMD_FC_MAC_RX_STATS_PKTS_64 0xe /* enum */ -#define MC_CMD_FC_MAC_RX_STATS_PKTS_65_127 0xf /* enum */ -#define MC_CMD_FC_MAC_RX_STATS_PKTS_128_255 0x10 /* enum */ -#define MC_CMD_FC_MAC_RX_STATS_PKTS_256_511 0x11 /* enum */ -#define MC_CMD_FC_MAC_RX_STATS_PKTS_512_1023 0x12 /* enum */ -#define MC_CMD_FC_MAC_RX_STATS_PKTS_1024_1518 0x13 /* enum */ -#define MC_CMD_FC_MAC_RX_STATS_PKTS_1519_MAX 0x14 /* enum */ -#define MC_CMD_FC_MAC_RX_STATS_OVERSIZE_PKTS 0x15 /* enum */ -#define MC_CMD_FC_MAC_RX_STATS_JABBERS 0x16 /* enum */ -#define MC_CMD_FC_MAC_RX_STATS_FRAGMENTS 0x17 /* enum */ -#define MC_CMD_FC_MAC_RX_MAC_CONTROL_FRAMES 0x18 /* enum */ -/* enum: (Last entry) */ -#define MC_CMD_FC_MAC_RX_NSTATS 0x19 - -/* MC_CMD_FC_OUT_MAC_GET_TX_STATS msgresponse */ -#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_LEN ((((0-1+(64*MC_CMD_FC_MAC_TX_NSTATS))+1))>>3) -#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_OFST 0 -#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_LEN 8 -#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_LO_OFST 0 -#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_HI_OFST 4 -#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_NUM MC_CMD_FC_MAC_TX_NSTATS -#define MC_CMD_FC_MAC_TX_STATS_OCTETS 0x0 /* enum */ -#define MC_CMD_FC_MAC_TX_OCTETS_OK 0x1 /* enum */ -#define MC_CMD_FC_MAC_TX_ALIGNMENT_ERRORS 0x2 /* enum */ -#define MC_CMD_FC_MAC_TX_PAUSE_MAC_CTRL_FRAMES 0x3 /* enum */ -#define MC_CMD_FC_MAC_TX_FRAMES_OK 0x4 /* enum */ -#define MC_CMD_FC_MAC_TX_CRC_ERRORS 0x5 /* enum */ -#define MC_CMD_FC_MAC_TX_VLAN_OK 0x6 /* enum */ -#define MC_CMD_FC_MAC_TX_ERRORS 0x7 /* enum */ -#define MC_CMD_FC_MAC_TX_UCAST_PKTS 0x8 /* enum */ -#define MC_CMD_FC_MAC_TX_MULTICAST_PKTS 0x9 /* enum */ -#define MC_CMD_FC_MAC_TX_BROADCAST_PKTS 0xa /* enum */ -#define MC_CMD_FC_MAC_TX_STATS_DROP_EVENTS 0xb /* enum */ -#define MC_CMD_FC_MAC_TX_STATS_PKTS 0xc /* enum */ -#define MC_CMD_FC_MAC_TX_STATS_UNDERSIZE_PKTS 0xd /* enum */ -#define MC_CMD_FC_MAC_TX_STATS_PKTS_64 0xe /* enum */ -#define MC_CMD_FC_MAC_TX_STATS_PKTS_65_127 0xf /* enum */ -#define MC_CMD_FC_MAC_TX_STATS_PKTS_128_255 0x10 /* enum */ -#define MC_CMD_FC_MAC_TX_STATS_PKTS_256_511 0x11 /* enum */ -#define MC_CMD_FC_MAC_TX_STATS_PKTS_512_1023 0x12 /* enum */ -#define MC_CMD_FC_MAC_TX_STATS_PKTS_1024_1518 0x13 /* enum */ -#define MC_CMD_FC_MAC_TX_STATS_PKTS_1519_TX_MTU 0x14 /* enum */ -#define MC_CMD_FC_MAC_TX_MAC_CONTROL_FRAMES 0x15 /* enum */ -/* enum: (Last entry) */ -#define MC_CMD_FC_MAC_TX_NSTATS 0x16 - -/* MC_CMD_FC_OUT_MAC_GET_STATS msgresponse */ -#define MC_CMD_FC_OUT_MAC_GET_STATS_LEN ((((0-1+(64*MC_CMD_FC_MAC_NSTATS_PER_BLOCK))+1))>>3) -/* MAC Statistics */ -#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_OFST 0 -#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_LEN 8 -#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_LO_OFST 0 -#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_HI_OFST 4 -#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_NUM MC_CMD_FC_MAC_NSTATS_PER_BLOCK - -/* MC_CMD_FC_OUT_MAC msgresponse */ -#define MC_CMD_FC_OUT_MAC_LEN 0 - -/* MC_CMD_FC_OUT_SFP msgresponse */ -#define MC_CMD_FC_OUT_SFP_LEN 0 - -/* MC_CMD_FC_OUT_DDR_TEST_START msgresponse */ -#define MC_CMD_FC_OUT_DDR_TEST_START_LEN 0 - -/* MC_CMD_FC_OUT_DDR_TEST_POLL msgresponse */ -#define MC_CMD_FC_OUT_DDR_TEST_POLL_LEN 8 -#define MC_CMD_FC_OUT_DDR_TEST_POLL_STATUS_OFST 0 -#define MC_CMD_FC_OUT_DDR_TEST_POLL_CODE_LBN 0 -#define MC_CMD_FC_OUT_DDR_TEST_POLL_CODE_WIDTH 8 -/* enum: Test not yet initiated */ -#define MC_CMD_FC_OP_DDR_TEST_NONE 0x0 -/* enum: Test is in 
progress */ -#define MC_CMD_FC_OP_DDR_TEST_INPROGRESS 0x1 -/* enum: Timed completed */ -#define MC_CMD_FC_OP_DDR_TEST_SUCCESS 0x2 -/* enum: Test did not complete in specified time */ -#define MC_CMD_FC_OP_DDR_TEST_TIMER_EXPIRED 0x3 -#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_T0_LBN 11 -#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_T0_WIDTH 1 -#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_T1_LBN 10 -#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_T1_WIDTH 1 -#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_B0_LBN 9 -#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_B0_WIDTH 1 -#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_B1_LBN 8 -#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_B1_WIDTH 1 -/* Test result from FPGA */ -#define MC_CMD_FC_OUT_DDR_TEST_POLL_RESULT_OFST 4 -#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_T0_LBN 31 -#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_T0_WIDTH 1 -#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_T1_LBN 30 -#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_T1_WIDTH 1 -#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_B0_LBN 29 -#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_B0_WIDTH 1 -#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_B1_LBN 28 -#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_B1_WIDTH 1 -#define MC_CMD_FC_OUT_DDR_TEST_POLL_T0_LBN 15 -#define MC_CMD_FC_OUT_DDR_TEST_POLL_T0_WIDTH 5 -#define MC_CMD_FC_OUT_DDR_TEST_POLL_T1_LBN 10 -#define MC_CMD_FC_OUT_DDR_TEST_POLL_T1_WIDTH 5 -#define MC_CMD_FC_OUT_DDR_TEST_POLL_B0_LBN 5 -#define MC_CMD_FC_OUT_DDR_TEST_POLL_B0_WIDTH 5 -#define MC_CMD_FC_OUT_DDR_TEST_POLL_B1_LBN 0 -#define MC_CMD_FC_OUT_DDR_TEST_POLL_B1_WIDTH 5 -#define MC_CMD_FC_OUT_DDR_TEST_POLL_TEST_COMPLETE 0x0 /* enum */ -#define MC_CMD_FC_OUT_DDR_TEST_POLL_TEST_FAIL 0x1 /* enum */ -#define MC_CMD_FC_OUT_DDR_TEST_POLL_TEST_PASS 0x2 /* enum */ -#define MC_CMD_FC_OUT_DDR_TEST_POLL_CAL_FAIL 0x3 /* enum */ -#define MC_CMD_FC_OUT_DDR_TEST_POLL_CAL_SUCCESS 0x4 /* enum */ - -/* MC_CMD_FC_OUT_DDR_TEST msgresponse */ -#define MC_CMD_FC_OUT_DDR_TEST_LEN 0 - -/* MC_CMD_FC_OUT_GET_ASSERT msgresponse */ -#define MC_CMD_FC_OUT_GET_ASSERT_LEN 144 -/* Assertion status flag. */ -#define MC_CMD_FC_OUT_GET_ASSERT_GLOBAL_FLAGS_OFST 0 -#define MC_CMD_FC_OUT_GET_ASSERT_STATE_LBN 8 -#define MC_CMD_FC_OUT_GET_ASSERT_STATE_WIDTH 8 -/* enum: No crash data available */ -#define MC_CMD_FC_GET_ASSERT_FLAGS_STATE_CLEAR 0x0 -/* enum: New crash data available */ -#define MC_CMD_FC_GET_ASSERT_FLAGS_STATE_NEW 0x1 -/* enum: Crash data has been sent */ -#define MC_CMD_FC_GET_ASSERT_FLAGS_STATE_NOTIFIED 0x2 -#define MC_CMD_FC_OUT_GET_ASSERT_TYPE_LBN 0 -#define MC_CMD_FC_OUT_GET_ASSERT_TYPE_WIDTH 8 -/* enum: No crash has been recorded. */ -#define MC_CMD_FC_GET_ASSERT_FLAGS_TYPE_NONE 0x0 -/* enum: Crash due to exception. */ -#define MC_CMD_FC_GET_ASSERT_FLAGS_TYPE_EXCEPTION 0x1 -/* enum: Crash due to assertion. 
*/ -#define MC_CMD_FC_GET_ASSERT_FLAGS_TYPE_ASSERTION 0x2 -/* Failing PC value */ -#define MC_CMD_FC_OUT_GET_ASSERT_SAVED_PC_OFFS_OFST 4 -/* Saved GP regs */ -#define MC_CMD_FC_OUT_GET_ASSERT_GP_REGS_OFFS_OFST 8 -#define MC_CMD_FC_OUT_GET_ASSERT_GP_REGS_OFFS_LEN 4 -#define MC_CMD_FC_OUT_GET_ASSERT_GP_REGS_OFFS_NUM 31 -/* Exception Type */ -#define MC_CMD_FC_OUT_GET_ASSERT_EXCEPTION_TYPE_OFFS_OFST 132 -/* Instruction at which exception occurred */ -#define MC_CMD_FC_OUT_GET_ASSERT_EXCEPTION_PC_ADDR_OFFS_OFST 136 -/* BAD Address that triggered address-based exception */ -#define MC_CMD_FC_OUT_GET_ASSERT_EXCEPTION_BAD_ADDR_OFFS_OFST 140 - -/* MC_CMD_FC_OUT_FPGA_BUILD msgresponse */ -#define MC_CMD_FC_OUT_FPGA_BUILD_LEN 32 -#define MC_CMD_FC_OUT_FPGA_BUILD_COMPONENT_INFO_OFST 0 -#define MC_CMD_FC_OUT_FPGA_BUILD_IS_APPLICATION_LBN 31 -#define MC_CMD_FC_OUT_FPGA_BUILD_IS_APPLICATION_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_IS_LICENSED_LBN 30 -#define MC_CMD_FC_OUT_FPGA_BUILD_IS_LICENSED_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_COMPONENT_ID_LBN 16 -#define MC_CMD_FC_OUT_FPGA_BUILD_COMPONENT_ID_WIDTH 14 -#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_MAJOR_LBN 12 -#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_MAJOR_WIDTH 4 -#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_MINOR_LBN 4 -#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_MINOR_WIDTH 8 -#define MC_CMD_FC_OUT_FPGA_BUILD_BUILD_NUM_LBN 0 -#define MC_CMD_FC_OUT_FPGA_BUILD_BUILD_NUM_WIDTH 4 -/* Build timestamp (seconds since epoch) */ -#define MC_CMD_FC_OUT_FPGA_BUILD_TIMESTAMP_OFST 4 -#define MC_CMD_FC_OUT_FPGA_BUILD_PARAMETERS_OFST 8 -#define MC_CMD_FC_OUT_FPGA_BUILD_FPGA_TYPE_LBN 0 -#define MC_CMD_FC_OUT_FPGA_BUILD_FPGA_TYPE_WIDTH 8 -#define MC_CMD_FC_FPGA_TYPE_A7 0xa7 /* enum */ -#define MC_CMD_FC_FPGA_TYPE_A5 0xa5 /* enum */ -#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED1_LBN 8 -#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED1_WIDTH 10 -#define MC_CMD_FC_OUT_FPGA_BUILD_PTP_ENABLED_LBN 18 -#define MC_CMD_FC_OUT_FPGA_BUILD_PTP_ENABLED_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM1_RLDRAM_DEF_LBN 19 -#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM1_RLDRAM_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM2_RLDRAM_DEF_LBN 20 -#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM2_RLDRAM_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM3_RLDRAM_DEF_LBN 21 -#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM3_RLDRAM_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM4_RLDRAM_DEF_LBN 22 -#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM4_RLDRAM_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T0_DDR3_DEF_LBN 23 -#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T0_DDR3_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T1_DDR3_DEF_LBN 24 -#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T1_DDR3_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_B0_DDR3_DEF_LBN 25 -#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_B0_DDR3_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_B1_DDR3_DEF_LBN 26 -#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_B1_DDR3_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_DDR3_ECC_ENABLED_LBN 27 -#define MC_CMD_FC_OUT_FPGA_BUILD_DDR3_ECC_ENABLED_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T1_QDR_DEF_LBN 28 -#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T1_QDR_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED2_LBN 29 -#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED2_WIDTH 2 -#define MC_CMD_FC_OUT_FPGA_BUILD_CRC_APPEND_LBN 31 -#define MC_CMD_FC_OUT_FPGA_BUILD_CRC_APPEND_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_IDENTIFIER_OFST 12 -#define MC_CMD_FC_OUT_FPGA_BUILD_CHANGESET_LBN 0 -#define 
MC_CMD_FC_OUT_FPGA_BUILD_CHANGESET_WIDTH 16 -#define MC_CMD_FC_OUT_FPGA_BUILD_BUILD_FLAG_LBN 16 -#define MC_CMD_FC_OUT_FPGA_BUILD_BUILD_FLAG_WIDTH 1 -#define MC_CMD_FC_FPGA_BUILD_FLAG_INTERNAL 0x0 /* enum */ -#define MC_CMD_FC_FPGA_BUILD_FLAG_RELEASE 0x1 /* enum */ -#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED3_LBN 17 -#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED3_WIDTH 15 -#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_HI_OFST 16 -#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MINOR_LBN 0 -#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MINOR_WIDTH 16 -#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MAJOR_LBN 16 -#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MAJOR_WIDTH 16 -#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_LO_OFST 20 -#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_BUILD_LBN 0 -#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_BUILD_WIDTH 16 -#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MICRO_LBN 16 -#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MICRO_WIDTH 16 -#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED4_OFST 16 -#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED4_LEN 8 -#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED4_LO_OFST 16 -#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED4_HI_OFST 20 -#define MC_CMD_FC_OUT_FPGA_BUILD_REVISION_LO_OFST 24 -#define MC_CMD_FC_OUT_FPGA_BUILD_REVISION_HI_OFST 28 -#define MC_CMD_FC_OUT_FPGA_BUILD_REVISION_HIGH_LBN 0 -#define MC_CMD_FC_OUT_FPGA_BUILD_REVISION_HIGH_WIDTH 16 - -/* MC_CMD_FC_OUT_FPGA_BUILD_V2 msgresponse */ -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_LEN 32 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_COMPONENT_INFO_OFST 0 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IS_APPLICATION_LBN 31 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IS_APPLICATION_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IS_LICENSED_LBN 30 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IS_LICENSED_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_COMPONENT_ID_LBN 16 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_COMPONENT_ID_WIDTH 14 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_MAJOR_LBN 12 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_MAJOR_WIDTH 4 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_MINOR_LBN 4 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_MINOR_WIDTH 8 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_BUILD_NUM_LBN 0 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_BUILD_NUM_WIDTH 4 -/* Build timestamp (seconds since epoch) */ -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_TIMESTAMP_OFST 4 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_PARAMETERS_OFST 8 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_PMA_PASSTHROUGH_LBN 31 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_PMA_PASSTHROUGH_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_QDR_DEF_LBN 29 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_QDR_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_QDR_DEF_LBN 28 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_QDR_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DDR3_ECC_ENABLED_LBN 27 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DDR3_ECC_ENABLED_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE2_DDR3_DEF_LBN 26 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE2_DDR3_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE1_DDR3_DEF_LBN 25 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE1_DDR3_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_TO_DDR3_DEF_LBN 24 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_TO_DDR3_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_T0_DDR3_DEF_LBN 23 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_T0_DDR3_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE2_RLDRAM_DEF_LBN 22 -#define 
MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE2_RLDRAM_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE1_RLDRAM_DEF_LBN 21 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE1_RLDRAM_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_RLDRAM_DEF_LBN 20 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_RLDRAM_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_RLDRAM_DEF_LBN 19 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_RLDRAM_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_3_SPEED_LBN 18 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_3_SPEED_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_3_SPEED_10G 0x0 /* enum */ -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_3_SPEED_40G 0x1 /* enum */ -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_7_SPEED_LBN 17 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_7_SPEED_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_7_SPEED_10G 0x0 /* enum */ -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_7_SPEED_40G 0x1 /* enum */ -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_3_SPEED_LBN 16 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_3_SPEED_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_3_SPEED_10G 0x0 /* enum */ -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_3_SPEED_40G 0x1 /* enum */ -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP7_DEF_LBN 15 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP7_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP6_DEF_LBN 14 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP6_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP5_DEF_LBN 13 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP5_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_DEF_LBN 12 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP3_DEF_LBN 11 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP3_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP2_DEF_LBN 10 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP2_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP1_DEF_LBN 9 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP1_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_DEF_LBN 8 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC3_DEF_LBN 7 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC3_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC2_DEF_LBN 6 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC2_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC1_DEF_LBN 5 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC1_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_DEF_LBN 4 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_FPGA_TYPE_LBN 0 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_FPGA_TYPE_WIDTH 4 -#define MC_CMD_FC_FPGA_V2_TYPE_A3 0x0 /* enum */ -#define MC_CMD_FC_FPGA_V2_TYPE_A4 0x1 /* enum */ -#define MC_CMD_FC_FPGA_V2_TYPE_A5 0x2 /* enum */ -#define MC_CMD_FC_FPGA_V2_TYPE_A7 0x3 /* enum */ -#define MC_CMD_FC_FPGA_V2_TYPE_D3 0x8 /* enum */ -#define MC_CMD_FC_FPGA_V2_TYPE_D4 0x9 /* enum */ -#define MC_CMD_FC_FPGA_V2_TYPE_D5 0xa /* enum */ -#define MC_CMD_FC_FPGA_V2_TYPE_D7 0xb /* enum */ -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IDENTIFIER_OFST 12 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_CHANGESET_LBN 0 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_CHANGESET_WIDTH 16 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_BUILD_FLAG_LBN 16 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_BUILD_FLAG_WIDTH 1 -/* MC_CMD_FC_FPGA_BUILD_FLAG_INTERNAL 0x0 */ -/* MC_CMD_FC_FPGA_BUILD_FLAG_RELEASE 0x1 */ -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_HI_OFST 16 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MINOR_LBN 0 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MINOR_WIDTH 16 
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MAJOR_LBN 16 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MAJOR_WIDTH 16 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_LO_OFST 20 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_BUILD_LBN 0 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_BUILD_WIDTH 16 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MICRO_LBN 16 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MICRO_WIDTH 16 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_REVISION_LO_OFST 24 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_REVISION_HI_OFST 28 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_REVISION_HIGH_LBN 0 -#define MC_CMD_FC_OUT_FPGA_BUILD_V2_REVISION_HIGH_WIDTH 16 - -/* MC_CMD_FC_OUT_FPGA_SERVICES msgresponse */ -#define MC_CMD_FC_OUT_FPGA_SERVICES_LEN 32 -#define MC_CMD_FC_OUT_FPGA_SERVICES_COMPONENT_INFO_OFST 0 -#define MC_CMD_FC_OUT_FPGA_SERVICES_IS_APPLICATION_LBN 31 -#define MC_CMD_FC_OUT_FPGA_SERVICES_IS_APPLICATION_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_SERVICES_IS_LICENSED_LBN 30 -#define MC_CMD_FC_OUT_FPGA_SERVICES_IS_LICENSED_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_SERVICES_COMPONENT_ID_LBN 16 -#define MC_CMD_FC_OUT_FPGA_SERVICES_COMPONENT_ID_WIDTH 14 -#define MC_CMD_FC_OUT_FPGA_SERVICES_VERSION_MAJOR_LBN 12 -#define MC_CMD_FC_OUT_FPGA_SERVICES_VERSION_MAJOR_WIDTH 4 -#define MC_CMD_FC_OUT_FPGA_SERVICES_VERSION_MINOR_LBN 4 -#define MC_CMD_FC_OUT_FPGA_SERVICES_VERSION_MINOR_WIDTH 8 -#define MC_CMD_FC_OUT_FPGA_SERVICES_BUILD_NUM_LBN 0 -#define MC_CMD_FC_OUT_FPGA_SERVICES_BUILD_NUM_WIDTH 4 -/* Build timestamp (seconds since epoch) */ -#define MC_CMD_FC_OUT_FPGA_SERVICES_TIMESTAMP_OFST 4 -#define MC_CMD_FC_OUT_FPGA_SERVICES_PARAMETERS_OFST 8 -#define MC_CMD_FC_OUT_FPGA_SERVICES_FC_FLASH_BOOTED_LBN 8 -#define MC_CMD_FC_OUT_FPGA_SERVICES_FC_FLASH_BOOTED_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_SERVICES_NIC0_DEF_LBN 27 -#define MC_CMD_FC_OUT_FPGA_SERVICES_NIC0_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_SERVICES_NIC1_DEF_LBN 28 -#define MC_CMD_FC_OUT_FPGA_SERVICES_NIC1_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_SERVICES_SFP0_DEF_LBN 29 -#define MC_CMD_FC_OUT_FPGA_SERVICES_SFP0_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_SERVICES_SFP1_DEF_LBN 30 -#define MC_CMD_FC_OUT_FPGA_SERVICES_SFP1_DEF_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_SERVICES_RESERVED_LBN 31 -#define MC_CMD_FC_OUT_FPGA_SERVICES_RESERVED_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_SERVICES_IDENTIFIER_OFST 12 -#define MC_CMD_FC_OUT_FPGA_SERVICES_CHANGESET_LBN 0 -#define MC_CMD_FC_OUT_FPGA_SERVICES_CHANGESET_WIDTH 16 -#define MC_CMD_FC_OUT_FPGA_SERVICES_BUILD_FLAG_LBN 16 -#define MC_CMD_FC_OUT_FPGA_SERVICES_BUILD_FLAG_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_OFST 16 -#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_WIDTH_LBN 0 -#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_WIDTH_WIDTH 16 -#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_COUNT_LBN 16 -#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_COUNT_WIDTH 16 -#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_OFST 20 -#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_WIDTH_LBN 0 -#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_WIDTH_WIDTH 16 -#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_COUNT_LBN 16 -#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_COUNT_WIDTH 16 -#define MC_CMD_FC_OUT_FPGA_SERVICES_REVISION_LO_OFST 24 -#define MC_CMD_FC_OUT_FPGA_SERVICES_REVISION_HI_OFST 28 -#define MC_CMD_FC_OUT_FPGA_SERVICES_REVISION_HIGH_LBN 0 -#define MC_CMD_FC_OUT_FPGA_SERVICES_REVISION_HIGH_WIDTH 16 - -/* MC_CMD_FC_OUT_FPGA_SERVICES_V2 msgresponse */ -#define 
MC_CMD_FC_OUT_FPGA_SERVICES_V2_LEN 32 -#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_COMPONENT_INFO_OFST 0 -#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IS_APPLICATION_LBN 31 -#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IS_APPLICATION_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IS_LICENSED_LBN 30 -#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IS_LICENSED_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_COMPONENT_ID_LBN 16 -#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_COMPONENT_ID_WIDTH 14 -#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_VERSION_MAJOR_LBN 12 -#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_VERSION_MAJOR_WIDTH 4 -#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_VERSION_MINOR_LBN 4 -#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_VERSION_MINOR_WIDTH 8 -#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_BUILD_NUM_LBN 0 -#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_BUILD_NUM_WIDTH 4 -/* Build timestamp (seconds since epoch) */ -#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_TIMESTAMP_OFST 4 -#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_PARAMETERS_OFST 8 -#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_PTP_ENABLED_LBN 0 -#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_PTP_ENABLED_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_FC_FLASH_BOOTED_LBN 8 -#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_FC_FLASH_BOOTED_WIDTH 1 -#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IDENTIFIER_OFST 12 -#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_CHANGESET_LBN 0 -#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_CHANGESET_WIDTH 16 -#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_BUILD_FLAG_LBN 16 -#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_BUILD_FLAG_WIDTH 1 -/* MC_CMD_FC_FPGA_BUILD_FLAG_INTERNAL 0x0 */ -/* MC_CMD_FC_FPGA_BUILD_FLAG_RELEASE 0x1 */ -#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_REVISION_LO_OFST 24 -#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_REVISION_HI_OFST 28 -#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_REVISION_HIGH_LBN 0 -#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_REVISION_HIGH_WIDTH 16 - -/* MC_CMD_FC_OUT_BSP_VERSION msgresponse */ -#define MC_CMD_FC_OUT_BSP_VERSION_LEN 4 -/* Qsys system ID */ -#define MC_CMD_FC_OUT_BSP_VERSION_SYSID_OFST 0 -#define MC_CMD_FC_OUT_BSP_VERSION_VERSION_MAJOR_LBN 12 -#define MC_CMD_FC_OUT_BSP_VERSION_VERSION_MAJOR_WIDTH 4 -#define MC_CMD_FC_OUT_BSP_VERSION_VERSION_MINOR_LBN 4 -#define MC_CMD_FC_OUT_BSP_VERSION_VERSION_MINOR_WIDTH 8 -#define MC_CMD_FC_OUT_BSP_VERSION_BUILD_NUM_LBN 0 -#define MC_CMD_FC_OUT_BSP_VERSION_BUILD_NUM_WIDTH 4 - -/* MC_CMD_FC_OUT_READ_MAP_COUNT msgresponse */ -#define MC_CMD_FC_OUT_READ_MAP_COUNT_LEN 4 -/* Number of maps */ -#define MC_CMD_FC_OUT_READ_MAP_COUNT_NUM_MAPS_OFST 0 - -/* MC_CMD_FC_OUT_READ_MAP_INDEX msgresponse */ -#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN 164 -/* Index of the map */ -#define MC_CMD_FC_OUT_READ_MAP_INDEX_INDEX_OFST 0 -/* Options for the map */ -#define MC_CMD_FC_OUT_READ_MAP_INDEX_OPTIONS_OFST 4 -#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_8 0x0 /* enum */ -#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_16 0x1 /* enum */ -#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_32 0x2 /* enum */ -#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_64 0x3 /* enum */ -#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_MASK 0x3 /* enum */ -#define MC_CMD_FC_OUT_READ_MAP_INDEX_PATH_FC 0x4 /* enum */ -#define MC_CMD_FC_OUT_READ_MAP_INDEX_PATH_MEM 0x8 /* enum */ -#define MC_CMD_FC_OUT_READ_MAP_INDEX_PERM_READ 0x10 /* enum */ -#define MC_CMD_FC_OUT_READ_MAP_INDEX_PERM_WRITE 0x20 /* enum */ -#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_FREE 0x0 /* enum */ -#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_LICENSED 0x40 /* enum */ -/* Address of start of map */ -#define MC_CMD_FC_OUT_READ_MAP_INDEX_ADDRESS_OFST 8 -#define 
MC_CMD_FC_OUT_READ_MAP_INDEX_ADDRESS_LEN 8 -#define MC_CMD_FC_OUT_READ_MAP_INDEX_ADDRESS_LO_OFST 8 -#define MC_CMD_FC_OUT_READ_MAP_INDEX_ADDRESS_HI_OFST 12 -/* Length of address map */ -#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN_OFST 16 -#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN_LEN 8 -#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN_LO_OFST 16 -#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN_HI_OFST 20 -/* Component information field */ -#define MC_CMD_FC_OUT_READ_MAP_INDEX_COMP_INFO_OFST 24 -/* License expiry data for map */ -#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_DATE_OFST 28 -#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_DATE_LEN 8 -#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_DATE_LO_OFST 28 -#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_DATE_HI_OFST 32 -/* Name of the component */ -#define MC_CMD_FC_OUT_READ_MAP_INDEX_NAME_OFST 36 -#define MC_CMD_FC_OUT_READ_MAP_INDEX_NAME_LEN 1 -#define MC_CMD_FC_OUT_READ_MAP_INDEX_NAME_NUM 128 - -/* MC_CMD_FC_OUT_READ_MAP msgresponse */ -#define MC_CMD_FC_OUT_READ_MAP_LEN 0 - -/* MC_CMD_FC_OUT_CAPABILITIES msgresponse */ -#define MC_CMD_FC_OUT_CAPABILITIES_LEN 8 -/* Number of internal ports */ -#define MC_CMD_FC_OUT_CAPABILITIES_INTERNAL_OFST 0 -/* Number of external ports */ -#define MC_CMD_FC_OUT_CAPABILITIES_EXTERNAL_OFST 4 - -/* MC_CMD_FC_OUT_GLOBAL_FLAGS msgresponse */ -#define MC_CMD_FC_OUT_GLOBAL_FLAGS_LEN 4 -#define MC_CMD_FC_OUT_GLOBAL_FLAGS_FLAGS_OFST 0 - -/* MC_CMD_FC_OUT_IO_REL msgresponse */ -#define MC_CMD_FC_OUT_IO_REL_LEN 0 - -/* MC_CMD_FC_OUT_IO_REL_GET_ADDR msgresponse */ -#define MC_CMD_FC_OUT_IO_REL_GET_ADDR_LEN 8 -#define MC_CMD_FC_OUT_IO_REL_GET_ADDR_ADDR_HI_OFST 0 -#define MC_CMD_FC_OUT_IO_REL_GET_ADDR_ADDR_LO_OFST 4 - -/* MC_CMD_FC_OUT_IO_REL_READ32 msgresponse */ -#define MC_CMD_FC_OUT_IO_REL_READ32_LENMIN 4 -#define MC_CMD_FC_OUT_IO_REL_READ32_LENMAX 252 -#define MC_CMD_FC_OUT_IO_REL_READ32_LEN(num) (0+4*(num)) -#define MC_CMD_FC_OUT_IO_REL_READ32_BUFFER_OFST 0 -#define MC_CMD_FC_OUT_IO_REL_READ32_BUFFER_LEN 4 -#define MC_CMD_FC_OUT_IO_REL_READ32_BUFFER_MINNUM 1 -#define MC_CMD_FC_OUT_IO_REL_READ32_BUFFER_MAXNUM 63 - -/* MC_CMD_FC_OUT_IO_REL_WRITE32 msgresponse */ -#define MC_CMD_FC_OUT_IO_REL_WRITE32_LEN 0 - -/* MC_CMD_FC_OUT_UHLINK_PHY msgresponse */ -#define MC_CMD_FC_OUT_UHLINK_PHY_LEN 48 -#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_SETTINGS_0_OFST 0 -#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_VOD_LBN 0 -#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_VOD_WIDTH 16 -#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_1STPOSTTAP_LBN 16 -#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_1STPOSTTAP_WIDTH 16 -/* Transceiver Transmit settings */ -#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_SETTINGS_1_OFST 4 -#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_PRETAP_LBN 0 -#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_PRETAP_WIDTH 16 -#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_2NDPOSTTAP_LBN 16 -#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_2NDPOSTTAP_WIDTH 16 -/* Transceiver Receive settings */ -#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_SETTINGS_OFST 8 -#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_DC_GAIN_LBN 0 -#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_DC_GAIN_WIDTH 16 -#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_EQ_CONTROL_LBN 16 -#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_EQ_CONTROL_WIDTH 16 -/* Rx eye opening */ -#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_OFST 12 -#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_WIDTH_LBN 0 -#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_WIDTH_WIDTH 16 -#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_HEIGHT_LBN 16 -#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_HEIGHT_WIDTH 
16 -/* PCS status word */ -#define MC_CMD_FC_OUT_UHLINK_PHY_PCS_STATUS_OFST 16 -/* Link status word */ -#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_STATE_WORD_OFST 20 -#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_STATE_LBN 0 -#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_STATE_WIDTH 1 -#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_CONFIGURED_LBN 1 -#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_CONFIGURED_WIDTH 1 -/* Current SFp parameters applied */ -#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_PARAMS_OFST 24 -#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_PARAMS_LEN 20 -/* Link speed is 100, 1000, 10000 */ -#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_SPEED_OFST 24 -/* Length of copper cable - zero when not relevant */ -#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_COPPER_LEN_OFST 28 -/* True if a dual speed SFP+ module */ -#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_DUAL_SPEED_OFST 32 -/* True if an SFP Module is present (other fields valid when true) */ -#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_PRESENT_OFST 36 -/* The type of the SFP+ Module */ -#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_TYPE_OFST 40 -/* PHY config flags */ -#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_OFST 44 -#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_DFE_LBN 0 -#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_DFE_WIDTH 1 -#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_AEQ_LBN 1 -#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_AEQ_WIDTH 1 -#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_RX_TUNING_LBN 2 -#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_RX_TUNING_WIDTH 1 - -/* MC_CMD_FC_OUT_UHLINK_MAC msgresponse */ -#define MC_CMD_FC_OUT_UHLINK_MAC_LEN 20 -/* MAC configuration applied */ -#define MC_CMD_FC_OUT_UHLINK_MAC_CONFIG_OFST 0 -/* MTU size */ -#define MC_CMD_FC_OUT_UHLINK_MAC_MTU_OFST 4 -/* IF Mode status */ -#define MC_CMD_FC_OUT_UHLINK_MAC_IF_STATUS_OFST 8 -/* MAC address configured */ -#define MC_CMD_FC_OUT_UHLINK_MAC_ADDR_OFST 12 -#define MC_CMD_FC_OUT_UHLINK_MAC_ADDR_LEN 8 -#define MC_CMD_FC_OUT_UHLINK_MAC_ADDR_LO_OFST 12 -#define MC_CMD_FC_OUT_UHLINK_MAC_ADDR_HI_OFST 16 - -/* MC_CMD_FC_OUT_UHLINK_RX_EYE msgresponse */ -#define MC_CMD_FC_OUT_UHLINK_RX_EYE_LEN ((((0-1+(32*MC_CMD_FC_UHLINK_RX_EYE_PER_BLOCK))+1))>>3) -/* Rx Eye measurements */ -#define MC_CMD_FC_OUT_UHLINK_RX_EYE_RX_EYE_OFST 0 -#define MC_CMD_FC_OUT_UHLINK_RX_EYE_RX_EYE_LEN 4 -#define MC_CMD_FC_OUT_UHLINK_RX_EYE_RX_EYE_NUM MC_CMD_FC_UHLINK_RX_EYE_PER_BLOCK - -/* MC_CMD_FC_OUT_UHLINK_DUMP_RX_EYE_PLOT msgresponse */ -#define MC_CMD_FC_OUT_UHLINK_DUMP_RX_EYE_PLOT_LEN 0 - -/* MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT msgresponse */ -#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_LEN ((((32-1+(64*MC_CMD_FC_UHLINK_RX_EYE_PLOT_ROWS_PER_BLOCK))+1))>>3) -/* Has the eye plot dump completed and data returned is valid? 
*/ -#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_VALID_OFST 0 -/* Rx Eye binary plot */ -#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_OFST 4 -#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_LEN 8 -#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_LO_OFST 4 -#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_HI_OFST 8 -#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_NUM MC_CMD_FC_UHLINK_RX_EYE_PLOT_ROWS_PER_BLOCK - -/* MC_CMD_FC_OUT_UHLINK_RX_TUNE msgresponse */ -#define MC_CMD_FC_OUT_UHLINK_RX_TUNE_LEN 0 - -/* MC_CMD_FC_OUT_UHLINK_LOOPBACK_SET msgresponse */ -#define MC_CMD_FC_OUT_UHLINK_LOOPBACK_SET_LEN 0 - -/* MC_CMD_FC_OUT_UHLINK_LOOPBACK_GET msgresponse */ -#define MC_CMD_FC_OUT_UHLINK_LOOPBACK_GET_LEN 4 -#define MC_CMD_FC_OUT_UHLINK_LOOPBACK_GET_STATE_OFST 0 - -/* MC_CMD_FC_OUT_UHLINK msgresponse */ -#define MC_CMD_FC_OUT_UHLINK_LEN 0 - -/* MC_CMD_FC_OUT_SET_LINK msgresponse */ -#define MC_CMD_FC_OUT_SET_LINK_LEN 0 - -/* MC_CMD_FC_OUT_LICENSE msgresponse */ -#define MC_CMD_FC_OUT_LICENSE_LEN 12 -/* Count of valid keys */ -#define MC_CMD_FC_OUT_LICENSE_VALID_KEYS_OFST 0 -/* Count of invalid keys */ -#define MC_CMD_FC_OUT_LICENSE_INVALID_KEYS_OFST 4 -/* Count of blacklisted keys */ -#define MC_CMD_FC_OUT_LICENSE_BLACKLISTED_KEYS_OFST 8 - -/* MC_CMD_FC_OUT_STARTUP msgresponse */ -#define MC_CMD_FC_OUT_STARTUP_LEN 4 -/* Capabilities of the FPGA/FC */ -#define MC_CMD_FC_OUT_STARTUP_CAPABILITIES_OFST 0 -#define MC_CMD_FC_OUT_STARTUP_CAN_ACCESS_FLASH_LBN 0 -#define MC_CMD_FC_OUT_STARTUP_CAN_ACCESS_FLASH_WIDTH 1 - -/* MC_CMD_FC_OUT_DMA_READ msgresponse */ -#define MC_CMD_FC_OUT_DMA_READ_LENMIN 1 -#define MC_CMD_FC_OUT_DMA_READ_LENMAX 252 -#define MC_CMD_FC_OUT_DMA_READ_LEN(num) (0+1*(num)) -/* The data read */ -#define MC_CMD_FC_OUT_DMA_READ_DATA_OFST 0 -#define MC_CMD_FC_OUT_DMA_READ_DATA_LEN 1 -#define MC_CMD_FC_OUT_DMA_READ_DATA_MINNUM 1 -#define MC_CMD_FC_OUT_DMA_READ_DATA_MAXNUM 252 - -/* MC_CMD_FC_OUT_TIMED_READ_SET msgresponse */ -#define MC_CMD_FC_OUT_TIMED_READ_SET_LEN 4 -/* Timer handle */ -#define MC_CMD_FC_OUT_TIMED_READ_SET_FC_HANDLE_OFST 0 - -/* MC_CMD_FC_OUT_TIMED_READ_GET msgresponse */ -#define MC_CMD_FC_OUT_TIMED_READ_GET_LEN 52 -/* Host supplied handle (unique) */ -#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_HANDLE_OFST 0 -/* Address into which to transfer data in host */ -#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_DMA_ADDRESS_OFST 4 -#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_DMA_ADDRESS_LEN 8 -#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_DMA_ADDRESS_LO_OFST 4 -#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_DMA_ADDRESS_HI_OFST 8 -/* AOE address from which to transfer data */ -#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_ADDRESS_OFST 12 -#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_ADDRESS_LEN 8 -#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_ADDRESS_LO_OFST 12 -#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_ADDRESS_HI_OFST 16 -/* Length of AOE transfer (total) */ -#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_LENGTH_OFST 20 -/* Length of host transfer (total) */ -#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_LENGTH_OFST 24 -/* See FLAGS entry for MC_CMD_FC_IN_TIMED_READ_SET */ -#define MC_CMD_FC_OUT_TIMED_READ_GET_FLAGS_OFST 28 -#define MC_CMD_FC_OUT_TIMED_READ_GET_PERIOD_OFST 32 -/* When active, start read time */ -#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_START_OFST 36 -#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_START_LEN 8 -#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_START_LO_OFST 36 -#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_START_HI_OFST 40 -/* When active, end read time */ -#define 
MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_END_OFST 44 -#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_END_LEN 8 -#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_END_LO_OFST 44 -#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_END_HI_OFST 48 - -/* MC_CMD_FC_OUT_LOG_ADDR_RANGE msgresponse */ -#define MC_CMD_FC_OUT_LOG_ADDR_RANGE_LEN 0 - -/* MC_CMD_FC_OUT_LOG msgresponse */ -#define MC_CMD_FC_OUT_LOG_LEN 0 - -/* MC_CMD_FC_OUT_CLOCK_GET_TIME msgresponse */ -#define MC_CMD_FC_OUT_CLOCK_GET_TIME_LEN 24 -#define MC_CMD_FC_OUT_CLOCK_GET_TIME_CLOCK_ID_OFST 0 -#define MC_CMD_FC_OUT_CLOCK_GET_TIME_SECONDS_OFST 4 -#define MC_CMD_FC_OUT_CLOCK_GET_TIME_SECONDS_LEN 8 -#define MC_CMD_FC_OUT_CLOCK_GET_TIME_SECONDS_LO_OFST 4 -#define MC_CMD_FC_OUT_CLOCK_GET_TIME_SECONDS_HI_OFST 8 -#define MC_CMD_FC_OUT_CLOCK_GET_TIME_NANOSECONDS_OFST 12 -#define MC_CMD_FC_OUT_CLOCK_GET_TIME_RANGE_OFST 16 -#define MC_CMD_FC_OUT_CLOCK_GET_TIME_PRECISION_OFST 20 - -/* MC_CMD_FC_OUT_CLOCK_SET_TIME msgresponse */ -#define MC_CMD_FC_OUT_CLOCK_SET_TIME_LEN 0 - -/* MC_CMD_FC_OUT_DDR_SET_SPD msgresponse */ -#define MC_CMD_FC_OUT_DDR_SET_SPD_LEN 0 - -/* MC_CMD_FC_OUT_DDR_SET_INFO msgresponse */ -#define MC_CMD_FC_OUT_DDR_SET_INFO_LEN 0 - -/* MC_CMD_FC_OUT_DDR_GET_STATUS msgresponse */ -#define MC_CMD_FC_OUT_DDR_GET_STATUS_LEN 4 -#define MC_CMD_FC_OUT_DDR_GET_STATUS_FLAGS_OFST 0 -#define MC_CMD_FC_OUT_DDR_GET_STATUS_READY_LBN 0 -#define MC_CMD_FC_OUT_DDR_GET_STATUS_READY_WIDTH 1 -#define MC_CMD_FC_OUT_DDR_GET_STATUS_CALIBRATED_LBN 1 -#define MC_CMD_FC_OUT_DDR_GET_STATUS_CALIBRATED_WIDTH 1 - -/* MC_CMD_FC_OUT_TIMESTAMP_READ_TRANSMIT msgresponse */ -#define MC_CMD_FC_OUT_TIMESTAMP_READ_TRANSMIT_LEN 8 -#define MC_CMD_FC_OUT_TIMESTAMP_READ_TRANSMIT_SECONDS_OFST 0 -#define MC_CMD_FC_OUT_TIMESTAMP_READ_TRANSMIT_NANOSECONDS_OFST 4 - -/* MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT msgresponse */ -#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_LENMIN 8 -#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_LENMAX 248 -#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_LEN(num) (0+8*(num)) -#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_SECONDS_OFST 0 -#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_NANOSECONDS_OFST 4 -#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_OFST 0 -#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_LEN 8 -#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_LO_OFST 0 -#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_HI_OFST 4 -#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_MINNUM 0 -#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_MAXNUM 31 - -/* MC_CMD_FC_OUT_SPI_READ msgresponse */ -#define MC_CMD_FC_OUT_SPI_READ_LENMIN 4 -#define MC_CMD_FC_OUT_SPI_READ_LENMAX 252 -#define MC_CMD_FC_OUT_SPI_READ_LEN(num) (0+4*(num)) -#define MC_CMD_FC_OUT_SPI_READ_BUFFER_OFST 0 -#define MC_CMD_FC_OUT_SPI_READ_BUFFER_LEN 4 -#define MC_CMD_FC_OUT_SPI_READ_BUFFER_MINNUM 1 -#define MC_CMD_FC_OUT_SPI_READ_BUFFER_MAXNUM 63 - -/* MC_CMD_FC_OUT_SPI_WRITE msgresponse */ -#define MC_CMD_FC_OUT_SPI_WRITE_LEN 0 - -/* MC_CMD_FC_OUT_SPI_ERASE msgresponse */ -#define MC_CMD_FC_OUT_SPI_ERASE_LEN 0 - -/* MC_CMD_FC_OUT_DIAG_POWER_NOISE_READ_CONFIG msgresponse */ -#define MC_CMD_FC_OUT_DIAG_POWER_NOISE_READ_CONFIG_LEN 8 -/* The 32-bit value read from the toggle count register */ -#define MC_CMD_FC_OUT_DIAG_POWER_NOISE_READ_CONFIG_TOGGLE_COUNT_OFST 0 -/* The 32-bit value read from the clock enable count register */ -#define MC_CMD_FC_OUT_DIAG_POWER_NOISE_READ_CONFIG_CLKEN_COUNT_OFST 4 - -/* MC_CMD_FC_OUT_DIAG_POWER_NOISE_WRITE_CONFIG msgresponse */ -#define 
MC_CMD_FC_OUT_DIAG_POWER_NOISE_WRITE_CONFIG_LEN 0 - -/* MC_CMD_FC_OUT_DIAG_DDR_SOAK_START msgresponse */ -#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_START_LEN 0 - -/* MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT msgresponse */ -#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_LEN 8 -/* DDR soak test status word; bits [4:0] are relevant. */ -#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_STATUS_OFST 0 -#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_PASSED_LBN 0 -#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_PASSED_WIDTH 1 -#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_FAILED_LBN 1 -#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_FAILED_WIDTH 1 -#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_COMPLETED_LBN 2 -#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_COMPLETED_WIDTH 1 -#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_TIMEOUT_LBN 3 -#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_TIMEOUT_WIDTH 1 -#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_PNF_LBN 4 -#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_PNF_WIDTH 1 -/* DDR soak test error count */ -#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_ERR_COUNT_OFST 4 - -/* MC_CMD_FC_OUT_DIAG_DDR_SOAK_STOP msgresponse */ -#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_STOP_LEN 0 - -/* MC_CMD_FC_OUT_DIAG_DDR_SOAK_ERROR msgresponse */ -#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_ERROR_LEN 0 - -/* MC_CMD_FC_OUT_DIAG_DATAPATH_CTRL_SET_MODE msgresponse */ -#define MC_CMD_FC_OUT_DIAG_DATAPATH_CTRL_SET_MODE_LEN 0 - -/* MC_CMD_FC_OUT_DIAG_DATAPATH_CTRL_RAW_CONFIG msgresponse */ -#define MC_CMD_FC_OUT_DIAG_DATAPATH_CTRL_RAW_CONFIG_LEN 0 - - -/***********************************/ -/* MC_CMD_AOE - * AOE operations on MC - */ -#define MC_CMD_AOE 0xa - -/* MC_CMD_AOE_IN msgrequest */ -#define MC_CMD_AOE_IN_LEN 4 -#define MC_CMD_AOE_IN_OP_HDR_OFST 0 -#define MC_CMD_AOE_IN_OP_LBN 0 -#define MC_CMD_AOE_IN_OP_WIDTH 8 -/* enum: FPGA and CPLD information */ -#define MC_CMD_AOE_OP_INFO 0x1 -/* enum: Currents and voltages read from MCP3424s; DEBUG */ -#define MC_CMD_AOE_OP_CURRENTS 0x2 -/* enum: Temperatures at locations around the PCB; DEBUG */ -#define MC_CMD_AOE_OP_TEMPERATURES 0x3 -/* enum: Set CPLD to idle */ -#define MC_CMD_AOE_OP_CPLD_IDLE 0x4 -/* enum: Read from CPLD register */ -#define MC_CMD_AOE_OP_CPLD_READ 0x5 -/* enum: Write to CPLD register */ -#define MC_CMD_AOE_OP_CPLD_WRITE 0x6 -/* enum: Execute CPLD instruction */ -#define MC_CMD_AOE_OP_CPLD_INSTRUCTION 0x7 -/* enum: Reprogram the CPLD on the AOE device */ -#define MC_CMD_AOE_OP_CPLD_REPROGRAM 0x8 -/* enum: AOE power control */ -#define MC_CMD_AOE_OP_POWER 0x9 -/* enum: AOE image loading */ -#define MC_CMD_AOE_OP_LOAD 0xa -/* enum: Fan monitoring */ -#define MC_CMD_AOE_OP_FAN_CONTROL 0xb -/* enum: Fan failures since last reset */ -#define MC_CMD_AOE_OP_FAN_FAILURES 0xc -/* enum: Get generic AOE MAC statistics */ -#define MC_CMD_AOE_OP_MAC_STATS 0xd -/* enum: Retrieve PHY specific information */ -#define MC_CMD_AOE_OP_GET_PHY_MEDIA_INFO 0xe -/* enum: Write a number of JTAG primitive commands, return will give data */ -#define MC_CMD_AOE_OP_JTAG_WRITE 0xf -/* enum: Control access to the FPGA via the Siena JTAG Chain */ -#define MC_CMD_AOE_OP_FPGA_ACCESS 0x10 -/* enum: Set the MTU offset between Siena and AOE MACs */ -#define MC_CMD_AOE_OP_SET_MTU_OFFSET 0x11 -/* enum: How link state is handled */ -#define MC_CMD_AOE_OP_LINK_STATE 0x12 -/* enum: How Siena MAC statistics are reported (deprecated - use - * MC_CMD_AOE_OP_ASIC_STATS) - */ -#define MC_CMD_AOE_OP_SIENA_STATS 0x13 -/* enum: How native ASIC MAC statistics are reported - replaces the deprecated - * command MC_CMD_AOE_OP_SIENA_STATS - */ 
-#define MC_CMD_AOE_OP_ASIC_STATS 0x13 -/* enum: DDR memory information */ -#define MC_CMD_AOE_OP_DDR 0x14 -/* enum: FC control */ -#define MC_CMD_AOE_OP_FC 0x15 -/* enum: DDR ECC status reads */ -#define MC_CMD_AOE_OP_DDR_ECC_STATUS 0x16 -/* enum: Commands for MC-SPI Master emulation */ -#define MC_CMD_AOE_OP_MC_SPI_MASTER 0x17 -/* enum: Commands for FC boot control */ -#define MC_CMD_AOE_OP_FC_BOOT 0x18 - -/* MC_CMD_AOE_OUT msgresponse */ -#define MC_CMD_AOE_OUT_LEN 0 - -/* MC_CMD_AOE_IN_INFO msgrequest */ -#define MC_CMD_AOE_IN_INFO_LEN 4 -#define MC_CMD_AOE_IN_CMD_OFST 0 - -/* MC_CMD_AOE_IN_CURRENTS msgrequest */ -#define MC_CMD_AOE_IN_CURRENTS_LEN 4 -/* MC_CMD_AOE_IN_CMD_OFST 0 */ - -/* MC_CMD_AOE_IN_TEMPERATURES msgrequest */ -#define MC_CMD_AOE_IN_TEMPERATURES_LEN 4 -/* MC_CMD_AOE_IN_CMD_OFST 0 */ - -/* MC_CMD_AOE_IN_CPLD_IDLE msgrequest */ -#define MC_CMD_AOE_IN_CPLD_IDLE_LEN 4 -/* MC_CMD_AOE_IN_CMD_OFST 0 */ - -/* MC_CMD_AOE_IN_CPLD_READ msgrequest */ -#define MC_CMD_AOE_IN_CPLD_READ_LEN 12 -/* MC_CMD_AOE_IN_CMD_OFST 0 */ -#define MC_CMD_AOE_IN_CPLD_READ_REGISTER_OFST 4 -#define MC_CMD_AOE_IN_CPLD_READ_WIDTH_OFST 8 - -/* MC_CMD_AOE_IN_CPLD_WRITE msgrequest */ -#define MC_CMD_AOE_IN_CPLD_WRITE_LEN 16 -/* MC_CMD_AOE_IN_CMD_OFST 0 */ -#define MC_CMD_AOE_IN_CPLD_WRITE_REGISTER_OFST 4 -#define MC_CMD_AOE_IN_CPLD_WRITE_WIDTH_OFST 8 -#define MC_CMD_AOE_IN_CPLD_WRITE_VALUE_OFST 12 - -/* MC_CMD_AOE_IN_CPLD_INSTRUCTION msgrequest */ -#define MC_CMD_AOE_IN_CPLD_INSTRUCTION_LEN 8 -/* MC_CMD_AOE_IN_CMD_OFST 0 */ -#define MC_CMD_AOE_IN_CPLD_INSTRUCTION_INSTRUCTION_OFST 4 - -/* MC_CMD_AOE_IN_CPLD_REPROGRAM msgrequest */ -#define MC_CMD_AOE_IN_CPLD_REPROGRAM_LEN 8 -/* MC_CMD_AOE_IN_CMD_OFST 0 */ -#define MC_CMD_AOE_IN_CPLD_REPROGRAM_OP_OFST 4 -/* enum: Reprogram CPLD, poll for completion */ -#define MC_CMD_AOE_IN_CPLD_REPROGRAM_REPROGRAM 0x1 -/* enum: Reprogram CPLD, send event on completion */ -#define MC_CMD_AOE_IN_CPLD_REPROGRAM_REPROGRAM_EVENT 0x3 -/* enum: Get status of reprogramming operation */ -#define MC_CMD_AOE_IN_CPLD_REPROGRAM_STATUS 0x4 - -/* MC_CMD_AOE_IN_POWER msgrequest */ -#define MC_CMD_AOE_IN_POWER_LEN 8 -/* MC_CMD_AOE_IN_CMD_OFST 0 */ -/* Turn on or off AOE power */ -#define MC_CMD_AOE_IN_POWER_OP_OFST 4 -/* enum: Turn off FPGA power */ -#define MC_CMD_AOE_IN_POWER_OFF 0x0 -/* enum: Turn on FPGA power */ -#define MC_CMD_AOE_IN_POWER_ON 0x1 -/* enum: Clear peak power measurement */ -#define MC_CMD_AOE_IN_POWER_CLEAR 0x2 -/* enum: Show current power in sensors output */ -#define MC_CMD_AOE_IN_POWER_SHOW_CURRENT 0x3 -/* enum: Show peak power in sensors output */ -#define MC_CMD_AOE_IN_POWER_SHOW_PEAK 0x4 -/* enum: Show current DDR current */ -#define MC_CMD_AOE_IN_POWER_DDR_LAST 0x5 -/* enum: Show peak DDR current */ -#define MC_CMD_AOE_IN_POWER_DDR_PEAK 0x6 -/* enum: Clear peak DDR current */ -#define MC_CMD_AOE_IN_POWER_DDR_CLEAR 0x7 - -/* MC_CMD_AOE_IN_LOAD msgrequest */ -#define MC_CMD_AOE_IN_LOAD_LEN 8 -/* MC_CMD_AOE_IN_CMD_OFST 0 */ -/* Image to be loaded (0 - main or 1 - diagnostic) to load in normal sequence - */ -#define MC_CMD_AOE_IN_LOAD_IMAGE_OFST 4 - -/* MC_CMD_AOE_IN_FAN_CONTROL msgrequest */ -#define MC_CMD_AOE_IN_FAN_CONTROL_LEN 8 -/* MC_CMD_AOE_IN_CMD_OFST 0 */ -/* If non zero report measured fan RPM rather than nominal */ -#define MC_CMD_AOE_IN_FAN_CONTROL_REAL_RPM_OFST 4 - -/* MC_CMD_AOE_IN_FAN_FAILURES msgrequest */ -#define MC_CMD_AOE_IN_FAN_FAILURES_LEN 4 -/* MC_CMD_AOE_IN_CMD_OFST 0 */ - -/* MC_CMD_AOE_IN_MAC_STATS msgrequest */ -#define 
MC_CMD_AOE_IN_MAC_STATS_LEN 24 -/* MC_CMD_AOE_IN_CMD_OFST 0 */ -/* AOE port */ -#define MC_CMD_AOE_IN_MAC_STATS_PORT_OFST 4 -/* Host memory address for statistics */ -#define MC_CMD_AOE_IN_MAC_STATS_DMA_ADDR_OFST 8 -#define MC_CMD_AOE_IN_MAC_STATS_DMA_ADDR_LEN 8 -#define MC_CMD_AOE_IN_MAC_STATS_DMA_ADDR_LO_OFST 8 -#define MC_CMD_AOE_IN_MAC_STATS_DMA_ADDR_HI_OFST 12 -#define MC_CMD_AOE_IN_MAC_STATS_CMD_OFST 16 -#define MC_CMD_AOE_IN_MAC_STATS_DMA_LBN 0 -#define MC_CMD_AOE_IN_MAC_STATS_DMA_WIDTH 1 -#define MC_CMD_AOE_IN_MAC_STATS_CLEAR_LBN 1 -#define MC_CMD_AOE_IN_MAC_STATS_CLEAR_WIDTH 1 -#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_CHANGE_LBN 2 -#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_CHANGE_WIDTH 1 -#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_ENABLE_LBN 3 -#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_ENABLE_WIDTH 1 -#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_CLEAR_LBN 4 -#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_CLEAR_WIDTH 1 -#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_NOEVENT_LBN 5 -#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_NOEVENT_WIDTH 1 -#define MC_CMD_AOE_IN_MAC_STATS_PERIOD_MS_LBN 16 -#define MC_CMD_AOE_IN_MAC_STATS_PERIOD_MS_WIDTH 16 -/* Length of DMA data (optional) */ -#define MC_CMD_AOE_IN_MAC_STATS_DMA_LEN_OFST 20 - -/* MC_CMD_AOE_IN_GET_PHY_MEDIA_INFO msgrequest */ -#define MC_CMD_AOE_IN_GET_PHY_MEDIA_INFO_LEN 12 -/* MC_CMD_AOE_IN_CMD_OFST 0 */ -/* AOE port */ -#define MC_CMD_AOE_IN_GET_PHY_MEDIA_INFO_PORT_OFST 4 -#define MC_CMD_AOE_IN_GET_PHY_MEDIA_INFO_PAGE_OFST 8 - -/* MC_CMD_AOE_IN_JTAG_WRITE msgrequest */ -#define MC_CMD_AOE_IN_JTAG_WRITE_LENMIN 12 -#define MC_CMD_AOE_IN_JTAG_WRITE_LENMAX 252 -#define MC_CMD_AOE_IN_JTAG_WRITE_LEN(num) (8+4*(num)) -/* MC_CMD_AOE_IN_CMD_OFST 0 */ -#define MC_CMD_AOE_IN_JTAG_WRITE_DATALEN_OFST 4 -#define MC_CMD_AOE_IN_JTAG_WRITE_DATA_OFST 8 -#define MC_CMD_AOE_IN_JTAG_WRITE_DATA_LEN 4 -#define MC_CMD_AOE_IN_JTAG_WRITE_DATA_MINNUM 1 -#define MC_CMD_AOE_IN_JTAG_WRITE_DATA_MAXNUM 61 - -/* MC_CMD_AOE_IN_FPGA_ACCESS msgrequest */ -#define MC_CMD_AOE_IN_FPGA_ACCESS_LEN 8 -/* MC_CMD_AOE_IN_CMD_OFST 0 */ -/* Enable or disable access */ -#define MC_CMD_AOE_IN_FPGA_ACCESS_OP_OFST 4 -/* enum: Enable access */ -#define MC_CMD_AOE_IN_FPGA_ACCESS_ENABLE 0x1 -/* enum: Disable access */ -#define MC_CMD_AOE_IN_FPGA_ACCESS_DISABLE 0x2 - -/* MC_CMD_AOE_IN_SET_MTU_OFFSET msgrequest */ -#define MC_CMD_AOE_IN_SET_MTU_OFFSET_LEN 12 -/* MC_CMD_AOE_IN_CMD_OFST 0 */ -/* AOE port - when not ALL_EXTERNAL or ALL_INTERNAL specifies port number */ -#define MC_CMD_AOE_IN_SET_MTU_OFFSET_PORT_OFST 4 -/* enum: Apply to all external ports */ -#define MC_CMD_AOE_IN_SET_MTU_OFFSET_ALL_EXTERNAL 0x8000 -/* enum: Apply to all internal ports */ -#define MC_CMD_AOE_IN_SET_MTU_OFFSET_ALL_INTERNAL 0x4000 -/* The MTU offset to be applied to the external ports */ -#define MC_CMD_AOE_IN_SET_MTU_OFFSET_OFFSET_OFST 8 - -/* MC_CMD_AOE_IN_LINK_STATE msgrequest */ -#define MC_CMD_AOE_IN_LINK_STATE_LEN 8 -/* MC_CMD_AOE_IN_CMD_OFST 0 */ -#define MC_CMD_AOE_IN_LINK_STATE_MODE_OFST 4 -#define MC_CMD_AOE_IN_LINK_STATE_CONFIG_MODE_LBN 0 -#define MC_CMD_AOE_IN_LINK_STATE_CONFIG_MODE_WIDTH 8 -/* enum: AOE and associated external port */ -#define MC_CMD_AOE_IN_LINK_STATE_SIMPLE_SEPARATE 0x0 -/* enum: AOE and OR of all external ports */ -#define MC_CMD_AOE_IN_LINK_STATE_SIMPLE_COMBINED 0x1 -/* enum: Individual ports */ -#define MC_CMD_AOE_IN_LINK_STATE_DIAGNOSTIC 0x2 -/* enum: Configure link state mode on given AOE port */ -#define MC_CMD_AOE_IN_LINK_STATE_CUSTOM 0x3 -#define MC_CMD_AOE_IN_LINK_STATE_OPERATION_LBN 8 
-#define MC_CMD_AOE_IN_LINK_STATE_OPERATION_WIDTH 8 -/* enum: No-op */ -#define MC_CMD_AOE_IN_LINK_STATE_OP_NONE 0x0 -/* enum: logical OR of all SFP ports link status */ -#define MC_CMD_AOE_IN_LINK_STATE_OP_OR 0x1 -/* enum: logical AND of all SFP ports link status */ -#define MC_CMD_AOE_IN_LINK_STATE_OP_AND 0x2 -#define MC_CMD_AOE_IN_LINK_STATE_SFP_MASK_LBN 16 -#define MC_CMD_AOE_IN_LINK_STATE_SFP_MASK_WIDTH 16 - -/* MC_CMD_AOE_IN_SIENA_STATS msgrequest */ -#define MC_CMD_AOE_IN_SIENA_STATS_LEN 8 -/* MC_CMD_AOE_IN_CMD_OFST 0 */ -/* How MAC statistics are reported */ -#define MC_CMD_AOE_IN_SIENA_STATS_MODE_OFST 4 -/* enum: Statistics from Siena (default) */ -#define MC_CMD_AOE_IN_SIENA_STATS_STATS_SIENA 0x0 -/* enum: Statistics from AOE external ports */ -#define MC_CMD_AOE_IN_SIENA_STATS_STATS_AOE 0x1 - -/* MC_CMD_AOE_IN_ASIC_STATS msgrequest */ -#define MC_CMD_AOE_IN_ASIC_STATS_LEN 8 -/* MC_CMD_AOE_IN_CMD_OFST 0 */ -/* How MAC statistics are reported */ -#define MC_CMD_AOE_IN_ASIC_STATS_MODE_OFST 4 -/* enum: Statistics from the ASIC (default) */ -#define MC_CMD_AOE_IN_ASIC_STATS_STATS_ASIC 0x0 -/* enum: Statistics from AOE external ports */ -#define MC_CMD_AOE_IN_ASIC_STATS_STATS_AOE 0x1 - -/* MC_CMD_AOE_IN_DDR msgrequest */ -#define MC_CMD_AOE_IN_DDR_LEN 12 -/* MC_CMD_AOE_IN_CMD_OFST 0 */ -#define MC_CMD_AOE_IN_DDR_BANK_OFST 4 -/* Enum values, see field(s): */ -/* MC_CMD_FC/MC_CMD_FC_IN_DDR/MC_CMD_FC_IN_DDR_BANK */ -/* Page index of SPD data */ -#define MC_CMD_AOE_IN_DDR_SPD_PAGE_ID_OFST 8 - -/* MC_CMD_AOE_IN_FC msgrequest */ -#define MC_CMD_AOE_IN_FC_LEN 4 -/* MC_CMD_AOE_IN_CMD_OFST 0 */ - -/* MC_CMD_AOE_IN_DDR_ECC_STATUS msgrequest */ -#define MC_CMD_AOE_IN_DDR_ECC_STATUS_LEN 8 -/* MC_CMD_AOE_IN_CMD_OFST 0 */ -#define MC_CMD_AOE_IN_DDR_ECC_STATUS_BANK_OFST 4 -/* Enum values, see field(s): */ -/* MC_CMD_FC/MC_CMD_FC_IN_DDR/MC_CMD_FC_IN_DDR_BANK */ - -/* MC_CMD_AOE_IN_MC_SPI_MASTER msgrequest */ -#define MC_CMD_AOE_IN_MC_SPI_MASTER_LEN 8 -/* MC_CMD_AOE_IN_CMD_OFST 0 */ -/* Basic commands for MC SPI Master emulation. 
*/ -#define MC_CMD_AOE_IN_MC_SPI_MASTER_OP_OFST 4 -/* enum: MC SPI read */ -#define MC_CMD_AOE_IN_MC_SPI_MASTER_READ 0x0 -/* enum: MC SPI write */ -#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE 0x1 - -/* MC_CMD_AOE_IN_MC_SPI_MASTER_READ msgrequest */ -#define MC_CMD_AOE_IN_MC_SPI_MASTER_READ_LEN 12 -/* MC_CMD_AOE_IN_CMD_OFST 0 */ -#define MC_CMD_AOE_IN_MC_SPI_MASTER_READ_OP_OFST 4 -#define MC_CMD_AOE_IN_MC_SPI_MASTER_READ_OFFSET_OFST 8 - -/* MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE msgrequest */ -#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_LEN 16 -/* MC_CMD_AOE_IN_CMD_OFST 0 */ -#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_OP_OFST 4 -#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_OFFSET_OFST 8 -#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_DATA_OFST 12 - -/* MC_CMD_AOE_IN_FC_BOOT msgrequest */ -#define MC_CMD_AOE_IN_FC_BOOT_LEN 8 -/* MC_CMD_AOE_IN_CMD_OFST 0 */ -/* FC boot control flags */ -#define MC_CMD_AOE_IN_FC_BOOT_CONTROL_OFST 4 -#define MC_CMD_AOE_IN_FC_BOOT_CONTROL_BOOT_ENABLE_LBN 0 -#define MC_CMD_AOE_IN_FC_BOOT_CONTROL_BOOT_ENABLE_WIDTH 1 - -/* MC_CMD_AOE_OUT_INFO msgresponse */ -#define MC_CMD_AOE_OUT_INFO_LEN 44 -/* JTAG IDCODE of CPLD */ -#define MC_CMD_AOE_OUT_INFO_CPLD_IDCODE_OFST 0 -/* Version of CPLD */ -#define MC_CMD_AOE_OUT_INFO_CPLD_VERSION_OFST 4 -/* JTAG IDCODE of FPGA */ -#define MC_CMD_AOE_OUT_INFO_FPGA_IDCODE_OFST 8 -/* JTAG USERCODE of FPGA */ -#define MC_CMD_AOE_OUT_INFO_FPGA_VERSION_OFST 12 -/* FPGA type - read from CPLD straps */ -#define MC_CMD_AOE_OUT_INFO_FPGA_TYPE_OFST 16 -#define MC_CMD_AOE_OUT_INFO_FPGA_TYPE_A5_C2 0x1 /* enum */ -#define MC_CMD_AOE_OUT_INFO_FPGA_TYPE_A7_C2 0x2 /* enum */ -/* FPGA state (debug) */ -#define MC_CMD_AOE_OUT_INFO_FPGA_STATE_OFST 20 -/* FPGA image - partition from which loaded */ -#define MC_CMD_AOE_OUT_INFO_FPGA_IMAGE_OFST 24 -/* FC state */ -#define MC_CMD_AOE_OUT_INFO_FC_STATE_OFST 28 -/* enum: Set if watchdog working */ -#define MC_CMD_AOE_OUT_INFO_WATCHDOG 0x1 -/* enum: Set if MC-FC communications working */ -#define MC_CMD_AOE_OUT_INFO_COMMS 0x2 -/* Random pieces of information */ -#define MC_CMD_AOE_OUT_INFO_FLAGS_OFST 32 -/* enum: Power to FPGA supplied by PEG connector, not PCIe bus */ -#define MC_CMD_AOE_OUT_INFO_PEG_POWER 0x1 -/* enum: CPLD apparently good */ -#define MC_CMD_AOE_OUT_INFO_CPLD_GOOD 0x2 -/* enum: FPGA working normally */ -#define MC_CMD_AOE_OUT_INFO_FPGA_GOOD 0x4 -/* enum: FPGA is powered */ -#define MC_CMD_AOE_OUT_INFO_FPGA_POWER 0x8 -/* enum: Board has incompatible SODIMMs fitted */ -#define MC_CMD_AOE_OUT_INFO_BAD_SODIMM 0x10 -/* enum: Board has ByteBlaster connected */ -#define MC_CMD_AOE_OUT_INFO_HAS_BYTEBLASTER 0x20 -/* enum: FPGA Boot flash has an invalid header. */ -#define MC_CMD_AOE_OUT_INFO_FPGA_BAD_BOOT_HDR 0x40 -/* enum: FPGA Application flash is accessible. */ -#define MC_CMD_AOE_OUT_INFO_FPGA_APP_FLASH_GOOD 0x80 -/* Revision of Modena and Sorrento boards. Sorrento can be R1_2 or R1_3. */ -#define MC_CMD_AOE_OUT_INFO_BOARD_REVISION_OFST 36 -#define MC_CMD_AOE_OUT_INFO_UNKNOWN 0x0 /* enum */ -#define MC_CMD_AOE_OUT_INFO_R1_0 0x10 /* enum */ -#define MC_CMD_AOE_OUT_INFO_R1_1 0x11 /* enum */ -#define MC_CMD_AOE_OUT_INFO_R1_2 0x12 /* enum */ -#define MC_CMD_AOE_OUT_INFO_R1_3 0x13 /* enum */ -/* Result of FC booting - not valid while a ByteBlaster is connected. 
*/ -#define MC_CMD_AOE_OUT_INFO_FC_BOOT_RESULT_OFST 40 -/* enum: No error */ -#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_NO_ERROR 0x0 -/* enum: Bad address set in CPLD */ -#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_ADDRESS 0x1 -/* enum: Bad header */ -#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_MAGIC 0x2 -/* enum: Bad text section details */ -#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_TEXT 0x3 -/* enum: Bad checksum */ -#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_CHECKSUM 0x4 -/* enum: Bad BSP */ -#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_BSP 0x5 -/* enum: Flash mode is invalid */ -#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_INVALID_FLASH_MODE 0x6 -/* enum: FC application loaded and execution attempted */ -#define MC_CMD_AOE_OUT_INFO_FC_BOOT_APP_EXECUTE 0x80 -/* enum: FC application Started */ -#define MC_CMD_AOE_OUT_INFO_FC_BOOT_APP_STARTED 0x81 -/* enum: No bootrom in FPGA */ -#define MC_CMD_AOE_OUT_INFO_FC_BOOT_NO_BOOTROM 0xff - -/* MC_CMD_AOE_OUT_CURRENTS msgresponse */ -#define MC_CMD_AOE_OUT_CURRENTS_LEN 68 -/* Set of currents and voltages (mA or mV as appropriate) */ -#define MC_CMD_AOE_OUT_CURRENTS_VALUES_OFST 0 -#define MC_CMD_AOE_OUT_CURRENTS_VALUES_LEN 4 -#define MC_CMD_AOE_OUT_CURRENTS_VALUES_NUM 17 -#define MC_CMD_AOE_OUT_CURRENTS_I_2V5 0x0 /* enum */ -#define MC_CMD_AOE_OUT_CURRENTS_I_1V8 0x1 /* enum */ -#define MC_CMD_AOE_OUT_CURRENTS_I_GXB 0x2 /* enum */ -#define MC_CMD_AOE_OUT_CURRENTS_I_PGM 0x3 /* enum */ -#define MC_CMD_AOE_OUT_CURRENTS_I_XCVR 0x4 /* enum */ -#define MC_CMD_AOE_OUT_CURRENTS_I_1V5 0x5 /* enum */ -#define MC_CMD_AOE_OUT_CURRENTS_V_3V3 0x6 /* enum */ -#define MC_CMD_AOE_OUT_CURRENTS_V_1V5 0x7 /* enum */ -#define MC_CMD_AOE_OUT_CURRENTS_I_IN 0x8 /* enum */ -#define MC_CMD_AOE_OUT_CURRENTS_I_OUT 0x9 /* enum */ -#define MC_CMD_AOE_OUT_CURRENTS_V_IN 0xa /* enum */ -#define MC_CMD_AOE_OUT_CURRENTS_I_OUT_DDR1 0xb /* enum */ -#define MC_CMD_AOE_OUT_CURRENTS_V_OUT_DDR1 0xc /* enum */ -#define MC_CMD_AOE_OUT_CURRENTS_I_OUT_DDR2 0xd /* enum */ -#define MC_CMD_AOE_OUT_CURRENTS_V_OUT_DDR2 0xe /* enum */ -#define MC_CMD_AOE_OUT_CURRENTS_I_OUT_DDR3 0xf /* enum */ -#define MC_CMD_AOE_OUT_CURRENTS_V_OUT_DDR3 0x10 /* enum */ - -/* MC_CMD_AOE_OUT_TEMPERATURES msgresponse */ -#define MC_CMD_AOE_OUT_TEMPERATURES_LEN 40 -/* Set of temperatures */ -#define MC_CMD_AOE_OUT_TEMPERATURES_VALUES_OFST 0 -#define MC_CMD_AOE_OUT_TEMPERATURES_VALUES_LEN 4 -#define MC_CMD_AOE_OUT_TEMPERATURES_VALUES_NUM 10 -/* enum: The first set of enum values are for Modena code. */ -#define MC_CMD_AOE_OUT_TEMPERATURES_MAIN_0 0x0 -#define MC_CMD_AOE_OUT_TEMPERATURES_MAIN_1 0x1 /* enum */ -#define MC_CMD_AOE_OUT_TEMPERATURES_IND_0 0x2 /* enum */ -#define MC_CMD_AOE_OUT_TEMPERATURES_IND_1 0x3 /* enum */ -#define MC_CMD_AOE_OUT_TEMPERATURES_VCCIO1 0x4 /* enum */ -#define MC_CMD_AOE_OUT_TEMPERATURES_VCCIO2 0x5 /* enum */ -#define MC_CMD_AOE_OUT_TEMPERATURES_VCCIO3 0x6 /* enum */ -#define MC_CMD_AOE_OUT_TEMPERATURES_PSU 0x7 /* enum */ -#define MC_CMD_AOE_OUT_TEMPERATURES_FPGA 0x8 /* enum */ -#define MC_CMD_AOE_OUT_TEMPERATURES_SIENA 0x9 /* enum */ -/* enum: The second set of enum values are for Sorrento code. 
*/ -#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_MAIN_0 0x0 -#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_MAIN_1 0x1 /* enum */ -#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_IND_0 0x2 /* enum */ -#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_IND_1 0x3 /* enum */ -#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_SODIMM_0 0x4 /* enum */ -#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_SODIMM_1 0x5 /* enum */ -#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_FPGA 0x6 /* enum */ -#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_PHY0 0x7 /* enum */ -#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_PHY1 0x8 /* enum */ - -/* MC_CMD_AOE_OUT_CPLD_READ msgresponse */ -#define MC_CMD_AOE_OUT_CPLD_READ_LEN 4 -/* The value read from the CPLD */ -#define MC_CMD_AOE_OUT_CPLD_READ_VALUE_OFST 0 - -/* MC_CMD_AOE_OUT_FAN_FAILURES msgresponse */ -#define MC_CMD_AOE_OUT_FAN_FAILURES_LENMIN 4 -#define MC_CMD_AOE_OUT_FAN_FAILURES_LENMAX 252 -#define MC_CMD_AOE_OUT_FAN_FAILURES_LEN(num) (0+4*(num)) -/* Failure counts for each fan */ -#define MC_CMD_AOE_OUT_FAN_FAILURES_COUNT_OFST 0 -#define MC_CMD_AOE_OUT_FAN_FAILURES_COUNT_LEN 4 -#define MC_CMD_AOE_OUT_FAN_FAILURES_COUNT_MINNUM 1 -#define MC_CMD_AOE_OUT_FAN_FAILURES_COUNT_MAXNUM 63 - -/* MC_CMD_AOE_OUT_CPLD_REPROGRAM msgresponse */ -#define MC_CMD_AOE_OUT_CPLD_REPROGRAM_LEN 4 -/* Results of status command (only) */ -#define MC_CMD_AOE_OUT_CPLD_REPROGRAM_STATUS_OFST 0 - -/* MC_CMD_AOE_OUT_POWER_OFF msgresponse */ -#define MC_CMD_AOE_OUT_POWER_OFF_LEN 0 - -/* MC_CMD_AOE_OUT_POWER_ON msgresponse */ -#define MC_CMD_AOE_OUT_POWER_ON_LEN 0 - -/* MC_CMD_AOE_OUT_LOAD msgresponse */ -#define MC_CMD_AOE_OUT_LOAD_LEN 0 - -/* MC_CMD_AOE_OUT_MAC_STATS_DMA msgresponse */ -#define MC_CMD_AOE_OUT_MAC_STATS_DMA_LEN 0 - -/* MC_CMD_AOE_OUT_MAC_STATS_NO_DMA msgresponse: See MC_CMD_MAC_STATS_OUT_NO_DMA - * for details - */ -#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_LEN (((MC_CMD_MAC_NSTATS*64))>>3) -#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_OFST 0 -#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_LEN 8 -#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_LO_OFST 0 -#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_HI_OFST 4 -#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS - -/* MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO msgresponse */ -#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_LENMIN 5 -#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_LENMAX 252 -#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_LEN(num) (4+1*(num)) -/* in bytes */ -#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATALEN_OFST 0 -#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATA_OFST 4 -#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATA_LEN 1 -#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATA_MINNUM 1 -#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATA_MAXNUM 248 - -/* MC_CMD_AOE_OUT_JTAG_WRITE msgresponse */ -#define MC_CMD_AOE_OUT_JTAG_WRITE_LENMIN 12 -#define MC_CMD_AOE_OUT_JTAG_WRITE_LENMAX 252 -#define MC_CMD_AOE_OUT_JTAG_WRITE_LEN(num) (8+4*(num)) -/* Used to align the in and out data blocks so the MC can re-use the cmd */ -#define MC_CMD_AOE_OUT_JTAG_WRITE_DATALEN_OFST 0 -/* out bytes */ -#define MC_CMD_AOE_OUT_JTAG_WRITE_PAD_OFST 4 -#define MC_CMD_AOE_OUT_JTAG_WRITE_DATA_OFST 8 -#define MC_CMD_AOE_OUT_JTAG_WRITE_DATA_LEN 4 -#define MC_CMD_AOE_OUT_JTAG_WRITE_DATA_MINNUM 1 -#define MC_CMD_AOE_OUT_JTAG_WRITE_DATA_MAXNUM 61 - -/* MC_CMD_AOE_OUT_FPGA_ACCESS msgresponse */ -#define MC_CMD_AOE_OUT_FPGA_ACCESS_LEN 0 - -/* MC_CMD_AOE_OUT_DDR msgresponse */ -#define MC_CMD_AOE_OUT_DDR_LENMIN 17 -#define MC_CMD_AOE_OUT_DDR_LENMAX 252 -#define 
MC_CMD_AOE_OUT_DDR_LEN(num) (16+1*(num))
-/* Information on the module. */
-#define MC_CMD_AOE_OUT_DDR_FLAGS_OFST 0
-#define MC_CMD_AOE_OUT_DDR_PRESENT_LBN 0
-#define MC_CMD_AOE_OUT_DDR_PRESENT_WIDTH 1
-#define MC_CMD_AOE_OUT_DDR_POWERED_LBN 1
-#define MC_CMD_AOE_OUT_DDR_POWERED_WIDTH 1
-#define MC_CMD_AOE_OUT_DDR_OPERATIONAL_LBN 2
-#define MC_CMD_AOE_OUT_DDR_OPERATIONAL_WIDTH 1
-#define MC_CMD_AOE_OUT_DDR_NOT_REACHABLE_LBN 3
-#define MC_CMD_AOE_OUT_DDR_NOT_REACHABLE_WIDTH 1
-/* Memory size, in MB. */
-#define MC_CMD_AOE_OUT_DDR_CAPACITY_OFST 4
-/* The memory type, as reported from SPD information */
-#define MC_CMD_AOE_OUT_DDR_TYPE_OFST 8
-/* Nominal voltage of the module (as applied) */
-#define MC_CMD_AOE_OUT_DDR_VOLTAGE_OFST 12
-/* SPD data read from the module */
-#define MC_CMD_AOE_OUT_DDR_SPD_OFST 16
-#define MC_CMD_AOE_OUT_DDR_SPD_LEN 1
-#define MC_CMD_AOE_OUT_DDR_SPD_MINNUM 1
-#define MC_CMD_AOE_OUT_DDR_SPD_MAXNUM 236
-
-/* MC_CMD_AOE_OUT_SET_MTU_OFFSET msgresponse */
-#define MC_CMD_AOE_OUT_SET_MTU_OFFSET_LEN 0
-
-/* MC_CMD_AOE_OUT_LINK_STATE msgresponse */
-#define MC_CMD_AOE_OUT_LINK_STATE_LEN 0
-
-/* MC_CMD_AOE_OUT_SIENA_STATS msgresponse */
-#define MC_CMD_AOE_OUT_SIENA_STATS_LEN 0
-
-/* MC_CMD_AOE_OUT_ASIC_STATS msgresponse */
-#define MC_CMD_AOE_OUT_ASIC_STATS_LEN 0
-
-/* MC_CMD_AOE_OUT_FC msgresponse */
-#define MC_CMD_AOE_OUT_FC_LEN 0
-
-/* MC_CMD_AOE_OUT_DDR_ECC_STATUS msgresponse */
-#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_LEN 8
-/* Flags describing status info on the module. */
-#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_FLAGS_OFST 0
-#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_VALID_LBN 0
-#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_VALID_WIDTH 1
-/* DDR ECC status on the module. */
-#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_STATUS_OFST 4
-#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_SBE_LBN 0
-#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_SBE_WIDTH 1
-#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_DBE_LBN 1
-#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_DBE_WIDTH 1
-#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_CORDROP_LBN 2
-#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_CORDROP_WIDTH 1
-#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_SBE_COUNT_LBN 8
-#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_SBE_COUNT_WIDTH 8
-#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_DBE_COUNT_LBN 16
-#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_DBE_COUNT_WIDTH 8
-#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_CORDROP_COUNT_LBN 24
-#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_CORDROP_COUNT_WIDTH 8
-
-/* MC_CMD_AOE_OUT_MC_SPI_MASTER_READ msgresponse */
-#define MC_CMD_AOE_OUT_MC_SPI_MASTER_READ_LEN 4
-#define MC_CMD_AOE_OUT_MC_SPI_MASTER_READ_DATA_OFST 0
-
-/* MC_CMD_AOE_OUT_MC_SPI_MASTER_WRITE msgresponse */
-#define MC_CMD_AOE_OUT_MC_SPI_MASTER_WRITE_LEN 0
-
-/* MC_CMD_AOE_OUT_MC_SPI_MASTER msgresponse */
-#define MC_CMD_AOE_OUT_MC_SPI_MASTER_LEN 0
-
-/* MC_CMD_AOE_OUT_FC_BOOT msgresponse */
-#define MC_CMD_AOE_OUT_FC_BOOT_LEN 0
-
-
 /***********************************/
 /* MC_CMD_PTP
  * Perform PTP operation
@@ -3616,41 +1344,54 @@
 #define MC_CMD_PTP_OP_ENABLE 0x1
 /* enum: Disable PTP packet timestamping operation. */
 #define MC_CMD_PTP_OP_DISABLE 0x2
-/* enum: Send a PTP packet. */
+/* enum: Send a PTP packet. This operation is used on Siena and Huntington.
+ * From Medford onwards it is not supported: on those platforms PTP transmit
+ * timestamping is done using the fast path.
+ */
 #define MC_CMD_PTP_OP_TRANSMIT 0x3
 /* enum: Read the current NIC time. */
 #define MC_CMD_PTP_OP_READ_NIC_TIME 0x4
-/* enum: Get the current PTP status. */
+/* enum: Get the current PTP status. Note that the clock frequency returned (in
+ * Hz) is rounded to the nearest MHz (e.g. 666000000 for 666666666).
+ */
 #define MC_CMD_PTP_OP_STATUS 0x5
 /* enum: Adjust the PTP NIC's time. */
 #define MC_CMD_PTP_OP_ADJUST 0x6
 /* enum: Synchronize host and NIC time. */
 #define MC_CMD_PTP_OP_SYNCHRONIZE 0x7
-/* enum: Basic manufacturing tests. */
+/* enum: Basic manufacturing tests. Siena PTP adapters only. */
 #define MC_CMD_PTP_OP_MANFTEST_BASIC 0x8
-/* enum: Packet based manufacturing tests. */
+/* enum: Packet based manufacturing tests. Siena PTP adapters only. */
 #define MC_CMD_PTP_OP_MANFTEST_PACKET 0x9
 /* enum: Reset some of the PTP related statistics */
 #define MC_CMD_PTP_OP_RESET_STATS 0xa
 /* enum: Debug operations to MC. */
 #define MC_CMD_PTP_OP_DEBUG 0xb
-/* enum: Read an FPGA register */
+/* enum: Read an FPGA register. Siena PTP adapters only. */
 #define MC_CMD_PTP_OP_FPGAREAD 0xc
-/* enum: Write an FPGA register */
+/* enum: Write an FPGA register. Siena PTP adapters only. */
 #define MC_CMD_PTP_OP_FPGAWRITE 0xd
 /* enum: Apply an offset to the NIC clock */
 #define MC_CMD_PTP_OP_CLOCK_OFFSET_ADJUST 0xe
-/* enum: Change Apply an offset to the NIC clock */
+/* enum: Change the frequency correction applied to the NIC clock */
 #define MC_CMD_PTP_OP_CLOCK_FREQ_ADJUST 0xf
-/* enum: Set the MC packet filter VLAN tags for received PTP packets */
+/* enum: Set the MC packet filter VLAN tags for received PTP packets.
+ * Deprecated for Huntington onwards.
+ */
 #define MC_CMD_PTP_OP_RX_SET_VLAN_FILTER 0x10
-/* enum: Set the MC packet filter UUID for received PTP packets */
+/* enum: Set the MC packet filter UUID for received PTP packets. Deprecated for
+ * Huntington onwards.
+ */
 #define MC_CMD_PTP_OP_RX_SET_UUID_FILTER 0x11
-/* enum: Set the MC packet filter Domain for received PTP packets */
+/* enum: Set the MC packet filter Domain for received PTP packets. Deprecated
+ * for Huntington onwards.
+ */
 #define MC_CMD_PTP_OP_RX_SET_DOMAIN_FILTER 0x12
-/* enum: Set the clock source */
+/* enum: Set the clock source. Required for snapper tests on Huntington and
+ * Medford. Not implemented for Siena or Medford2.
+ */
 #define MC_CMD_PTP_OP_SET_CLK_SRC 0x13
-/* enum: Reset value of Timer Reg. */
+/* enum: Reset value of Timer Reg. Not implemented. */
 #define MC_CMD_PTP_OP_RST_CLK 0x14
 /* enum: Enable the forwarding of PPS events to the host */
 #define MC_CMD_PTP_OP_PPS_ENABLE 0x15
@@ -3671,7 +1412,7 @@
 /* enum: Unsubscribe to stop receiving time events */
 #define MC_CMD_PTP_OP_TIME_EVENT_UNSUBSCRIBE 0x19
 /* enum: PPS based manufacturing tests. Requires PPS output to be looped to PPS
- * input on the same NIC.
+ * input on the same NIC. Siena PTP adapters only.
  */
 #define MC_CMD_PTP_OP_MANFTEST_PPS 0x1a
 /* enum: Set the PTP sync status. Status is used by firmware to report to event
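All MC_CMD_PTP requests share the same two leading dwords: the operation code at offset 0 and a peripheral ID at offset 4, with the per-operation payload following. A hedged sketch of framing the simplest request, MC_CMD_PTP_OP_READ_NIC_TIME; the little-endian helper is illustrative (MCDI payloads are little-endian dword arrays), not a function from this header:

#include <stdint.h>
#include <string.h>

#define MC_CMD_PTP_IN_CMD_OFST          0
#define MC_CMD_PTP_IN_PERIPH_ID_OFST    4
#define MC_CMD_PTP_OP_READ_NIC_TIME     0x4
#define MC_CMD_PTP_IN_READ_NIC_TIME_LEN 8

/* Illustrative helper: store one little-endian dword into the payload. */
static void put_dword_le(uint8_t *buf, unsigned int ofst, uint32_t value)
{
        buf[ofst + 0] = (uint8_t)(value & 0xff);
        buf[ofst + 1] = (uint8_t)((value >> 8) & 0xff);
        buf[ofst + 2] = (uint8_t)((value >> 16) & 0xff);
        buf[ofst + 3] = (uint8_t)((value >> 24) & 0xff);
}

static void ptp_build_read_nic_time(uint8_t req[MC_CMD_PTP_IN_READ_NIC_TIME_LEN])
{
        memset(req, 0, MC_CMD_PTP_IN_READ_NIC_TIME_LEN);
        put_dword_le(req, MC_CMD_PTP_IN_CMD_OFST, MC_CMD_PTP_OP_READ_NIC_TIME);
        put_dword_le(req, MC_CMD_PTP_IN_PERIPH_ID_OFST, 0); /* single PTP block */
}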
@@ -3684,11 +1425,15 @@
 /* MC_CMD_PTP_IN_ENABLE msgrequest */
 #define MC_CMD_PTP_IN_ENABLE_LEN 16
 #define MC_CMD_PTP_IN_CMD_OFST 0
+#define MC_CMD_PTP_IN_CMD_LEN 4
 #define MC_CMD_PTP_IN_PERIPH_ID_OFST 4
-/* Event queue for PTP events */
+#define MC_CMD_PTP_IN_PERIPH_ID_LEN 4
+/* Not used. Events are always sent to function relative queue 0. */
 #define MC_CMD_PTP_IN_ENABLE_QUEUE_OFST 8
-/* PTP timestamping mode */
+#define MC_CMD_PTP_IN_ENABLE_QUEUE_LEN 4
+/* PTP timestamping mode. Not used from Huntington onwards. */
 #define MC_CMD_PTP_IN_ENABLE_MODE_OFST 12
+#define MC_CMD_PTP_IN_ENABLE_MODE_LEN 4
 /* enum: PTP, version 1 */
 #define MC_CMD_PTP_MODE_V1 0x0
 /* enum: PTP, version 1, with VLAN headers - deprecated */
@@ -3705,16 +1450,21 @@
 /* MC_CMD_PTP_IN_DISABLE msgrequest */
 #define MC_CMD_PTP_IN_DISABLE_LEN 8
 /* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
 /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 
 /* MC_CMD_PTP_IN_TRANSMIT msgrequest */
 #define MC_CMD_PTP_IN_TRANSMIT_LENMIN 13
 #define MC_CMD_PTP_IN_TRANSMIT_LENMAX 252
 #define MC_CMD_PTP_IN_TRANSMIT_LEN(num) (12+1*(num))
 /* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
 /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* Transmit packet length */
 #define MC_CMD_PTP_IN_TRANSMIT_LENGTH_OFST 8
+#define MC_CMD_PTP_IN_TRANSMIT_LENGTH_LEN 4
 /* Transmit packet data */
 #define MC_CMD_PTP_IN_TRANSMIT_PACKET_OFST 12
 #define MC_CMD_PTP_IN_TRANSMIT_PACKET_LEN 1
@@ -3724,17 +1474,30 @@
 /* MC_CMD_PTP_IN_READ_NIC_TIME msgrequest */
 #define MC_CMD_PTP_IN_READ_NIC_TIME_LEN 8
 /* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+
+/* MC_CMD_PTP_IN_READ_NIC_TIME_V2 msgrequest */
+#define MC_CMD_PTP_IN_READ_NIC_TIME_V2_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
 /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 
 /* MC_CMD_PTP_IN_STATUS msgrequest */
 #define MC_CMD_PTP_IN_STATUS_LEN 8
 /* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
 /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 
 /* MC_CMD_PTP_IN_ADJUST msgrequest */
 #define MC_CMD_PTP_IN_ADJUST_LEN 24
 /* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
 /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* Frequency adjustment 40 bit fixed point ns */
 #define MC_CMD_PTP_IN_ADJUST_FREQ_OFST 8
 #define MC_CMD_PTP_IN_ADJUST_FREQ_LEN 8
@@ -3742,21 +1505,67 @@
 #define MC_CMD_PTP_IN_ADJUST_FREQ_HI_OFST 12
 /* enum: Number of fractional bits in frequency adjustment */
 #define MC_CMD_PTP_IN_ADJUST_BITS 0x28
+/* enum: Number of fractional bits in frequency adjustment when FP44_FREQ_ADJ
+ * is indicated in the MC_CMD_PTP_OUT_GET_ATTRIBUTES command CAPABILITIES
+ * field.
+ */
+#define MC_CMD_PTP_IN_ADJUST_BITS_FP44 0x2c
 /* Time adjustment in seconds */
 #define MC_CMD_PTP_IN_ADJUST_SECONDS_OFST 16
+#define MC_CMD_PTP_IN_ADJUST_SECONDS_LEN 4
 /* Time adjustment major value */
 #define MC_CMD_PTP_IN_ADJUST_MAJOR_OFST 16
+#define MC_CMD_PTP_IN_ADJUST_MAJOR_LEN 4
 /* Time adjustment in nanoseconds */
 #define MC_CMD_PTP_IN_ADJUST_NANOSECONDS_OFST 20
+#define MC_CMD_PTP_IN_ADJUST_NANOSECONDS_LEN 4
 /* Time adjustment minor value */
 #define MC_CMD_PTP_IN_ADJUST_MINOR_OFST 20
+#define MC_CMD_PTP_IN_ADJUST_MINOR_LEN 4
+
+/* MC_CMD_PTP_IN_ADJUST_V2 msgrequest */
+#define MC_CMD_PTP_IN_ADJUST_V2_LEN 28
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Frequency adjustment 40 bit fixed point ns */
+#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_OFST 8
+#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_LEN 8
+#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_LO_OFST 8
+#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_HI_OFST 12
+/* enum: Number of fractional bits in frequency adjustment */
+/* MC_CMD_PTP_IN_ADJUST_BITS 0x28 */
+/* enum: Number of fractional bits in frequency adjustment when FP44_FREQ_ADJ
+ * is indicated in the MC_CMD_PTP_OUT_GET_ATTRIBUTES command CAPABILITIES
+ * field.
+ */
+/* MC_CMD_PTP_IN_ADJUST_BITS_FP44 0x2c */
+/* Time adjustment in seconds */
+#define MC_CMD_PTP_IN_ADJUST_V2_SECONDS_OFST 16
+#define MC_CMD_PTP_IN_ADJUST_V2_SECONDS_LEN 4
+/* Time adjustment major value */
+#define MC_CMD_PTP_IN_ADJUST_V2_MAJOR_OFST 16
+#define MC_CMD_PTP_IN_ADJUST_V2_MAJOR_LEN 4
+/* Time adjustment in nanoseconds */
+#define MC_CMD_PTP_IN_ADJUST_V2_NANOSECONDS_OFST 20
+#define MC_CMD_PTP_IN_ADJUST_V2_NANOSECONDS_LEN 4
+/* Time adjustment minor value */
+#define MC_CMD_PTP_IN_ADJUST_V2_MINOR_OFST 20
+#define MC_CMD_PTP_IN_ADJUST_V2_MINOR_LEN 4
+/* Upper 32bits of major time offset adjustment */
+#define MC_CMD_PTP_IN_ADJUST_V2_MAJOR_HI_OFST 24
+#define MC_CMD_PTP_IN_ADJUST_V2_MAJOR_HI_LEN 4
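FREQ is a signed 64-bit fixed-point value with MC_CMD_PTP_IN_ADJUST_BITS (40) fractional bits by default, or 44 when FP44_FREQ_ADJ is reported in the MC_CMD_PTP_OUT_GET_ATTRIBUTES CAPABILITIES field. A sketch of the conversion from a parts-per-billion correction, assuming a compiler with the __int128 extension; the helper name is illustrative:

#include <stdint.h>

/* Illustrative helper: scale a correction in parts-per-billion into the
 * signed fixed-point FREQ word. fp_bits is 40 (MC_CMD_PTP_IN_ADJUST_BITS)
 * or 44 (MC_CMD_PTP_IN_ADJUST_BITS_FP44), depending on the advertised
 * capabilities. __int128 avoids overflowing ppb << 44 before the divide.
 */
static int64_t ptp_ppb_to_fixed(int64_t ppb, unsigned int fp_bits)
{
        return (int64_t)(((__int128)ppb << fp_bits) / 1000000000);
}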
 
 /* MC_CMD_PTP_IN_SYNCHRONIZE msgrequest */
 #define MC_CMD_PTP_IN_SYNCHRONIZE_LEN 20
 /* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
 /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* Number of time readings to capture */
 #define MC_CMD_PTP_IN_SYNCHRONIZE_NUMTIMESETS_OFST 8
+#define MC_CMD_PTP_IN_SYNCHRONIZE_NUMTIMESETS_LEN 4
 /* Host address in which to write "synchronization started" indication (64
  * bits)
  */
@@ -3768,42 +1577,58 @@
 /* MC_CMD_PTP_IN_MANFTEST_BASIC msgrequest */
 #define MC_CMD_PTP_IN_MANFTEST_BASIC_LEN 8
 /* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
 /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 
 /* MC_CMD_PTP_IN_MANFTEST_PACKET msgrequest */
 #define MC_CMD_PTP_IN_MANFTEST_PACKET_LEN 12
 /* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
 /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* Enable or disable packet testing */
 #define MC_CMD_PTP_IN_MANFTEST_PACKET_TEST_ENABLE_OFST 8
+#define MC_CMD_PTP_IN_MANFTEST_PACKET_TEST_ENABLE_LEN 4
 
-/* MC_CMD_PTP_IN_RESET_STATS msgrequest */
+/* MC_CMD_PTP_IN_RESET_STATS msgrequest: Reset PTP statistics */
 #define MC_CMD_PTP_IN_RESET_STATS_LEN 8
 /* MC_CMD_PTP_IN_CMD_OFST 0 */
-/* Reset PTP statistics */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
 /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 
 /* MC_CMD_PTP_IN_DEBUG msgrequest */
 #define MC_CMD_PTP_IN_DEBUG_LEN 12
 /* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
 /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* Debug operations */
 #define MC_CMD_PTP_IN_DEBUG_DEBUG_PARAM_OFST 8
+#define MC_CMD_PTP_IN_DEBUG_DEBUG_PARAM_LEN 4
 
 /* MC_CMD_PTP_IN_FPGAREAD msgrequest */
 #define MC_CMD_PTP_IN_FPGAREAD_LEN 16
 /* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
 /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 #define MC_CMD_PTP_IN_FPGAREAD_ADDR_OFST 8
+#define MC_CMD_PTP_IN_FPGAREAD_ADDR_LEN 4
 #define MC_CMD_PTP_IN_FPGAREAD_NUMBYTES_OFST 12
+#define MC_CMD_PTP_IN_FPGAREAD_NUMBYTES_LEN 4
 
 /* MC_CMD_PTP_IN_FPGAWRITE msgrequest */
 #define MC_CMD_PTP_IN_FPGAWRITE_LENMIN 13
 #define MC_CMD_PTP_IN_FPGAWRITE_LENMAX 252
 #define MC_CMD_PTP_IN_FPGAWRITE_LEN(num) (12+1*(num))
 /* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
 /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 #define MC_CMD_PTP_IN_FPGAWRITE_ADDR_OFST 8
+#define MC_CMD_PTP_IN_FPGAWRITE_ADDR_LEN 4
 #define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_OFST 12
 #define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_LEN 1
 #define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_MINNUM 1
@@ -3812,34 +1637,67 @@
 /* MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST msgrequest */
 #define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_LEN 16
 /* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
 /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* Time adjustment in seconds */
 #define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_SECONDS_OFST 8
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_SECONDS_LEN 4
 /* Time adjustment major value */
 #define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MAJOR_OFST 8
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MAJOR_LEN 4
 /* Time adjustment in nanoseconds */
 #define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_NANOSECONDS_OFST 12
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_NANOSECONDS_LEN 4
 /* Time adjustment minor value */
 #define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MINOR_OFST 12
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MINOR_LEN 4
+
+/* MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2 msgrequest */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_LEN 20
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Time adjustment in seconds */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_SECONDS_OFST 8
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_SECONDS_LEN 4
+/* Time adjustment major value */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_OFST 8
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_LEN 4
+/* Time adjustment in nanoseconds */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_NANOSECONDS_OFST 12
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_NANOSECONDS_LEN 4
+/* Time adjustment minor value */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MINOR_OFST 12
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MINOR_LEN 4
+/* Upper 32bits of major time offset adjustment */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_HI_OFST 16
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_HI_LEN 4
 
 /* MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST msgrequest */
 #define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_LEN 16
 /* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
 /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* Frequency adjustment 40 bit fixed point ns */
 #define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_OFST 8
 #define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LEN 8
 #define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LO_OFST 8
 #define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_HI_OFST 12
-/* enum: Number of fractional bits in frequency adjustment */
-/* MC_CMD_PTP_IN_ADJUST_BITS 0x28 */
+/* Enum values, see field(s): */
+/* MC_CMD_PTP/MC_CMD_PTP_IN_ADJUST/FREQ */
 
 /* MC_CMD_PTP_IN_RX_SET_VLAN_FILTER msgrequest */
 #define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_LEN 24
 /* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
 /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* Number of VLAN tags, 0 if not VLAN */
 #define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_NUM_VLAN_TAGS_OFST 8
+#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_NUM_VLAN_TAGS_LEN 4
 /* Set of VLAN tags to filter against */
 #define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_OFST 12
 #define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_LEN 4
@@ -3848,9 +1706,12 @@
 /* MC_CMD_PTP_IN_RX_SET_UUID_FILTER msgrequest */
 #define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_LEN 20
 /* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
 /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* 1 to enable UUID filtering, 0 to disable */
 #define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_ENABLE_OFST 8
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_ENABLE_LEN 4
 /* UUID to filter against */
 #define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_OFST 12
 #define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LEN 8
@@ -3860,62 +1721,82 @@
 /* MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER msgrequest */
 #define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_LEN 16
 /* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
 /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* 1 to enable Domain filtering, 0 to disable */
 #define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_ENABLE_OFST 8
+#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_ENABLE_LEN 4
 /* Domain number to filter against */
 #define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_DOMAIN_OFST 12
+#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_DOMAIN_LEN 4
 
 /* MC_CMD_PTP_IN_SET_CLK_SRC msgrequest */
 #define MC_CMD_PTP_IN_SET_CLK_SRC_LEN 12
 /* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
 /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* Set the clock source. */
 #define MC_CMD_PTP_IN_SET_CLK_SRC_CLK_OFST 8
+#define MC_CMD_PTP_IN_SET_CLK_SRC_CLK_LEN 4
 /* enum: Internal. */
 #define MC_CMD_PTP_CLK_SRC_INTERNAL 0x0
 /* enum: External. */
 #define MC_CMD_PTP_CLK_SRC_EXTERNAL 0x1
*/ #define MC_CMD_PTP_IN_PPS_ENABLE_QUEUE_ID_OFST 8 +#define MC_CMD_PTP_IN_PPS_ENABLE_QUEUE_ID_LEN 4 /* MC_CMD_PTP_IN_GET_TIME_FORMAT msgrequest */ #define MC_CMD_PTP_IN_GET_TIME_FORMAT_LEN 8 /* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ /* MC_CMD_PTP_IN_GET_ATTRIBUTES msgrequest */ #define MC_CMD_PTP_IN_GET_ATTRIBUTES_LEN 8 /* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ /* MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS msgrequest */ #define MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS_LEN 8 /* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ /* MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE msgrequest */ #define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN 12 /* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ /* Original field containing queue ID. Now extended to include flags. */ #define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_OFST 8 +#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_LEN 4 #define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_LBN 0 #define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_WIDTH 16 #define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_LBN 31 @@ -3924,29 +1805,39 @@ /* MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE msgrequest */ #define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN 16 /* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ /* Unsubscribe options */ #define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL_OFST 8 +#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL_LEN 4 /* enum: Unsubscribe a single queue */ #define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_SINGLE 0x0 /* enum: Unsubscribe all queues */ #define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_ALL 0x1 /* Event queue ID */ #define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE_OFST 12 +#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE_LEN 4 /* MC_CMD_PTP_IN_MANFTEST_PPS msgrequest */ #define MC_CMD_PTP_IN_MANFTEST_PPS_LEN 12 /* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ /* 1 to enable PPS test mode, 0 to disable and return result. */ #define MC_CMD_PTP_IN_MANFTEST_PPS_TEST_ENABLE_OFST 8 +#define MC_CMD_PTP_IN_MANFTEST_PPS_TEST_ENABLE_LEN 4 /* MC_CMD_PTP_IN_SET_SYNC_STATUS msgrequest */ #define MC_CMD_PTP_IN_SET_SYNC_STATUS_LEN 24 /* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ /* NIC - Host System Clock Synchronization status */ #define MC_CMD_PTP_IN_SET_SYNC_STATUS_STATUS_OFST 8 +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_STATUS_LEN 4 /* enum: Host System clock and NIC clock are not in sync */ #define MC_CMD_PTP_IN_SET_SYNC_STATUS_NOT_IN_SYNC 0x0 /* enum: Host System clock and NIC clock are synchronized */ @@ -3955,8 +1846,11 @@ * no longer in sync. 
*/ #define MC_CMD_PTP_IN_SET_SYNC_STATUS_TIMEOUT_OFST 12 +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_TIMEOUT_LEN 4 #define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED0_OFST 16 +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED0_LEN 4 #define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED1_OFST 20 +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED1_LEN 4 /* MC_CMD_PTP_OUT msgresponse */ #define MC_CMD_PTP_OUT_LEN 0 @@ -3965,12 +1859,16 @@ #define MC_CMD_PTP_OUT_TRANSMIT_LEN 8 /* Value of seconds timestamp */ #define MC_CMD_PTP_OUT_TRANSMIT_SECONDS_OFST 0 +#define MC_CMD_PTP_OUT_TRANSMIT_SECONDS_LEN 4 /* Timestamp major value */ #define MC_CMD_PTP_OUT_TRANSMIT_MAJOR_OFST 0 +#define MC_CMD_PTP_OUT_TRANSMIT_MAJOR_LEN 4 /* Value of nanoseconds timestamp */ #define MC_CMD_PTP_OUT_TRANSMIT_NANOSECONDS_OFST 4 +#define MC_CMD_PTP_OUT_TRANSMIT_NANOSECONDS_LEN 4 /* Timestamp minor value */ #define MC_CMD_PTP_OUT_TRANSMIT_MINOR_OFST 4 +#define MC_CMD_PTP_OUT_TRANSMIT_MINOR_LEN 4 /* MC_CMD_PTP_OUT_TIME_EVENT_SUBSCRIBE msgresponse */ #define MC_CMD_PTP_OUT_TIME_EVENT_SUBSCRIBE_LEN 0 @@ -3982,47 +1880,85 @@ #define MC_CMD_PTP_OUT_READ_NIC_TIME_LEN 8 /* Value of seconds timestamp */ #define MC_CMD_PTP_OUT_READ_NIC_TIME_SECONDS_OFST 0 +#define MC_CMD_PTP_OUT_READ_NIC_TIME_SECONDS_LEN 4 /* Timestamp major value */ #define MC_CMD_PTP_OUT_READ_NIC_TIME_MAJOR_OFST 0 +#define MC_CMD_PTP_OUT_READ_NIC_TIME_MAJOR_LEN 4 /* Value of nanoseconds timestamp */ #define MC_CMD_PTP_OUT_READ_NIC_TIME_NANOSECONDS_OFST 4 +#define MC_CMD_PTP_OUT_READ_NIC_TIME_NANOSECONDS_LEN 4 /* Timestamp minor value */ #define MC_CMD_PTP_OUT_READ_NIC_TIME_MINOR_OFST 4 +#define MC_CMD_PTP_OUT_READ_NIC_TIME_MINOR_LEN 4 + +/* MC_CMD_PTP_OUT_READ_NIC_TIME_V2 msgresponse */ +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_LEN 12 +/* Value of seconds timestamp */ +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_SECONDS_OFST 0 +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_SECONDS_LEN 4 +/* Timestamp major value */ +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_OFST 0 +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_LEN 4 +/* Value of nanoseconds timestamp */ +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_NANOSECONDS_OFST 4 +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_NANOSECONDS_LEN 4 +/* Timestamp minor value */ +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MINOR_OFST 4 +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MINOR_LEN 4 +/* Upper 32bits of major timestamp value */ +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_HI_OFST 8 +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_HI_LEN 4 /* MC_CMD_PTP_OUT_STATUS msgresponse */ #define MC_CMD_PTP_OUT_STATUS_LEN 64 /* Frequency of NIC's hardware clock */ #define MC_CMD_PTP_OUT_STATUS_CLOCK_FREQ_OFST 0 +#define MC_CMD_PTP_OUT_STATUS_CLOCK_FREQ_LEN 4 /* Number of packets transmitted and timestamped */ #define MC_CMD_PTP_OUT_STATUS_STATS_TX_OFST 4 +#define MC_CMD_PTP_OUT_STATUS_STATS_TX_LEN 4 /* Number of packets received and timestamped */ #define MC_CMD_PTP_OUT_STATUS_STATS_RX_OFST 8 +#define MC_CMD_PTP_OUT_STATUS_STATS_RX_LEN 4 /* Number of packets timestamped by the FPGA */ #define MC_CMD_PTP_OUT_STATUS_STATS_TS_OFST 12 +#define MC_CMD_PTP_OUT_STATUS_STATS_TS_LEN 4 /* Number of packets filter matched */ #define MC_CMD_PTP_OUT_STATUS_STATS_FM_OFST 16 +#define MC_CMD_PTP_OUT_STATUS_STATS_FM_LEN 4 /* Number of packets not filter matched */ #define MC_CMD_PTP_OUT_STATUS_STATS_NFM_OFST 20 +#define MC_CMD_PTP_OUT_STATUS_STATS_NFM_LEN 4 /* Number of PPS overflows (noise on input?) 
*/ #define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFLOW_OFST 24 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFLOW_LEN 4 /* Number of PPS bad periods */ #define MC_CMD_PTP_OUT_STATUS_STATS_PPS_BAD_OFST 28 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_BAD_LEN 4 /* Minimum period of PPS pulse in nanoseconds */ #define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MIN_OFST 32 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MIN_LEN 4 /* Maximum period of PPS pulse in nanoseconds */ #define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MAX_OFST 36 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MAX_LEN 4 /* Last period of PPS pulse in nanoseconds */ #define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_LAST_OFST 40 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_LAST_LEN 4 /* Mean period of PPS pulse in nanoseconds */ #define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MEAN_OFST 44 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MEAN_LEN 4 /* Minimum offset of PPS pulse in nanoseconds (signed) */ #define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MIN_OFST 48 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MIN_LEN 4 /* Maximum offset of PPS pulse in nanoseconds (signed) */ #define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MAX_OFST 52 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MAX_LEN 4 /* Last offset of PPS pulse in nanoseconds (signed) */ #define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_LAST_OFST 56 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_LAST_LEN 4 /* Mean offset of PPS pulse in nanoseconds (signed) */ #define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MEAN_OFST 60 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MEAN_LEN 4 /* MC_CMD_PTP_OUT_SYNCHRONIZE msgresponse */ #define MC_CMD_PTP_OUT_SYNCHRONIZE_LENMIN 20 @@ -4035,23 +1971,31 @@ #define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM 12 /* Host time immediately before NIC's hardware clock read */ #define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_OFST 0 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_LEN 4 /* Value of seconds timestamp */ #define MC_CMD_PTP_OUT_SYNCHRONIZE_SECONDS_OFST 4 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_SECONDS_LEN 4 /* Timestamp major value */ #define MC_CMD_PTP_OUT_SYNCHRONIZE_MAJOR_OFST 4 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_MAJOR_LEN 4 /* Value of nanoseconds timestamp */ #define MC_CMD_PTP_OUT_SYNCHRONIZE_NANOSECONDS_OFST 8 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_NANOSECONDS_LEN 4 /* Timestamp minor value */ #define MC_CMD_PTP_OUT_SYNCHRONIZE_MINOR_OFST 8 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_MINOR_LEN 4 /* Host time immediately after NIC's hardware clock read */ #define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTEND_OFST 12 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTEND_LEN 4 /* Number of nanoseconds waited after reading NIC's hardware clock */ #define MC_CMD_PTP_OUT_SYNCHRONIZE_WAITNS_OFST 16 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_WAITNS_LEN 4 /* MC_CMD_PTP_OUT_MANFTEST_BASIC msgresponse */ #define MC_CMD_PTP_OUT_MANFTEST_BASIC_LEN 8 /* Results of testing */ #define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_RESULT_OFST 0 +#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_RESULT_LEN 4 /* enum: Successful test */ #define MC_CMD_PTP_MANF_SUCCESS 0x0 /* enum: FPGA load failed */ @@ -4084,15 +2028,19 @@ #define MC_CMD_PTP_MANF_CLOCK_READ 0xe /* Presence of external oscillator */ #define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_EXTOSC_OFST 4 +#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_EXTOSC_LEN 4 /* MC_CMD_PTP_OUT_MANFTEST_PACKET msgresponse */ #define MC_CMD_PTP_OUT_MANFTEST_PACKET_LEN 12 /* Results of testing */ #define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_RESULT_OFST 0 +#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_RESULT_LEN 4 /* Number of packets received by 
FPGA */
#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FPGACOUNT_OFST 4
+#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FPGACOUNT_LEN 4
/* Number of packets received by Siena filters */
#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FILTERCOUNT_OFST 8
+#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FILTERCOUNT_LEN 4

/* MC_CMD_PTP_OUT_FPGAREAD msgresponse */
#define MC_CMD_PTP_OUT_FPGAREAD_LENMIN 1
@@ -4108,9 +2056,11 @@
/* Time format required/used by this NIC. Applies to all PTP MCDI
 * operations that pass times between the host and firmware. If this operation
 * is not supported (older firmware) a format of seconds and nanoseconds should
- * be assumed.
+ * be assumed. Note this enum is deprecated. Do not add to it - use the
+ * TIME_FORMAT field in MC_CMD_PTP_OUT_GET_ATTRIBUTES instead.
 */
#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_FORMAT_OFST 0
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_FORMAT_LEN 4
/* enum: Times are in seconds and nanoseconds */
#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_NANOSECONDS 0x0
/* enum: Major register has units of 16 second per tick, minor 8 ns per tick */
@@ -4126,12 +2076,16 @@
 * be assumed.
 */
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT_OFST 0
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT_LEN 4
/* enum: Times are in seconds and nanoseconds */
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS 0x0
/* enum: Major register has units of 16 second per tick, minor 8 ns per tick */
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_16SECONDS_8NANOSECONDS 0x1
/* enum: Major register has units of seconds, minor 2^-27s per tick */
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_27FRACTION 0x2
+/* enum: Major register units are seconds, minor units are quarter nanoseconds
+ */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_QTR_NANOSECONDS 0x3
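
Since the TIME_FORMAT value determines how MAJOR/MINOR pairs are scaled, a
minimal illustrative conversion helper may clarify the four formats above.
This sketch is not part of the generated header and the function name is
invented for the example.

#include <stdint.h>

/* Illustrative sketch: convert a (MAJOR, MINOR) reading to nanoseconds. */
static uint64_t
ptp_reading_to_ns(uint32_t format, uint32_t major, uint32_t minor)
{
	switch (format) {
	case MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS:
		return (uint64_t)major * 1000000000ULL + minor;
	case MC_CMD_PTP_OUT_GET_ATTRIBUTES_16SECONDS_8NANOSECONDS:
		/* major counts 16 s units, minor counts 8 ns units */
		return (uint64_t)major * 16ULL * 1000000000ULL +
		       (uint64_t)minor * 8ULL;
	case MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_27FRACTION:
		/* minor counts 2^-27 s ticks */
		return (uint64_t)major * 1000000000ULL +
		       (((uint64_t)minor * 1000000000ULL) >> 27);
	case MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_QTR_NANOSECONDS:
		/* minor counts quarter nanoseconds */
		return (uint64_t)major * 1000000000ULL + minor / 4;
	default:
		return 0;	/* unknown format */
	}
}
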
/* Minimum acceptable value for a corrected synchronization timeset. When
 * comparing host and NIC clock times, the MC returns a set of samples that
 * contain the host start and end time, the MC time when the host start was
@@ -4140,46 +2094,66 @@
 * end and start times minus the time that the MC waited for host end.
 */
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN_OFST 4
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN_LEN 4
/* Various PTP capabilities */
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_OFST 8
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_LEN 4
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_LBN 0
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_WIDTH 1
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RX_TSTAMP_OOB_LBN 1
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RX_TSTAMP_OOB_WIDTH 1
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_64BIT_SECONDS_LBN 2
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_64BIT_SECONDS_WIDTH 1
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_FP44_FREQ_ADJ_LBN 3
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_FP44_FREQ_ADJ_WIDTH 1
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED0_OFST 12
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED0_LEN 4
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED1_OFST 16
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED1_LEN 4
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED2_OFST 20
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED2_LEN 4

/* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS msgresponse */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_LEN 16
/* Uncorrected error on PTP transmit timestamps in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT_OFST 0
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT_LEN 4
/* Uncorrected error on PTP receive timestamps in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE_OFST 4
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE_LEN 4
/* Uncorrected error on PPS output in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT_OFST 8
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT_LEN 4
/* Uncorrected error on PPS input in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN_OFST 12
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN_LEN 4

/* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2 msgresponse */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_LEN 24
/* Uncorrected error on PTP transmit timestamps in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_TX_OFST 0
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_TX_LEN 4
/* Uncorrected error on PTP receive timestamps in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_RX_OFST 4
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_RX_LEN 4
/* Uncorrected error on PPS output in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_OUT_OFST 8
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_OUT_LEN 4
/* Uncorrected error on PPS input in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_IN_OFST 12
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_IN_LEN 4
/* Uncorrected error on non-PTP transmit timestamps in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_TX_OFST 16
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_TX_LEN 4
/* Uncorrected error on non-PTP receive timestamps in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_RX_OFST 20
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_RX_LEN 4

/* MC_CMD_PTP_OUT_MANFTEST_PPS msgresponse */
#define MC_CMD_PTP_OUT_MANFTEST_PPS_LEN 4
/* Results of testing */
#define MC_CMD_PTP_OUT_MANFTEST_PPS_TEST_RESULT_OFST 0
+#define MC_CMD_PTP_OUT_MANFTEST_PPS_TEST_RESULT_LEN 4
/* Enum values,
see field(s): */ /* MC_CMD_PTP_OUT_MANFTEST_BASIC/TEST_RESULT */ @@ -4194,14 +2168,17 @@ #define MC_CMD_CSR_READ32 0xc #undef MC_CMD_0xc_PRIVILEGE_CTG -#define MC_CMD_0xc_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0xc_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_CSR_READ32_IN msgrequest */ #define MC_CMD_CSR_READ32_IN_LEN 12 /* Address */ #define MC_CMD_CSR_READ32_IN_ADDR_OFST 0 +#define MC_CMD_CSR_READ32_IN_ADDR_LEN 4 #define MC_CMD_CSR_READ32_IN_STEP_OFST 4 +#define MC_CMD_CSR_READ32_IN_STEP_LEN 4 #define MC_CMD_CSR_READ32_IN_NUMWORDS_OFST 8 +#define MC_CMD_CSR_READ32_IN_NUMWORDS_LEN 4 /* MC_CMD_CSR_READ32_OUT msgresponse */ #define MC_CMD_CSR_READ32_OUT_LENMIN 4 @@ -4221,7 +2198,7 @@ #define MC_CMD_CSR_WRITE32 0xd #undef MC_CMD_0xd_PRIVILEGE_CTG -#define MC_CMD_0xd_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0xd_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_CSR_WRITE32_IN msgrequest */ #define MC_CMD_CSR_WRITE32_IN_LENMIN 12 @@ -4229,7 +2206,9 @@ #define MC_CMD_CSR_WRITE32_IN_LEN(num) (8+4*(num)) /* Address */ #define MC_CMD_CSR_WRITE32_IN_ADDR_OFST 0 +#define MC_CMD_CSR_WRITE32_IN_ADDR_LEN 4 #define MC_CMD_CSR_WRITE32_IN_STEP_OFST 4 +#define MC_CMD_CSR_WRITE32_IN_STEP_LEN 4 #define MC_CMD_CSR_WRITE32_IN_BUFFER_OFST 8 #define MC_CMD_CSR_WRITE32_IN_BUFFER_LEN 4 #define MC_CMD_CSR_WRITE32_IN_BUFFER_MINNUM 1 @@ -4238,6 +2217,7 @@ /* MC_CMD_CSR_WRITE32_OUT msgresponse */ #define MC_CMD_CSR_WRITE32_OUT_LEN 4 #define MC_CMD_CSR_WRITE32_OUT_STATUS_OFST 0 +#define MC_CMD_CSR_WRITE32_OUT_STATUS_LEN 4 /***********************************/ @@ -4248,7 +2228,7 @@ #define MC_CMD_HP 0x54 #undef MC_CMD_0x54_PRIVILEGE_CTG -#define MC_CMD_0x54_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x54_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND /* MC_CMD_HP_IN msgrequest */ #define MC_CMD_HP_IN_LEN 16 @@ -4259,6 +2239,7 @@ * sensors. */ #define MC_CMD_HP_IN_SUBCMD_OFST 0 +#define MC_CMD_HP_IN_SUBCMD_LEN 4 /* enum: OCSD (Option Card Sensor Data) sub-command. */ #define MC_CMD_HP_IN_OCSD_SUBCMD 0x0 /* enum: Last known valid HP sub-command. */ @@ -4273,10 +2254,12 @@ * NULL.) */ #define MC_CMD_HP_IN_OCSD_INTERVAL_OFST 12 +#define MC_CMD_HP_IN_OCSD_INTERVAL_LEN 4 /* MC_CMD_HP_OUT msgresponse */ #define MC_CMD_HP_OUT_LEN 4 #define MC_CMD_HP_OUT_OCSD_STATUS_OFST 0 +#define MC_CMD_HP_OUT_OCSD_STATUS_LEN 4 /* enum: OCSD stopped for this card. */ #define MC_CMD_HP_OUT_OCSD_STOPPED 0x1 /* enum: OCSD was successfully started with the address provided. */ @@ -4323,29 +2306,35 @@ * external devices. */ #define MC_CMD_MDIO_READ_IN_BUS_OFST 0 +#define MC_CMD_MDIO_READ_IN_BUS_LEN 4 /* enum: Internal. */ #define MC_CMD_MDIO_BUS_INTERNAL 0x0 /* enum: External. */ #define MC_CMD_MDIO_BUS_EXTERNAL 0x1 /* Port address */ #define MC_CMD_MDIO_READ_IN_PRTAD_OFST 4 +#define MC_CMD_MDIO_READ_IN_PRTAD_LEN 4 /* Device Address or clause 22. */ #define MC_CMD_MDIO_READ_IN_DEVAD_OFST 8 +#define MC_CMD_MDIO_READ_IN_DEVAD_LEN 4 /* enum: By default all the MCDI MDIO operations perform clause45 mode. If you * want to use clause22 then set DEVAD = MC_CMD_MDIO_CLAUSE22. */ #define MC_CMD_MDIO_CLAUSE22 0x20 /* Address */ #define MC_CMD_MDIO_READ_IN_ADDR_OFST 12 +#define MC_CMD_MDIO_READ_IN_ADDR_LEN 4 /* MC_CMD_MDIO_READ_OUT msgresponse */ #define MC_CMD_MDIO_READ_OUT_LEN 8 /* Value */ #define MC_CMD_MDIO_READ_OUT_VALUE_OFST 0 +#define MC_CMD_MDIO_READ_OUT_VALUE_LEN 4 /* Status the MDIO commands return the raw status bits from the MDIO block. A * "good" transaction should have the DONE bit set and all other bits clear. 
*/ #define MC_CMD_MDIO_READ_OUT_STATUS_OFST 4 +#define MC_CMD_MDIO_READ_OUT_STATUS_LEN 4 /* enum: Good. */ #define MC_CMD_MDIO_STATUS_GOOD 0x8 @@ -4365,22 +2354,27 @@ * external devices. */ #define MC_CMD_MDIO_WRITE_IN_BUS_OFST 0 +#define MC_CMD_MDIO_WRITE_IN_BUS_LEN 4 /* enum: Internal. */ /* MC_CMD_MDIO_BUS_INTERNAL 0x0 */ /* enum: External. */ /* MC_CMD_MDIO_BUS_EXTERNAL 0x1 */ /* Port address */ #define MC_CMD_MDIO_WRITE_IN_PRTAD_OFST 4 +#define MC_CMD_MDIO_WRITE_IN_PRTAD_LEN 4 /* Device Address or clause 22. */ #define MC_CMD_MDIO_WRITE_IN_DEVAD_OFST 8 +#define MC_CMD_MDIO_WRITE_IN_DEVAD_LEN 4 /* enum: By default all the MCDI MDIO operations perform clause45 mode. If you * want to use clause22 then set DEVAD = MC_CMD_MDIO_CLAUSE22. */ /* MC_CMD_MDIO_CLAUSE22 0x20 */ /* Address */ #define MC_CMD_MDIO_WRITE_IN_ADDR_OFST 12 +#define MC_CMD_MDIO_WRITE_IN_ADDR_LEN 4 /* Value */ #define MC_CMD_MDIO_WRITE_IN_VALUE_OFST 16 +#define MC_CMD_MDIO_WRITE_IN_VALUE_LEN 4 /* MC_CMD_MDIO_WRITE_OUT msgresponse */ #define MC_CMD_MDIO_WRITE_OUT_LEN 4 @@ -4388,6 +2382,7 @@ * "good" transaction should have the DONE bit set and all other bits clear. */ #define MC_CMD_MDIO_WRITE_OUT_STATUS_OFST 0 +#define MC_CMD_MDIO_WRITE_OUT_STATUS_LEN 4 /* enum: Good. */ /* MC_CMD_MDIO_STATUS_GOOD 0x8 */ @@ -4399,7 +2394,7 @@ #define MC_CMD_DBI_WRITE 0x12 #undef MC_CMD_0x12_PRIVILEGE_CTG -#define MC_CMD_0x12_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x12_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_DBI_WRITE_IN msgrequest */ #define MC_CMD_DBI_WRITE_IN_LENMIN 12 @@ -4419,9 +2414,11 @@ /* MC_CMD_DBIWROP_TYPEDEF structuredef */ #define MC_CMD_DBIWROP_TYPEDEF_LEN 12 #define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_OFST 0 +#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LEN 4 #define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LBN 0 #define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_WIDTH 32 #define MC_CMD_DBIWROP_TYPEDEF_PARMS_OFST 4 +#define MC_CMD_DBIWROP_TYPEDEF_PARMS_LEN 4 #define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_LBN 16 #define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_WIDTH 16 #define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_LBN 15 @@ -4431,6 +2428,7 @@ #define MC_CMD_DBIWROP_TYPEDEF_PARMS_LBN 32 #define MC_CMD_DBIWROP_TYPEDEF_PARMS_WIDTH 32 #define MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST 8 +#define MC_CMD_DBIWROP_TYPEDEF_VALUE_LEN 4 #define MC_CMD_DBIWROP_TYPEDEF_VALUE_LBN 64 #define MC_CMD_DBIWROP_TYPEDEF_VALUE_WIDTH 32 @@ -4446,13 +2444,16 @@ #define MC_CMD_PORT_READ32_IN_LEN 4 /* Address */ #define MC_CMD_PORT_READ32_IN_ADDR_OFST 0 +#define MC_CMD_PORT_READ32_IN_ADDR_LEN 4 /* MC_CMD_PORT_READ32_OUT msgresponse */ #define MC_CMD_PORT_READ32_OUT_LEN 8 /* Value */ #define MC_CMD_PORT_READ32_OUT_VALUE_OFST 0 +#define MC_CMD_PORT_READ32_OUT_VALUE_LEN 4 /* Status */ #define MC_CMD_PORT_READ32_OUT_STATUS_OFST 4 +#define MC_CMD_PORT_READ32_OUT_STATUS_LEN 4 /***********************************/ @@ -4466,13 +2467,16 @@ #define MC_CMD_PORT_WRITE32_IN_LEN 8 /* Address */ #define MC_CMD_PORT_WRITE32_IN_ADDR_OFST 0 +#define MC_CMD_PORT_WRITE32_IN_ADDR_LEN 4 /* Value */ #define MC_CMD_PORT_WRITE32_IN_VALUE_OFST 4 +#define MC_CMD_PORT_WRITE32_IN_VALUE_LEN 4 /* MC_CMD_PORT_WRITE32_OUT msgresponse */ #define MC_CMD_PORT_WRITE32_OUT_LEN 4 /* Status */ #define MC_CMD_PORT_WRITE32_OUT_STATUS_OFST 0 +#define MC_CMD_PORT_WRITE32_OUT_STATUS_LEN 4 /***********************************/ @@ -4486,6 +2490,7 @@ #define MC_CMD_PORT_READ128_IN_LEN 4 /* Address */ #define MC_CMD_PORT_READ128_IN_ADDR_OFST 0 +#define MC_CMD_PORT_READ128_IN_ADDR_LEN 4 /* MC_CMD_PORT_READ128_OUT msgresponse */ #define 
MC_CMD_PORT_READ128_OUT_LEN 20 @@ -4494,6 +2499,7 @@ #define MC_CMD_PORT_READ128_OUT_VALUE_LEN 16 /* Status */ #define MC_CMD_PORT_READ128_OUT_STATUS_OFST 16 +#define MC_CMD_PORT_READ128_OUT_STATUS_LEN 4 /***********************************/ @@ -4507,6 +2513,7 @@ #define MC_CMD_PORT_WRITE128_IN_LEN 20 /* Address */ #define MC_CMD_PORT_WRITE128_IN_ADDR_OFST 0 +#define MC_CMD_PORT_WRITE128_IN_ADDR_LEN 4 /* Value */ #define MC_CMD_PORT_WRITE128_IN_VALUE_OFST 4 #define MC_CMD_PORT_WRITE128_IN_VALUE_LEN 16 @@ -4515,6 +2522,7 @@ #define MC_CMD_PORT_WRITE128_OUT_LEN 4 /* Status */ #define MC_CMD_PORT_WRITE128_OUT_STATUS_OFST 0 +#define MC_CMD_PORT_WRITE128_OUT_STATUS_LEN 4 /* MC_CMD_CAPABILITIES structuredef */ #define MC_CMD_CAPABILITIES_LEN 4 @@ -4560,24 +2568,54 @@ #define MC_CMD_GET_BOARD_CFG_OUT_LENMAX 136 #define MC_CMD_GET_BOARD_CFG_OUT_LEN(num) (72+2*(num)) #define MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_OFST 0 +#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_LEN 4 #define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_OFST 4 #define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_LEN 32 -/* See MC_CMD_CAPABILITIES */ +/* Capabilities for Siena Port0 (see struct MC_CMD_CAPABILITIES). Unused on + * EF10 and later (use MC_CMD_GET_CAPABILITIES). + */ #define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT0_OFST 36 -/* See MC_CMD_CAPABILITIES */ +#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT0_LEN 4 +/* Capabilities for Siena Port1 (see struct MC_CMD_CAPABILITIES). Unused on + * EF10 and later (use MC_CMD_GET_CAPABILITIES). + */ #define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT1_OFST 40 +#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT1_LEN 4 +/* Base MAC address for Siena Port0. Unused on EF10 and later (use + * MC_CMD_GET_MAC_ADDRESSES). + */ #define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST 44 #define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_LEN 6 +/* Base MAC address for Siena Port1. Unused on EF10 and later (use + * MC_CMD_GET_MAC_ADDRESSES). + */ #define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST 50 #define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_LEN 6 +/* Size of MAC address pool for Siena Port0. Unused on EF10 and later (use + * MC_CMD_GET_MAC_ADDRESSES). + */ #define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT0_OFST 56 +#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT0_LEN 4 +/* Size of MAC address pool for Siena Port1. Unused on EF10 and later (use + * MC_CMD_GET_MAC_ADDRESSES). + */ #define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT1_OFST 60 +#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT1_LEN 4 +/* Increment between addresses in MAC address pool for Siena Port0. Unused on + * EF10 and later (use MC_CMD_GET_MAC_ADDRESSES). + */ #define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT0_OFST 64 +#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT0_LEN 4 +/* Increment between addresses in MAC address pool for Siena Port1. Unused on + * EF10 and later (use MC_CMD_GET_MAC_ADDRESSES). + */ #define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT1_OFST 68 -/* This field contains a 16-bit value for each of the types of NVRAM area. The - * values are defined in the firmware/mc/platform/.c file for a specific board - * type, but otherwise have no meaning to the MC; they are used by the driver - * to manage selection of appropriate firmware updates. +#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT1_LEN 4 +/* Siena only. This field contains a 16-bit value for each of the types of + * NVRAM area. 
The values are defined in the firmware/mc/platform/.c file for a + * specific board type, but otherwise have no meaning to the MC; they are used + * by the driver to manage selection of appropriate firmware updates. Unused on + * EF10 and later (use MC_CMD_NVRAM_METADATA). */ #define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST 72 #define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN 2 @@ -4592,7 +2630,7 @@ #define MC_CMD_DBI_READX 0x19 #undef MC_CMD_0x19_PRIVILEGE_CTG -#define MC_CMD_0x19_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x19_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_DBI_READX_IN msgrequest */ #define MC_CMD_DBI_READX_IN_LENMIN 8 @@ -4619,9 +2657,11 @@ /* MC_CMD_DBIRDOP_TYPEDEF structuredef */ #define MC_CMD_DBIRDOP_TYPEDEF_LEN 8 #define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_OFST 0 +#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_LEN 4 #define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_LBN 0 #define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_WIDTH 32 #define MC_CMD_DBIRDOP_TYPEDEF_PARMS_OFST 4 +#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_LEN 4 #define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_LBN 16 #define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_WIDTH 16 #define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_LBN 15 @@ -4639,7 +2679,7 @@ #define MC_CMD_SET_RAND_SEED 0x1a #undef MC_CMD_0x1a_PRIVILEGE_CTG -#define MC_CMD_0x1a_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x1a_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_SET_RAND_SEED_IN msgrequest */ #define MC_CMD_SET_RAND_SEED_IN_LEN 16 @@ -4689,14 +2729,25 @@ #define MC_CMD_DRV_ATTACH_IN_LEN 12 /* new state to set if UPDATE=1 */ #define MC_CMD_DRV_ATTACH_IN_NEW_STATE_OFST 0 +#define MC_CMD_DRV_ATTACH_IN_NEW_STATE_LEN 4 #define MC_CMD_DRV_ATTACH_LBN 0 #define MC_CMD_DRV_ATTACH_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_ATTACH_LBN 0 +#define MC_CMD_DRV_ATTACH_IN_ATTACH_WIDTH 1 #define MC_CMD_DRV_PREBOOT_LBN 1 #define MC_CMD_DRV_PREBOOT_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_PREBOOT_LBN 1 +#define MC_CMD_DRV_ATTACH_IN_PREBOOT_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_SUBVARIANT_AWARE_LBN 2 +#define MC_CMD_DRV_ATTACH_IN_SUBVARIANT_AWARE_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_WANT_VI_SPREADING_LBN 3 +#define MC_CMD_DRV_ATTACH_IN_WANT_VI_SPREADING_WIDTH 1 /* 1 to set new state, or 0 to just report the existing state */ #define MC_CMD_DRV_ATTACH_IN_UPDATE_OFST 4 +#define MC_CMD_DRV_ATTACH_IN_UPDATE_LEN 4 /* preferred datapath firmware (for Huntington; ignored for Siena) */ #define MC_CMD_DRV_ATTACH_IN_FIRMWARE_ID_OFST 8 +#define MC_CMD_DRV_ATTACH_IN_FIRMWARE_ID_LEN 4 /* enum: Prefer to use full featured firmware */ #define MC_CMD_FW_FULL_FEATURED 0x0 /* enum: Prefer to use firmware with fewer features but lower latency */ @@ -4713,20 +2764,35 @@ * support */ #define MC_CMD_FW_RULES_ENGINE 0x5 +/* enum: Prefer to use firmware with additional DPDK support */ +#define MC_CMD_FW_DPDK 0x6 +/* enum: Prefer to use "l3xudp" custom datapath firmware (see SF-119495-PD and + * bug69716) + */ +#define MC_CMD_FW_L3XUDP 0x7 +/* enum: Requests that the MC keep whatever datapath firmware is currently + * running. It's used for test purposes, where we want to be able to shmboot + * special test firmware variants. This option is only recognised in eftest + * (i.e. non-production) builds. 
+ */ +#define MC_CMD_FW_KEEP_CURRENT_EFTEST_ONLY 0xfffffffe /* enum: Only this option is allowed for non-admin functions */ -#define MC_CMD_FW_DONT_CARE 0xffffffff +#define MC_CMD_FW_DONT_CARE 0xffffffff /* MC_CMD_DRV_ATTACH_OUT msgresponse */ #define MC_CMD_DRV_ATTACH_OUT_LEN 4 /* previous or existing state, see the bitmask at NEW_STATE */ #define MC_CMD_DRV_ATTACH_OUT_OLD_STATE_OFST 0 +#define MC_CMD_DRV_ATTACH_OUT_OLD_STATE_LEN 4 /* MC_CMD_DRV_ATTACH_EXT_OUT msgresponse */ #define MC_CMD_DRV_ATTACH_EXT_OUT_LEN 8 /* previous or existing state, see the bitmask at NEW_STATE */ #define MC_CMD_DRV_ATTACH_EXT_OUT_OLD_STATE_OFST 0 +#define MC_CMD_DRV_ATTACH_EXT_OUT_OLD_STATE_LEN 4 /* Flags associated with this function */ #define MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_OFST 4 +#define MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_LEN 4 /* enum: Labels the lowest-numbered function visible to the OS */ #define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY 0x0 /* enum: The function can control the link state of the physical port it is @@ -4739,6 +2805,11 @@ * refers to the Sorrento external FPGA port. */ #define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_NO_ACTIVE_PORT 0x3 +/* enum: If set, indicates that VI spreading is currently enabled. Will always + * indicate the current state, regardless of the value in the WANT_VI_SPREADING + * input. + */ +#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_VI_SPREADING_ENABLED 0x4 /***********************************/ @@ -4751,6 +2822,7 @@ #define MC_CMD_SHMUART_IN_LEN 4 /* ??? */ #define MC_CMD_SHMUART_IN_FLAG_OFST 0 +#define MC_CMD_SHMUART_IN_FLAG_LEN 4 /* MC_CMD_SHMUART_OUT msgresponse */ #define MC_CMD_SHMUART_OUT_LEN 0 @@ -4789,6 +2861,7 @@ * (TBD). */ #define MC_CMD_ENTITY_RESET_IN_FLAG_OFST 0 +#define MC_CMD_ENTITY_RESET_IN_FLAG_LEN 4 #define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_LBN 0 #define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_WIDTH 1 @@ -4806,8 +2879,10 @@ #define MC_CMD_PCIE_CREDITS_IN_LEN 8 /* poll period. 
0 is disabled */ #define MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_OFST 0 +#define MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_LEN 4 /* wipe statistics */ #define MC_CMD_PCIE_CREDITS_IN_WIPE_OFST 4 +#define MC_CMD_PCIE_CREDITS_IN_WIPE_LEN 4 /* MC_CMD_PCIE_CREDITS_OUT msgresponse */ #define MC_CMD_PCIE_CREDITS_OUT_LEN 16 @@ -4838,31 +2913,54 @@ /* MC_CMD_RXD_MONITOR_IN msgrequest */ #define MC_CMD_RXD_MONITOR_IN_LEN 12 #define MC_CMD_RXD_MONITOR_IN_QID_OFST 0 +#define MC_CMD_RXD_MONITOR_IN_QID_LEN 4 #define MC_CMD_RXD_MONITOR_IN_POLL_PERIOD_OFST 4 +#define MC_CMD_RXD_MONITOR_IN_POLL_PERIOD_LEN 4 #define MC_CMD_RXD_MONITOR_IN_WIPE_OFST 8 +#define MC_CMD_RXD_MONITOR_IN_WIPE_LEN 4 /* MC_CMD_RXD_MONITOR_OUT msgresponse */ #define MC_CMD_RXD_MONITOR_OUT_LEN 80 #define MC_CMD_RXD_MONITOR_OUT_QID_OFST 0 +#define MC_CMD_RXD_MONITOR_OUT_QID_LEN 4 #define MC_CMD_RXD_MONITOR_OUT_RING_FILL_OFST 4 +#define MC_CMD_RXD_MONITOR_OUT_RING_FILL_LEN 4 #define MC_CMD_RXD_MONITOR_OUT_CACHE_FILL_OFST 8 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_FILL_LEN 4 #define MC_CMD_RXD_MONITOR_OUT_RING_LT_1_OFST 12 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_1_LEN 4 #define MC_CMD_RXD_MONITOR_OUT_RING_LT_2_OFST 16 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_2_LEN 4 #define MC_CMD_RXD_MONITOR_OUT_RING_LT_4_OFST 20 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_4_LEN 4 #define MC_CMD_RXD_MONITOR_OUT_RING_LT_8_OFST 24 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_8_LEN 4 #define MC_CMD_RXD_MONITOR_OUT_RING_LT_16_OFST 28 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_16_LEN 4 #define MC_CMD_RXD_MONITOR_OUT_RING_LT_32_OFST 32 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_32_LEN 4 #define MC_CMD_RXD_MONITOR_OUT_RING_LT_64_OFST 36 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_64_LEN 4 #define MC_CMD_RXD_MONITOR_OUT_RING_LT_128_OFST 40 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_128_LEN 4 #define MC_CMD_RXD_MONITOR_OUT_RING_LT_256_OFST 44 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_256_LEN 4 #define MC_CMD_RXD_MONITOR_OUT_RING_GE_256_OFST 48 +#define MC_CMD_RXD_MONITOR_OUT_RING_GE_256_LEN 4 #define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_1_OFST 52 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_1_LEN 4 #define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_2_OFST 56 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_2_LEN 4 #define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_4_OFST 60 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_4_LEN 4 #define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_8_OFST 64 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_8_LEN 4 #define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_16_OFST 68 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_16_LEN 4 #define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_32_OFST 72 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_32_LEN 4 #define MC_CMD_RXD_MONITOR_OUT_CACHE_GE_32_OFST 76 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_GE_32_LEN 4 /***********************************/ @@ -4872,13 +2970,14 @@ #define MC_CMD_PUTS 0x23 #undef MC_CMD_0x23_PRIVILEGE_CTG -#define MC_CMD_0x23_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x23_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_PUTS_IN msgrequest */ #define MC_CMD_PUTS_IN_LENMIN 13 #define MC_CMD_PUTS_IN_LENMAX 252 #define MC_CMD_PUTS_IN_LEN(num) (12+1*(num)) #define MC_CMD_PUTS_IN_DEST_OFST 0 +#define MC_CMD_PUTS_IN_DEST_LEN 4 #define MC_CMD_PUTS_IN_UART_LBN 0 #define MC_CMD_PUTS_IN_UART_WIDTH 1 #define MC_CMD_PUTS_IN_PORT_LBN 1 @@ -4911,6 +3010,7 @@ #define MC_CMD_GET_PHY_CFG_OUT_LEN 72 /* flags */ #define MC_CMD_GET_PHY_CFG_OUT_FLAGS_OFST 0 +#define MC_CMD_GET_PHY_CFG_OUT_FLAGS_LEN 4 #define MC_CMD_GET_PHY_CFG_OUT_PRESENT_LBN 0 #define MC_CMD_GET_PHY_CFG_OUT_PRESENT_WIDTH 1 #define 
MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_LBN 1 @@ -4927,8 +3027,10 @@ #define MC_CMD_GET_PHY_CFG_OUT_BIST_WIDTH 1 /* ?? */ #define MC_CMD_GET_PHY_CFG_OUT_TYPE_OFST 4 +#define MC_CMD_GET_PHY_CFG_OUT_TYPE_LEN 4 /* Bitmask of supported capabilities */ #define MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_OFST 8 +#define MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_LEN 4 #define MC_CMD_PHY_CAP_10HDX_LBN 1 #define MC_CMD_PHY_CAP_10HDX_WIDTH 1 #define MC_CMD_PHY_CAP_10FDX_LBN 2 @@ -4953,17 +3055,39 @@ #define MC_CMD_PHY_CAP_40000FDX_WIDTH 1 #define MC_CMD_PHY_CAP_DDM_LBN 12 #define MC_CMD_PHY_CAP_DDM_WIDTH 1 +#define MC_CMD_PHY_CAP_100000FDX_LBN 13 +#define MC_CMD_PHY_CAP_100000FDX_WIDTH 1 +#define MC_CMD_PHY_CAP_25000FDX_LBN 14 +#define MC_CMD_PHY_CAP_25000FDX_WIDTH 1 +#define MC_CMD_PHY_CAP_50000FDX_LBN 15 +#define MC_CMD_PHY_CAP_50000FDX_WIDTH 1 +#define MC_CMD_PHY_CAP_BASER_FEC_LBN 16 +#define MC_CMD_PHY_CAP_BASER_FEC_WIDTH 1 +#define MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_LBN 17 +#define MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_WIDTH 1 +#define MC_CMD_PHY_CAP_RS_FEC_LBN 18 +#define MC_CMD_PHY_CAP_RS_FEC_WIDTH 1 +#define MC_CMD_PHY_CAP_RS_FEC_REQUESTED_LBN 19 +#define MC_CMD_PHY_CAP_RS_FEC_REQUESTED_WIDTH 1 +#define MC_CMD_PHY_CAP_25G_BASER_FEC_LBN 20 +#define MC_CMD_PHY_CAP_25G_BASER_FEC_WIDTH 1 +#define MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN 21 +#define MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_WIDTH 1 /* ?? */ #define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_OFST 12 +#define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_LEN 4 /* ?? */ #define MC_CMD_GET_PHY_CFG_OUT_PRT_OFST 16 +#define MC_CMD_GET_PHY_CFG_OUT_PRT_LEN 4 /* ?? */ #define MC_CMD_GET_PHY_CFG_OUT_STATS_MASK_OFST 20 +#define MC_CMD_GET_PHY_CFG_OUT_STATS_MASK_LEN 4 /* ?? */ #define MC_CMD_GET_PHY_CFG_OUT_NAME_OFST 24 #define MC_CMD_GET_PHY_CFG_OUT_NAME_LEN 20 /* ?? */ #define MC_CMD_GET_PHY_CFG_OUT_MEDIA_TYPE_OFST 44 +#define MC_CMD_GET_PHY_CFG_OUT_MEDIA_TYPE_LEN 4 /* enum: Xaui. */ #define MC_CMD_MEDIA_XAUI 0x1 /* enum: CX4. */ @@ -4979,6 +3103,7 @@ /* enum: QSFP+. */ #define MC_CMD_MEDIA_QSFP_PLUS 0x7 #define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_OFST 48 +#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_LEN 4 /* enum: Native clause 22 */ #define MC_CMD_MMD_CLAUSE22 0x0 #define MC_CMD_MMD_CLAUSE45_PMAPMD 0x1 /* enum */ @@ -5004,12 +3129,13 @@ #define MC_CMD_START_BIST 0x25 #undef MC_CMD_0x25_PRIVILEGE_CTG -#define MC_CMD_0x25_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x25_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND /* MC_CMD_START_BIST_IN msgrequest */ #define MC_CMD_START_BIST_IN_LEN 4 /* Type of test. */ #define MC_CMD_START_BIST_IN_TYPE_OFST 0 +#define MC_CMD_START_BIST_IN_TYPE_LEN 4 /* enum: Run the PHY's short cable BIST. */ #define MC_CMD_PHY_BIST_CABLE_SHORT 0x1 /* enum: Run the PHY's long cable BIST. */ @@ -5043,7 +3169,7 @@ #define MC_CMD_POLL_BIST 0x26 #undef MC_CMD_0x26_PRIVILEGE_CTG -#define MC_CMD_0x26_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x26_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND /* MC_CMD_POLL_BIST_IN msgrequest */ #define MC_CMD_POLL_BIST_IN_LEN 0 @@ -5052,6 +3178,7 @@ #define MC_CMD_POLL_BIST_OUT_LEN 8 /* result */ #define MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 +#define MC_CMD_POLL_BIST_OUT_RESULT_LEN 4 /* enum: Running. */ #define MC_CMD_POLL_BIST_RUNNING 0x1 /* enum: Passed. */ @@ -5061,19 +3188,26 @@ /* enum: Timed-out. 
*/ #define MC_CMD_POLL_BIST_TIMEOUT 0x4 #define MC_CMD_POLL_BIST_OUT_PRIVATE_OFST 4 +#define MC_CMD_POLL_BIST_OUT_PRIVATE_LEN 4 /* MC_CMD_POLL_BIST_OUT_SFT9001 msgresponse */ #define MC_CMD_POLL_BIST_OUT_SFT9001_LEN 36 /* result */ /* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */ +/* MC_CMD_POLL_BIST_OUT_RESULT_LEN 4 */ /* Enum values, see field(s): */ /* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */ #define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_OFST 4 +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_LEN 4 #define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_OFST 8 +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_LEN 4 #define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_OFST 12 +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_LEN 4 #define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_OFST 16 +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_LEN 4 /* Status of each channel A */ #define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_OFST 20 +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_LEN 4 /* enum: Ok. */ #define MC_CMD_POLL_BIST_SFT9001_PAIR_OK 0x1 /* enum: Open. */ @@ -5086,14 +3220,17 @@ #define MC_CMD_POLL_BIST_SFT9001_PAIR_BUSY 0x9 /* Status of each channel B */ #define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_OFST 24 +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_LEN 4 /* Enum values, see field(s): */ /* CABLE_STATUS_A */ /* Status of each channel C */ #define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_OFST 28 +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_LEN 4 /* Enum values, see field(s): */ /* CABLE_STATUS_A */ /* Status of each channel D */ #define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_OFST 32 +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_LEN 4 /* Enum values, see field(s): */ /* CABLE_STATUS_A */ @@ -5101,9 +3238,11 @@ #define MC_CMD_POLL_BIST_OUT_MRSFP_LEN 8 /* result */ /* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */ +/* MC_CMD_POLL_BIST_OUT_RESULT_LEN 4 */ /* Enum values, see field(s): */ /* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */ #define MC_CMD_POLL_BIST_OUT_MRSFP_TEST_OFST 4 +#define MC_CMD_POLL_BIST_OUT_MRSFP_TEST_LEN 4 /* enum: Complete. */ #define MC_CMD_POLL_BIST_MRSFP_TEST_COMPLETE 0x0 /* enum: Bus switch off I2C write. */ @@ -5127,9 +3266,11 @@ #define MC_CMD_POLL_BIST_OUT_MEM_LEN 36 /* result */ /* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */ +/* MC_CMD_POLL_BIST_OUT_RESULT_LEN 4 */ /* Enum values, see field(s): */ /* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */ #define MC_CMD_POLL_BIST_OUT_MEM_TEST_OFST 4 +#define MC_CMD_POLL_BIST_OUT_MEM_TEST_LEN 4 /* enum: Test has completed. */ #define MC_CMD_POLL_BIST_MEM_COMPLETE 0x0 /* enum: RAM test - walk ones. */ @@ -5146,8 +3287,10 @@ #define MC_CMD_POLL_BIST_MEM_ECC 0x6 /* Failure address, only valid if result is POLL_BIST_FAILED */ #define MC_CMD_POLL_BIST_OUT_MEM_ADDR_OFST 8 +#define MC_CMD_POLL_BIST_OUT_MEM_ADDR_LEN 4 /* Bus or address space to which the failure address corresponds */ #define MC_CMD_POLL_BIST_OUT_MEM_BUS_OFST 12 +#define MC_CMD_POLL_BIST_OUT_MEM_BUS_LEN 4 /* enum: MC MIPS bus. */ #define MC_CMD_POLL_BIST_MEM_BUS_MC 0x0 /* enum: CSR IREG bus. 
*/ @@ -5168,14 +3311,19 @@ #define MC_CMD_POLL_BIST_MEM_BUS_DICPU_RX1 0x8 /* Pattern written to RAM / register */ #define MC_CMD_POLL_BIST_OUT_MEM_EXPECT_OFST 16 +#define MC_CMD_POLL_BIST_OUT_MEM_EXPECT_LEN 4 /* Actual value read from RAM / register */ #define MC_CMD_POLL_BIST_OUT_MEM_ACTUAL_OFST 20 +#define MC_CMD_POLL_BIST_OUT_MEM_ACTUAL_LEN 4 /* ECC error mask */ #define MC_CMD_POLL_BIST_OUT_MEM_ECC_OFST 24 +#define MC_CMD_POLL_BIST_OUT_MEM_ECC_LEN 4 /* ECC parity error mask */ #define MC_CMD_POLL_BIST_OUT_MEM_ECC_PARITY_OFST 28 +#define MC_CMD_POLL_BIST_OUT_MEM_ECC_PARITY_LEN 4 /* ECC fatal error mask */ #define MC_CMD_POLL_BIST_OUT_MEM_ECC_FATAL_OFST 32 +#define MC_CMD_POLL_BIST_OUT_MEM_ECC_FATAL_LEN 4 /***********************************/ @@ -5222,83 +3370,83 @@ #define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LO_OFST 0 #define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_HI_OFST 4 /* enum: None. */ -#define MC_CMD_LOOPBACK_NONE 0x0 +#define MC_CMD_LOOPBACK_NONE 0x0 /* enum: Data. */ -#define MC_CMD_LOOPBACK_DATA 0x1 +#define MC_CMD_LOOPBACK_DATA 0x1 /* enum: GMAC. */ -#define MC_CMD_LOOPBACK_GMAC 0x2 +#define MC_CMD_LOOPBACK_GMAC 0x2 /* enum: XGMII. */ #define MC_CMD_LOOPBACK_XGMII 0x3 /* enum: XGXS. */ -#define MC_CMD_LOOPBACK_XGXS 0x4 +#define MC_CMD_LOOPBACK_XGXS 0x4 /* enum: XAUI. */ -#define MC_CMD_LOOPBACK_XAUI 0x5 +#define MC_CMD_LOOPBACK_XAUI 0x5 /* enum: GMII. */ -#define MC_CMD_LOOPBACK_GMII 0x6 +#define MC_CMD_LOOPBACK_GMII 0x6 /* enum: SGMII. */ -#define MC_CMD_LOOPBACK_SGMII 0x7 +#define MC_CMD_LOOPBACK_SGMII 0x7 /* enum: XGBR. */ -#define MC_CMD_LOOPBACK_XGBR 0x8 +#define MC_CMD_LOOPBACK_XGBR 0x8 /* enum: XFI. */ -#define MC_CMD_LOOPBACK_XFI 0x9 +#define MC_CMD_LOOPBACK_XFI 0x9 /* enum: XAUI Far. */ -#define MC_CMD_LOOPBACK_XAUI_FAR 0xa +#define MC_CMD_LOOPBACK_XAUI_FAR 0xa /* enum: GMII Far. */ -#define MC_CMD_LOOPBACK_GMII_FAR 0xb +#define MC_CMD_LOOPBACK_GMII_FAR 0xb /* enum: SGMII Far. */ -#define MC_CMD_LOOPBACK_SGMII_FAR 0xc +#define MC_CMD_LOOPBACK_SGMII_FAR 0xc /* enum: XFI Far. */ -#define MC_CMD_LOOPBACK_XFI_FAR 0xd +#define MC_CMD_LOOPBACK_XFI_FAR 0xd /* enum: GPhy. */ -#define MC_CMD_LOOPBACK_GPHY 0xe +#define MC_CMD_LOOPBACK_GPHY 0xe /* enum: PhyXS. */ -#define MC_CMD_LOOPBACK_PHYXS 0xf +#define MC_CMD_LOOPBACK_PHYXS 0xf /* enum: PCS. */ -#define MC_CMD_LOOPBACK_PCS 0x10 +#define MC_CMD_LOOPBACK_PCS 0x10 /* enum: PMA-PMD. */ -#define MC_CMD_LOOPBACK_PMAPMD 0x11 +#define MC_CMD_LOOPBACK_PMAPMD 0x11 /* enum: Cross-Port. */ -#define MC_CMD_LOOPBACK_XPORT 0x12 +#define MC_CMD_LOOPBACK_XPORT 0x12 /* enum: XGMII-Wireside. */ -#define MC_CMD_LOOPBACK_XGMII_WS 0x13 +#define MC_CMD_LOOPBACK_XGMII_WS 0x13 /* enum: XAUI Wireside. */ -#define MC_CMD_LOOPBACK_XAUI_WS 0x14 +#define MC_CMD_LOOPBACK_XAUI_WS 0x14 /* enum: XAUI Wireside Far. */ -#define MC_CMD_LOOPBACK_XAUI_WS_FAR 0x15 +#define MC_CMD_LOOPBACK_XAUI_WS_FAR 0x15 /* enum: XAUI Wireside near. */ -#define MC_CMD_LOOPBACK_XAUI_WS_NEAR 0x16 +#define MC_CMD_LOOPBACK_XAUI_WS_NEAR 0x16 /* enum: GMII Wireside. */ -#define MC_CMD_LOOPBACK_GMII_WS 0x17 +#define MC_CMD_LOOPBACK_GMII_WS 0x17 /* enum: XFI Wireside. */ -#define MC_CMD_LOOPBACK_XFI_WS 0x18 +#define MC_CMD_LOOPBACK_XFI_WS 0x18 /* enum: XFI Wireside Far. */ -#define MC_CMD_LOOPBACK_XFI_WS_FAR 0x19 +#define MC_CMD_LOOPBACK_XFI_WS_FAR 0x19 /* enum: PhyXS Wireside. */ -#define MC_CMD_LOOPBACK_PHYXS_WS 0x1a +#define MC_CMD_LOOPBACK_PHYXS_WS 0x1a /* enum: PMA lanes MAC-Serdes. 
*/ -#define MC_CMD_LOOPBACK_PMA_INT 0x1b +#define MC_CMD_LOOPBACK_PMA_INT 0x1b /* enum: KR Serdes Parallel (Encoder). */ -#define MC_CMD_LOOPBACK_SD_NEAR 0x1c +#define MC_CMD_LOOPBACK_SD_NEAR 0x1c /* enum: KR Serdes Serial. */ -#define MC_CMD_LOOPBACK_SD_FAR 0x1d +#define MC_CMD_LOOPBACK_SD_FAR 0x1d /* enum: PMA lanes MAC-Serdes Wireside. */ -#define MC_CMD_LOOPBACK_PMA_INT_WS 0x1e +#define MC_CMD_LOOPBACK_PMA_INT_WS 0x1e /* enum: KR Serdes Parallel Wireside (Full PCS). */ -#define MC_CMD_LOOPBACK_SD_FEP2_WS 0x1f +#define MC_CMD_LOOPBACK_SD_FEP2_WS 0x1f /* enum: KR Serdes Parallel Wireside (Sym Aligner to TX). */ -#define MC_CMD_LOOPBACK_SD_FEP1_5_WS 0x20 +#define MC_CMD_LOOPBACK_SD_FEP1_5_WS 0x20 /* enum: KR Serdes Parallel Wireside (Deserializer to Serializer). */ -#define MC_CMD_LOOPBACK_SD_FEP_WS 0x21 +#define MC_CMD_LOOPBACK_SD_FEP_WS 0x21 /* enum: KR Serdes Serial Wireside. */ -#define MC_CMD_LOOPBACK_SD_FES_WS 0x22 +#define MC_CMD_LOOPBACK_SD_FES_WS 0x22 /* enum: Near side of AOE Siena side port */ -#define MC_CMD_LOOPBACK_AOE_INT_NEAR 0x23 +#define MC_CMD_LOOPBACK_AOE_INT_NEAR 0x23 /* enum: Medford Wireside datapath loopback */ -#define MC_CMD_LOOPBACK_DATA_WS 0x24 +#define MC_CMD_LOOPBACK_DATA_WS 0x24 /* enum: Force link up without setting up any physical loopback (snapper use * only) */ -#define MC_CMD_LOOPBACK_FORCE_EXT_LINK 0x25 +#define MC_CMD_LOOPBACK_FORCE_EXT_LINK 0x25 /* Supported loopbacks. */ #define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_OFST 8 #define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LEN 8 @@ -5328,6 +3476,174 @@ /* Enum values, see field(s): */ /* 100M */ +/* MC_CMD_GET_LOOPBACK_MODES_OUT_V2 msgresponse: Supported loopback modes for + * newer NICs with 25G/50G/100G support + */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_LEN 64 +/* Supported loopbacks. */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_OFST 0 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_LO_OFST 0 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_HI_OFST 4 +/* enum: None. */ +/* MC_CMD_LOOPBACK_NONE 0x0 */ +/* enum: Data. */ +/* MC_CMD_LOOPBACK_DATA 0x1 */ +/* enum: GMAC. */ +/* MC_CMD_LOOPBACK_GMAC 0x2 */ +/* enum: XGMII. */ +/* MC_CMD_LOOPBACK_XGMII 0x3 */ +/* enum: XGXS. */ +/* MC_CMD_LOOPBACK_XGXS 0x4 */ +/* enum: XAUI. */ +/* MC_CMD_LOOPBACK_XAUI 0x5 */ +/* enum: GMII. */ +/* MC_CMD_LOOPBACK_GMII 0x6 */ +/* enum: SGMII. */ +/* MC_CMD_LOOPBACK_SGMII 0x7 */ +/* enum: XGBR. */ +/* MC_CMD_LOOPBACK_XGBR 0x8 */ +/* enum: XFI. */ +/* MC_CMD_LOOPBACK_XFI 0x9 */ +/* enum: XAUI Far. */ +/* MC_CMD_LOOPBACK_XAUI_FAR 0xa */ +/* enum: GMII Far. */ +/* MC_CMD_LOOPBACK_GMII_FAR 0xb */ +/* enum: SGMII Far. */ +/* MC_CMD_LOOPBACK_SGMII_FAR 0xc */ +/* enum: XFI Far. */ +/* MC_CMD_LOOPBACK_XFI_FAR 0xd */ +/* enum: GPhy. */ +/* MC_CMD_LOOPBACK_GPHY 0xe */ +/* enum: PhyXS. */ +/* MC_CMD_LOOPBACK_PHYXS 0xf */ +/* enum: PCS. */ +/* MC_CMD_LOOPBACK_PCS 0x10 */ +/* enum: PMA-PMD. */ +/* MC_CMD_LOOPBACK_PMAPMD 0x11 */ +/* enum: Cross-Port. */ +/* MC_CMD_LOOPBACK_XPORT 0x12 */ +/* enum: XGMII-Wireside. */ +/* MC_CMD_LOOPBACK_XGMII_WS 0x13 */ +/* enum: XAUI Wireside. */ +/* MC_CMD_LOOPBACK_XAUI_WS 0x14 */ +/* enum: XAUI Wireside Far. */ +/* MC_CMD_LOOPBACK_XAUI_WS_FAR 0x15 */ +/* enum: XAUI Wireside near. */ +/* MC_CMD_LOOPBACK_XAUI_WS_NEAR 0x16 */ +/* enum: GMII Wireside. */ +/* MC_CMD_LOOPBACK_GMII_WS 0x17 */ +/* enum: XFI Wireside. */ +/* MC_CMD_LOOPBACK_XFI_WS 0x18 */ +/* enum: XFI Wireside Far. */ +/* MC_CMD_LOOPBACK_XFI_WS_FAR 0x19 */ +/* enum: PhyXS Wireside. 
*/
+/* MC_CMD_LOOPBACK_PHYXS_WS 0x1a */
+/* enum: PMA lanes MAC-Serdes. */
+/* MC_CMD_LOOPBACK_PMA_INT 0x1b */
+/* enum: KR Serdes Parallel (Encoder). */
+/* MC_CMD_LOOPBACK_SD_NEAR 0x1c */
+/* enum: KR Serdes Serial. */
+/* MC_CMD_LOOPBACK_SD_FAR 0x1d */
+/* enum: PMA lanes MAC-Serdes Wireside. */
+/* MC_CMD_LOOPBACK_PMA_INT_WS 0x1e */
+/* enum: KR Serdes Parallel Wireside (Full PCS). */
+/* MC_CMD_LOOPBACK_SD_FEP2_WS 0x1f */
+/* enum: KR Serdes Parallel Wireside (Sym Aligner to TX). */
+/* MC_CMD_LOOPBACK_SD_FEP1_5_WS 0x20 */
+/* enum: KR Serdes Parallel Wireside (Deserializer to Serializer). */
+/* MC_CMD_LOOPBACK_SD_FEP_WS 0x21 */
+/* enum: KR Serdes Serial Wireside. */
+/* MC_CMD_LOOPBACK_SD_FES_WS 0x22 */
+/* enum: Near side of AOE Siena side port */
+/* MC_CMD_LOOPBACK_AOE_INT_NEAR 0x23 */
+/* enum: Medford Wireside datapath loopback */
+/* MC_CMD_LOOPBACK_DATA_WS 0x24 */
+/* enum: Force link up without setting up any physical loopback (snapper use
+ * only)
+ */
+/* MC_CMD_LOOPBACK_FORCE_EXT_LINK 0x25 */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_OFST 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_LO_OFST 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_HI_OFST 12
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_OFST 16
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_LO_OFST 16
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_HI_OFST 20
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_OFST 24
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_LO_OFST 24
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_HI_OFST 28
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_LO_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_HI_OFST 36
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported 25G loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_OFST 40
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LO_OFST 40
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_HI_OFST 44
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported 50G loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_OFST 48
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_LO_OFST 48
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_HI_OFST 52
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported 100G loopbacks.
*/ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_OFST 56 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_LO_OFST 56 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_HI_OFST 60 +/* Enum values, see field(s): */ +/* 100M */ + +/* AN_TYPE structuredef: Auto-negotiation types defined in IEEE802.3 */ +#define AN_TYPE_LEN 4 +#define AN_TYPE_TYPE_OFST 0 +#define AN_TYPE_TYPE_LEN 4 +/* enum: None, AN disabled or not supported */ +#define MC_CMD_AN_NONE 0x0 +/* enum: Clause 28 - BASE-T */ +#define MC_CMD_AN_CLAUSE28 0x1 +/* enum: Clause 37 - BASE-X */ +#define MC_CMD_AN_CLAUSE37 0x2 +/* enum: Clause 73 - BASE-R startup protocol for backplane and copper cable + * assemblies. Includes Clause 72/Clause 92 link-training. + */ +#define MC_CMD_AN_CLAUSE73 0x3 +#define AN_TYPE_TYPE_LBN 0 +#define AN_TYPE_TYPE_WIDTH 32 + +/* FEC_TYPE structuredef: Forward error correction types defined in IEEE802.3 + */ +#define FEC_TYPE_LEN 4 +#define FEC_TYPE_TYPE_OFST 0 +#define FEC_TYPE_TYPE_LEN 4 +/* enum: No FEC */ +#define MC_CMD_FEC_NONE 0x0 +/* enum: Clause 74 BASE-R FEC (a.k.a Firecode) */ +#define MC_CMD_FEC_BASER 0x1 +/* enum: Clause 91/Clause 108 Reed-Solomon FEC */ +#define MC_CMD_FEC_RS 0x2 +#define FEC_TYPE_TYPE_LBN 0 +#define FEC_TYPE_TYPE_WIDTH 32 + /***********************************/ /* MC_CMD_GET_LINK @@ -5344,19 +3660,28 @@ /* MC_CMD_GET_LINK_OUT msgresponse */ #define MC_CMD_GET_LINK_OUT_LEN 28 -/* near-side advertised capabilities */ +/* Near-side advertised capabilities. Refer to + * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions. + */ #define MC_CMD_GET_LINK_OUT_CAP_OFST 0 -/* link-partner advertised capabilities */ +#define MC_CMD_GET_LINK_OUT_CAP_LEN 4 +/* Link-partner advertised capabilities. Refer to + * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions. + */ #define MC_CMD_GET_LINK_OUT_LP_CAP_OFST 4 +#define MC_CMD_GET_LINK_OUT_LP_CAP_LEN 4 /* Autonegotiated speed in mbit/s. The link may still be down even if this * reads non-zero. */ #define MC_CMD_GET_LINK_OUT_LINK_SPEED_OFST 8 +#define MC_CMD_GET_LINK_OUT_LINK_SPEED_LEN 4 /* Current loopback setting. */ #define MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_OFST 12 +#define MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */ #define MC_CMD_GET_LINK_OUT_FLAGS_OFST 16 +#define MC_CMD_GET_LINK_OUT_FLAGS_LEN 4 #define MC_CMD_GET_LINK_OUT_LINK_UP_LBN 0 #define MC_CMD_GET_LINK_OUT_LINK_UP_WIDTH 1 #define MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN 1 @@ -5371,9 +3696,11 @@ #define MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_WIDTH 1 /* This returns the negotiated flow control value. */ #define MC_CMD_GET_LINK_OUT_FCNTL_OFST 20 +#define MC_CMD_GET_LINK_OUT_FCNTL_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */ #define MC_CMD_GET_LINK_OUT_MAC_FAULT_OFST 24 +#define MC_CMD_GET_LINK_OUT_MAC_FAULT_LEN 4 #define MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0 #define MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1 #define MC_CMD_MAC_FAULT_XGMII_REMOTE_LBN 1 @@ -5383,6 +3710,97 @@ #define MC_CMD_MAC_FAULT_PENDING_RECONFIG_LBN 3 #define MC_CMD_MAC_FAULT_PENDING_RECONFIG_WIDTH 1 +/* MC_CMD_GET_LINK_OUT_V2 msgresponse: Extended link state information */ +#define MC_CMD_GET_LINK_OUT_V2_LEN 44 +/* Near-side advertised capabilities. Refer to + * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions. 
+ */ +#define MC_CMD_GET_LINK_OUT_V2_CAP_OFST 0 +#define MC_CMD_GET_LINK_OUT_V2_CAP_LEN 4 +/* Link-partner advertised capabilities. Refer to + * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions. + */ +#define MC_CMD_GET_LINK_OUT_V2_LP_CAP_OFST 4 +#define MC_CMD_GET_LINK_OUT_V2_LP_CAP_LEN 4 +/* Autonegotiated speed in mbit/s. The link may still be down even if this + * reads non-zero. + */ +#define MC_CMD_GET_LINK_OUT_V2_LINK_SPEED_OFST 8 +#define MC_CMD_GET_LINK_OUT_V2_LINK_SPEED_LEN 4 +/* Current loopback setting. */ +#define MC_CMD_GET_LINK_OUT_V2_LOOPBACK_MODE_OFST 12 +#define MC_CMD_GET_LINK_OUT_V2_LOOPBACK_MODE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */ +#define MC_CMD_GET_LINK_OUT_V2_FLAGS_OFST 16 +#define MC_CMD_GET_LINK_OUT_V2_FLAGS_LEN 4 +#define MC_CMD_GET_LINK_OUT_V2_LINK_UP_LBN 0 +#define MC_CMD_GET_LINK_OUT_V2_LINK_UP_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_FULL_DUPLEX_LBN 1 +#define MC_CMD_GET_LINK_OUT_V2_FULL_DUPLEX_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_BPX_LINK_LBN 2 +#define MC_CMD_GET_LINK_OUT_V2_BPX_LINK_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_PHY_LINK_LBN 3 +#define MC_CMD_GET_LINK_OUT_V2_PHY_LINK_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_RX_LBN 6 +#define MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_RX_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_TX_LBN 7 +#define MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_TX_WIDTH 1 +/* This returns the negotiated flow control value. */ +#define MC_CMD_GET_LINK_OUT_V2_FCNTL_OFST 20 +#define MC_CMD_GET_LINK_OUT_V2_FCNTL_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */ +#define MC_CMD_GET_LINK_OUT_V2_MAC_FAULT_OFST 24 +#define MC_CMD_GET_LINK_OUT_V2_MAC_FAULT_LEN 4 +/* MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0 */ +/* MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1 */ +/* MC_CMD_MAC_FAULT_XGMII_REMOTE_LBN 1 */ +/* MC_CMD_MAC_FAULT_XGMII_REMOTE_WIDTH 1 */ +/* MC_CMD_MAC_FAULT_SGMII_REMOTE_LBN 2 */ +/* MC_CMD_MAC_FAULT_SGMII_REMOTE_WIDTH 1 */ +/* MC_CMD_MAC_FAULT_PENDING_RECONFIG_LBN 3 */ +/* MC_CMD_MAC_FAULT_PENDING_RECONFIG_WIDTH 1 */ +/* True local device capabilities (taking into account currently used PMD/MDI, + * e.g. plugged-in module). In general, subset of + * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP, but may include extra _FEC_REQUEST + * bits, if the PMD requires FEC. 0 if unknown (e.g. module unplugged). Equal + * to SUPPORTED_CAP for non-pluggable PMDs. Refer to + * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions. 
+ */ +#define MC_CMD_GET_LINK_OUT_V2_LD_CAP_OFST 28 +#define MC_CMD_GET_LINK_OUT_V2_LD_CAP_LEN 4 +/* Auto-negotiation type used on the link */ +#define MC_CMD_GET_LINK_OUT_V2_AN_TYPE_OFST 32 +#define MC_CMD_GET_LINK_OUT_V2_AN_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* AN_TYPE/TYPE */ +/* Forward error correction used on the link */ +#define MC_CMD_GET_LINK_OUT_V2_FEC_TYPE_OFST 36 +#define MC_CMD_GET_LINK_OUT_V2_FEC_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* FEC_TYPE/TYPE */ +#define MC_CMD_GET_LINK_OUT_V2_EXT_FLAGS_OFST 40 +#define MC_CMD_GET_LINK_OUT_V2_EXT_FLAGS_LEN 4 +#define MC_CMD_GET_LINK_OUT_V2_PMD_MDI_CONNECTED_LBN 0 +#define MC_CMD_GET_LINK_OUT_V2_PMD_MDI_CONNECTED_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_PMD_READY_LBN 1 +#define MC_CMD_GET_LINK_OUT_V2_PMD_READY_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_PMD_LINK_UP_LBN 2 +#define MC_CMD_GET_LINK_OUT_V2_PMD_LINK_UP_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_PMA_LINK_UP_LBN 3 +#define MC_CMD_GET_LINK_OUT_V2_PMA_LINK_UP_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_PCS_LOCK_LBN 4 +#define MC_CMD_GET_LINK_OUT_V2_PCS_LOCK_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_ALIGN_LOCK_LBN 5 +#define MC_CMD_GET_LINK_OUT_V2_ALIGN_LOCK_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_HI_BER_LBN 6 +#define MC_CMD_GET_LINK_OUT_V2_HI_BER_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_FEC_LOCK_LBN 7 +#define MC_CMD_GET_LINK_OUT_V2_FEC_LOCK_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_AN_DONE_LBN 8 +#define MC_CMD_GET_LINK_OUT_V2_AN_DONE_WIDTH 1 + /***********************************/ /* MC_CMD_SET_LINK @@ -5396,10 +3814,14 @@ /* MC_CMD_SET_LINK_IN msgrequest */ #define MC_CMD_SET_LINK_IN_LEN 16 -/* ??? */ +/* Near-side advertised capabilities. Refer to + * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions. + */ #define MC_CMD_SET_LINK_IN_CAP_OFST 0 +#define MC_CMD_SET_LINK_IN_CAP_LEN 4 /* Flags */ #define MC_CMD_SET_LINK_IN_FLAGS_OFST 4 +#define MC_CMD_SET_LINK_IN_FLAGS_LEN 4 #define MC_CMD_SET_LINK_IN_LOWPOWER_LBN 0 #define MC_CMD_SET_LINK_IN_LOWPOWER_WIDTH 1 #define MC_CMD_SET_LINK_IN_POWEROFF_LBN 1 @@ -5408,12 +3830,14 @@ #define MC_CMD_SET_LINK_IN_TXDIS_WIDTH 1 /* Loopback mode. */ #define MC_CMD_SET_LINK_IN_LOOPBACK_MODE_OFST 8 +#define MC_CMD_SET_LINK_IN_LOOPBACK_MODE_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */ /* A loopback speed of "0" is supported, and means (choose any available * speed). */ #define MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_OFST 12 +#define MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_LEN 4 /* MC_CMD_SET_LINK_OUT msgresponse */ #define MC_CMD_SET_LINK_OUT_LEN 0 @@ -5432,9 +3856,10 @@ #define MC_CMD_SET_ID_LED_IN_LEN 4 /* Set LED state. */ #define MC_CMD_SET_ID_LED_IN_STATE_OFST 0 -#define MC_CMD_LED_OFF 0x0 /* enum */ -#define MC_CMD_LED_ON 0x1 /* enum */ -#define MC_CMD_LED_DEFAULT 0x2 /* enum */ +#define MC_CMD_SET_ID_LED_IN_STATE_LEN 4 +#define MC_CMD_LED_OFF 0x0 /* enum */ +#define MC_CMD_LED_ON 0x1 /* enum */ +#define MC_CMD_LED_DEFAULT 0x2 /* enum */ /* MC_CMD_SET_ID_LED_OUT msgresponse */ #define MC_CMD_SET_ID_LED_OUT_LEN 0 @@ -5455,17 +3880,21 @@ * EtherII, VLAN, bug16011 padding). 
*/ #define MC_CMD_SET_MAC_IN_MTU_OFST 0 +#define MC_CMD_SET_MAC_IN_MTU_LEN 4 #define MC_CMD_SET_MAC_IN_DRAIN_OFST 4 +#define MC_CMD_SET_MAC_IN_DRAIN_LEN 4 #define MC_CMD_SET_MAC_IN_ADDR_OFST 8 #define MC_CMD_SET_MAC_IN_ADDR_LEN 8 #define MC_CMD_SET_MAC_IN_ADDR_LO_OFST 8 #define MC_CMD_SET_MAC_IN_ADDR_HI_OFST 12 #define MC_CMD_SET_MAC_IN_REJECT_OFST 16 +#define MC_CMD_SET_MAC_IN_REJECT_LEN 4 #define MC_CMD_SET_MAC_IN_REJECT_UNCST_LBN 0 #define MC_CMD_SET_MAC_IN_REJECT_UNCST_WIDTH 1 #define MC_CMD_SET_MAC_IN_REJECT_BRDCST_LBN 1 #define MC_CMD_SET_MAC_IN_REJECT_BRDCST_WIDTH 1 #define MC_CMD_SET_MAC_IN_FCNTL_OFST 20 +#define MC_CMD_SET_MAC_IN_FCNTL_LEN 4 /* enum: Flow control is off. */ #define MC_CMD_FCNTL_OFF 0x0 /* enum: Respond to flow control. */ @@ -5479,6 +3908,7 @@ /* enum: Issue flow control. */ #define MC_CMD_FCNTL_GENERATE 0x5 #define MC_CMD_SET_MAC_IN_FLAGS_OFST 24 +#define MC_CMD_SET_MAC_IN_FLAGS_LEN 4 #define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_LBN 0 #define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_WIDTH 1 @@ -5488,17 +3918,21 @@ * EtherII, VLAN, bug16011 padding). */ #define MC_CMD_SET_MAC_EXT_IN_MTU_OFST 0 +#define MC_CMD_SET_MAC_EXT_IN_MTU_LEN 4 #define MC_CMD_SET_MAC_EXT_IN_DRAIN_OFST 4 +#define MC_CMD_SET_MAC_EXT_IN_DRAIN_LEN 4 #define MC_CMD_SET_MAC_EXT_IN_ADDR_OFST 8 #define MC_CMD_SET_MAC_EXT_IN_ADDR_LEN 8 #define MC_CMD_SET_MAC_EXT_IN_ADDR_LO_OFST 8 #define MC_CMD_SET_MAC_EXT_IN_ADDR_HI_OFST 12 #define MC_CMD_SET_MAC_EXT_IN_REJECT_OFST 16 +#define MC_CMD_SET_MAC_EXT_IN_REJECT_LEN 4 #define MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_LBN 0 #define MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_WIDTH 1 #define MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_LBN 1 #define MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_WIDTH 1 #define MC_CMD_SET_MAC_EXT_IN_FCNTL_OFST 20 +#define MC_CMD_SET_MAC_EXT_IN_FCNTL_LEN 4 /* enum: Flow control is off. */ /* MC_CMD_FCNTL_OFF 0x0 */ /* enum: Respond to flow control. */ @@ -5512,6 +3946,7 @@ /* enum: Issue flow control. */ /* MC_CMD_FCNTL_GENERATE 0x5 */ #define MC_CMD_SET_MAC_EXT_IN_FLAGS_OFST 24 +#define MC_CMD_SET_MAC_EXT_IN_FLAGS_LEN 4 #define MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_LBN 0 #define MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_WIDTH 1 /* Select which parameters to configure. A parameter will only be modified if @@ -5520,6 +3955,7 @@ * set). */ #define MC_CMD_SET_MAC_EXT_IN_CONTROL_OFST 28 +#define MC_CMD_SET_MAC_EXT_IN_CONTROL_LEN 4 #define MC_CMD_SET_MAC_EXT_IN_CFG_MTU_LBN 0 #define MC_CMD_SET_MAC_EXT_IN_CFG_MTU_WIDTH 1 #define MC_CMD_SET_MAC_EXT_IN_CFG_DRAIN_LBN 1 @@ -5541,6 +3977,7 @@ * to 0. */ #define MC_CMD_SET_MAC_V2_OUT_MTU_OFST 0 +#define MC_CMD_SET_MAC_V2_OUT_MTU_LEN 4 /***********************************/ @@ -5574,53 +4011,53 @@ #define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_LEN 4 #define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_PHY_NSTATS /* enum: OUI. */ -#define MC_CMD_OUI 0x0 +#define MC_CMD_OUI 0x0 /* enum: PMA-PMD Link Up. */ -#define MC_CMD_PMA_PMD_LINK_UP 0x1 +#define MC_CMD_PMA_PMD_LINK_UP 0x1 /* enum: PMA-PMD RX Fault. */ -#define MC_CMD_PMA_PMD_RX_FAULT 0x2 +#define MC_CMD_PMA_PMD_RX_FAULT 0x2 /* enum: PMA-PMD TX Fault. */ -#define MC_CMD_PMA_PMD_TX_FAULT 0x3 +#define MC_CMD_PMA_PMD_TX_FAULT 0x3 /* enum: PMA-PMD Signal */ -#define MC_CMD_PMA_PMD_SIGNAL 0x4 +#define MC_CMD_PMA_PMD_SIGNAL 0x4 /* enum: PMA-PMD SNR A. */ -#define MC_CMD_PMA_PMD_SNR_A 0x5 +#define MC_CMD_PMA_PMD_SNR_A 0x5 /* enum: PMA-PMD SNR B. */ -#define MC_CMD_PMA_PMD_SNR_B 0x6 +#define MC_CMD_PMA_PMD_SNR_B 0x6 /* enum: PMA-PMD SNR C. 
*/ -#define MC_CMD_PMA_PMD_SNR_C 0x7 +#define MC_CMD_PMA_PMD_SNR_C 0x7 /* enum: PMA-PMD SNR D. */ -#define MC_CMD_PMA_PMD_SNR_D 0x8 +#define MC_CMD_PMA_PMD_SNR_D 0x8 /* enum: PCS Link Up. */ -#define MC_CMD_PCS_LINK_UP 0x9 +#define MC_CMD_PCS_LINK_UP 0x9 /* enum: PCS RX Fault. */ -#define MC_CMD_PCS_RX_FAULT 0xa +#define MC_CMD_PCS_RX_FAULT 0xa /* enum: PCS TX Fault. */ -#define MC_CMD_PCS_TX_FAULT 0xb +#define MC_CMD_PCS_TX_FAULT 0xb /* enum: PCS BER. */ -#define MC_CMD_PCS_BER 0xc +#define MC_CMD_PCS_BER 0xc /* enum: PCS Block Errors. */ -#define MC_CMD_PCS_BLOCK_ERRORS 0xd +#define MC_CMD_PCS_BLOCK_ERRORS 0xd /* enum: PhyXS Link Up. */ -#define MC_CMD_PHYXS_LINK_UP 0xe +#define MC_CMD_PHYXS_LINK_UP 0xe /* enum: PhyXS RX Fault. */ -#define MC_CMD_PHYXS_RX_FAULT 0xf +#define MC_CMD_PHYXS_RX_FAULT 0xf /* enum: PhyXS TX Fault. */ -#define MC_CMD_PHYXS_TX_FAULT 0x10 +#define MC_CMD_PHYXS_TX_FAULT 0x10 /* enum: PhyXS Align. */ -#define MC_CMD_PHYXS_ALIGN 0x11 +#define MC_CMD_PHYXS_ALIGN 0x11 /* enum: PhyXS Sync. */ -#define MC_CMD_PHYXS_SYNC 0x12 +#define MC_CMD_PHYXS_SYNC 0x12 /* enum: AN link-up. */ -#define MC_CMD_AN_LINK_UP 0x13 +#define MC_CMD_AN_LINK_UP 0x13 /* enum: AN Complete. */ -#define MC_CMD_AN_COMPLETE 0x14 +#define MC_CMD_AN_COMPLETE 0x14 /* enum: AN 10GBaseT Status. */ -#define MC_CMD_AN_10GBT_STATUS 0x15 +#define MC_CMD_AN_10GBT_STATUS 0x15 /* enum: Clause 22 Link-Up. */ -#define MC_CMD_CL22_LINK_UP 0x16 +#define MC_CMD_CL22_LINK_UP 0x16 /* enum: (Last entry) */ -#define MC_CMD_PHY_NSTATS 0x17 +#define MC_CMD_PHY_NSTATS 0x17 /***********************************/ @@ -5647,6 +4084,7 @@ #define MC_CMD_MAC_STATS_IN_DMA_ADDR_LO_OFST 0 #define MC_CMD_MAC_STATS_IN_DMA_ADDR_HI_OFST 4 #define MC_CMD_MAC_STATS_IN_CMD_OFST 8 +#define MC_CMD_MAC_STATS_IN_CMD_LEN 4 #define MC_CMD_MAC_STATS_IN_DMA_LBN 0 #define MC_CMD_MAC_STATS_IN_DMA_WIDTH 1 #define MC_CMD_MAC_STATS_IN_CLEAR_LBN 1 @@ -5661,9 +4099,16 @@ #define MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT_WIDTH 1 #define MC_CMD_MAC_STATS_IN_PERIOD_MS_LBN 16 #define MC_CMD_MAC_STATS_IN_PERIOD_MS_WIDTH 16 +/* DMA length. Should be set to MAC_STATS_NUM_STATS * sizeof(uint64_t), as + * returned by MC_CMD_GET_CAPABILITIES_V4_OUT. 
For legacy firmware not + * supporting MC_CMD_GET_CAPABILITIES_V4_OUT, DMA_LEN should be set to + * MC_CMD_MAC_NSTATS * sizeof(uint64_t) + */ #define MC_CMD_MAC_STATS_IN_DMA_LEN_OFST 12 +#define MC_CMD_MAC_STATS_IN_DMA_LEN_LEN 4 /* port id so vadapter stats can be provided */ #define MC_CMD_MAC_STATS_IN_PORT_ID_OFST 16 +#define MC_CMD_MAC_STATS_IN_PORT_ID_LEN 4 /* MC_CMD_MAC_STATS_OUT_DMA msgresponse */ #define MC_CMD_MAC_STATS_OUT_DMA_LEN 0 @@ -5675,141 +4120,289 @@ #define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_LO_OFST 0 #define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_HI_OFST 4 #define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS -#define MC_CMD_MAC_GENERATION_START 0x0 /* enum */ -#define MC_CMD_MAC_DMABUF_START 0x1 /* enum */ -#define MC_CMD_MAC_TX_PKTS 0x1 /* enum */ -#define MC_CMD_MAC_TX_PAUSE_PKTS 0x2 /* enum */ -#define MC_CMD_MAC_TX_CONTROL_PKTS 0x3 /* enum */ -#define MC_CMD_MAC_TX_UNICAST_PKTS 0x4 /* enum */ -#define MC_CMD_MAC_TX_MULTICAST_PKTS 0x5 /* enum */ -#define MC_CMD_MAC_TX_BROADCAST_PKTS 0x6 /* enum */ -#define MC_CMD_MAC_TX_BYTES 0x7 /* enum */ -#define MC_CMD_MAC_TX_BAD_BYTES 0x8 /* enum */ -#define MC_CMD_MAC_TX_LT64_PKTS 0x9 /* enum */ -#define MC_CMD_MAC_TX_64_PKTS 0xa /* enum */ -#define MC_CMD_MAC_TX_65_TO_127_PKTS 0xb /* enum */ -#define MC_CMD_MAC_TX_128_TO_255_PKTS 0xc /* enum */ -#define MC_CMD_MAC_TX_256_TO_511_PKTS 0xd /* enum */ -#define MC_CMD_MAC_TX_512_TO_1023_PKTS 0xe /* enum */ -#define MC_CMD_MAC_TX_1024_TO_15XX_PKTS 0xf /* enum */ -#define MC_CMD_MAC_TX_15XX_TO_JUMBO_PKTS 0x10 /* enum */ -#define MC_CMD_MAC_TX_GTJUMBO_PKTS 0x11 /* enum */ -#define MC_CMD_MAC_TX_BAD_FCS_PKTS 0x12 /* enum */ -#define MC_CMD_MAC_TX_SINGLE_COLLISION_PKTS 0x13 /* enum */ -#define MC_CMD_MAC_TX_MULTIPLE_COLLISION_PKTS 0x14 /* enum */ -#define MC_CMD_MAC_TX_EXCESSIVE_COLLISION_PKTS 0x15 /* enum */ -#define MC_CMD_MAC_TX_LATE_COLLISION_PKTS 0x16 /* enum */ -#define MC_CMD_MAC_TX_DEFERRED_PKTS 0x17 /* enum */ -#define MC_CMD_MAC_TX_EXCESSIVE_DEFERRED_PKTS 0x18 /* enum */ -#define MC_CMD_MAC_TX_NON_TCPUDP_PKTS 0x19 /* enum */ -#define MC_CMD_MAC_TX_MAC_SRC_ERR_PKTS 0x1a /* enum */ -#define MC_CMD_MAC_TX_IP_SRC_ERR_PKTS 0x1b /* enum */ -#define MC_CMD_MAC_RX_PKTS 0x1c /* enum */ -#define MC_CMD_MAC_RX_PAUSE_PKTS 0x1d /* enum */ -#define MC_CMD_MAC_RX_GOOD_PKTS 0x1e /* enum */ -#define MC_CMD_MAC_RX_CONTROL_PKTS 0x1f /* enum */ -#define MC_CMD_MAC_RX_UNICAST_PKTS 0x20 /* enum */ -#define MC_CMD_MAC_RX_MULTICAST_PKTS 0x21 /* enum */ -#define MC_CMD_MAC_RX_BROADCAST_PKTS 0x22 /* enum */ -#define MC_CMD_MAC_RX_BYTES 0x23 /* enum */ -#define MC_CMD_MAC_RX_BAD_BYTES 0x24 /* enum */ -#define MC_CMD_MAC_RX_64_PKTS 0x25 /* enum */ -#define MC_CMD_MAC_RX_65_TO_127_PKTS 0x26 /* enum */ -#define MC_CMD_MAC_RX_128_TO_255_PKTS 0x27 /* enum */ -#define MC_CMD_MAC_RX_256_TO_511_PKTS 0x28 /* enum */ -#define MC_CMD_MAC_RX_512_TO_1023_PKTS 0x29 /* enum */ -#define MC_CMD_MAC_RX_1024_TO_15XX_PKTS 0x2a /* enum */ -#define MC_CMD_MAC_RX_15XX_TO_JUMBO_PKTS 0x2b /* enum */ -#define MC_CMD_MAC_RX_GTJUMBO_PKTS 0x2c /* enum */ -#define MC_CMD_MAC_RX_UNDERSIZE_PKTS 0x2d /* enum */ -#define MC_CMD_MAC_RX_BAD_FCS_PKTS 0x2e /* enum */ -#define MC_CMD_MAC_RX_OVERFLOW_PKTS 0x2f /* enum */ -#define MC_CMD_MAC_RX_FALSE_CARRIER_PKTS 0x30 /* enum */ -#define MC_CMD_MAC_RX_SYMBOL_ERROR_PKTS 0x31 /* enum */ -#define MC_CMD_MAC_RX_ALIGN_ERROR_PKTS 0x32 /* enum */ -#define MC_CMD_MAC_RX_LENGTH_ERROR_PKTS 0x33 /* enum */ -#define MC_CMD_MAC_RX_INTERNAL_ERROR_PKTS 0x34 /* enum */ -#define 
MC_CMD_MAC_RX_JABBER_PKTS 0x35 /* enum */ -#define MC_CMD_MAC_RX_NODESC_DROPS 0x36 /* enum */ -#define MC_CMD_MAC_RX_LANES01_CHAR_ERR 0x37 /* enum */ -#define MC_CMD_MAC_RX_LANES23_CHAR_ERR 0x38 /* enum */ -#define MC_CMD_MAC_RX_LANES01_DISP_ERR 0x39 /* enum */ -#define MC_CMD_MAC_RX_LANES23_DISP_ERR 0x3a /* enum */ -#define MC_CMD_MAC_RX_MATCH_FAULT 0x3b /* enum */ +#define MC_CMD_MAC_GENERATION_START 0x0 /* enum */ +#define MC_CMD_MAC_DMABUF_START 0x1 /* enum */ +#define MC_CMD_MAC_TX_PKTS 0x1 /* enum */ +#define MC_CMD_MAC_TX_PAUSE_PKTS 0x2 /* enum */ +#define MC_CMD_MAC_TX_CONTROL_PKTS 0x3 /* enum */ +#define MC_CMD_MAC_TX_UNICAST_PKTS 0x4 /* enum */ +#define MC_CMD_MAC_TX_MULTICAST_PKTS 0x5 /* enum */ +#define MC_CMD_MAC_TX_BROADCAST_PKTS 0x6 /* enum */ +#define MC_CMD_MAC_TX_BYTES 0x7 /* enum */ +#define MC_CMD_MAC_TX_BAD_BYTES 0x8 /* enum */ +#define MC_CMD_MAC_TX_LT64_PKTS 0x9 /* enum */ +#define MC_CMD_MAC_TX_64_PKTS 0xa /* enum */ +#define MC_CMD_MAC_TX_65_TO_127_PKTS 0xb /* enum */ +#define MC_CMD_MAC_TX_128_TO_255_PKTS 0xc /* enum */ +#define MC_CMD_MAC_TX_256_TO_511_PKTS 0xd /* enum */ +#define MC_CMD_MAC_TX_512_TO_1023_PKTS 0xe /* enum */ +#define MC_CMD_MAC_TX_1024_TO_15XX_PKTS 0xf /* enum */ +#define MC_CMD_MAC_TX_15XX_TO_JUMBO_PKTS 0x10 /* enum */ +#define MC_CMD_MAC_TX_GTJUMBO_PKTS 0x11 /* enum */ +#define MC_CMD_MAC_TX_BAD_FCS_PKTS 0x12 /* enum */ +#define MC_CMD_MAC_TX_SINGLE_COLLISION_PKTS 0x13 /* enum */ +#define MC_CMD_MAC_TX_MULTIPLE_COLLISION_PKTS 0x14 /* enum */ +#define MC_CMD_MAC_TX_EXCESSIVE_COLLISION_PKTS 0x15 /* enum */ +#define MC_CMD_MAC_TX_LATE_COLLISION_PKTS 0x16 /* enum */ +#define MC_CMD_MAC_TX_DEFERRED_PKTS 0x17 /* enum */ +#define MC_CMD_MAC_TX_EXCESSIVE_DEFERRED_PKTS 0x18 /* enum */ +#define MC_CMD_MAC_TX_NON_TCPUDP_PKTS 0x19 /* enum */ +#define MC_CMD_MAC_TX_MAC_SRC_ERR_PKTS 0x1a /* enum */ +#define MC_CMD_MAC_TX_IP_SRC_ERR_PKTS 0x1b /* enum */ +#define MC_CMD_MAC_RX_PKTS 0x1c /* enum */ +#define MC_CMD_MAC_RX_PAUSE_PKTS 0x1d /* enum */ +#define MC_CMD_MAC_RX_GOOD_PKTS 0x1e /* enum */ +#define MC_CMD_MAC_RX_CONTROL_PKTS 0x1f /* enum */ +#define MC_CMD_MAC_RX_UNICAST_PKTS 0x20 /* enum */ +#define MC_CMD_MAC_RX_MULTICAST_PKTS 0x21 /* enum */ +#define MC_CMD_MAC_RX_BROADCAST_PKTS 0x22 /* enum */ +#define MC_CMD_MAC_RX_BYTES 0x23 /* enum */ +#define MC_CMD_MAC_RX_BAD_BYTES 0x24 /* enum */ +#define MC_CMD_MAC_RX_64_PKTS 0x25 /* enum */ +#define MC_CMD_MAC_RX_65_TO_127_PKTS 0x26 /* enum */ +#define MC_CMD_MAC_RX_128_TO_255_PKTS 0x27 /* enum */ +#define MC_CMD_MAC_RX_256_TO_511_PKTS 0x28 /* enum */ +#define MC_CMD_MAC_RX_512_TO_1023_PKTS 0x29 /* enum */ +#define MC_CMD_MAC_RX_1024_TO_15XX_PKTS 0x2a /* enum */ +#define MC_CMD_MAC_RX_15XX_TO_JUMBO_PKTS 0x2b /* enum */ +#define MC_CMD_MAC_RX_GTJUMBO_PKTS 0x2c /* enum */ +#define MC_CMD_MAC_RX_UNDERSIZE_PKTS 0x2d /* enum */ +#define MC_CMD_MAC_RX_BAD_FCS_PKTS 0x2e /* enum */ +#define MC_CMD_MAC_RX_OVERFLOW_PKTS 0x2f /* enum */ +#define MC_CMD_MAC_RX_FALSE_CARRIER_PKTS 0x30 /* enum */ +#define MC_CMD_MAC_RX_SYMBOL_ERROR_PKTS 0x31 /* enum */ +#define MC_CMD_MAC_RX_ALIGN_ERROR_PKTS 0x32 /* enum */ +#define MC_CMD_MAC_RX_LENGTH_ERROR_PKTS 0x33 /* enum */ +#define MC_CMD_MAC_RX_INTERNAL_ERROR_PKTS 0x34 /* enum */ +#define MC_CMD_MAC_RX_JABBER_PKTS 0x35 /* enum */ +#define MC_CMD_MAC_RX_NODESC_DROPS 0x36 /* enum */ +#define MC_CMD_MAC_RX_LANES01_CHAR_ERR 0x37 /* enum */ +#define MC_CMD_MAC_RX_LANES23_CHAR_ERR 0x38 /* enum */ +#define MC_CMD_MAC_RX_LANES01_DISP_ERR 0x39 /* enum */ +#define 
MC_CMD_MAC_RX_LANES23_DISP_ERR 0x3a /* enum */ +#define MC_CMD_MAC_RX_MATCH_FAULT 0x3b /* enum */ /* enum: PM trunc_bb_overflow counter. Valid for EF10 with PM_AND_RXDP_COUNTERS * capability only. */ -#define MC_CMD_MAC_PM_TRUNC_BB_OVERFLOW 0x3c +#define MC_CMD_MAC_PM_TRUNC_BB_OVERFLOW 0x3c /* enum: PM discard_bb_overflow counter. Valid for EF10 with * PM_AND_RXDP_COUNTERS capability only. */ -#define MC_CMD_MAC_PM_DISCARD_BB_OVERFLOW 0x3d +#define MC_CMD_MAC_PM_DISCARD_BB_OVERFLOW 0x3d /* enum: PM trunc_vfifo_full counter. Valid for EF10 with PM_AND_RXDP_COUNTERS * capability only. */ -#define MC_CMD_MAC_PM_TRUNC_VFIFO_FULL 0x3e +#define MC_CMD_MAC_PM_TRUNC_VFIFO_FULL 0x3e /* enum: PM discard_vfifo_full counter. Valid for EF10 with * PM_AND_RXDP_COUNTERS capability only. */ -#define MC_CMD_MAC_PM_DISCARD_VFIFO_FULL 0x3f +#define MC_CMD_MAC_PM_DISCARD_VFIFO_FULL 0x3f /* enum: PM trunc_qbb counter. Valid for EF10 with PM_AND_RXDP_COUNTERS * capability only. */ -#define MC_CMD_MAC_PM_TRUNC_QBB 0x40 +#define MC_CMD_MAC_PM_TRUNC_QBB 0x40 /* enum: PM discard_qbb counter. Valid for EF10 with PM_AND_RXDP_COUNTERS * capability only. */ -#define MC_CMD_MAC_PM_DISCARD_QBB 0x41 +#define MC_CMD_MAC_PM_DISCARD_QBB 0x41 /* enum: PM discard_mapping counter. Valid for EF10 with PM_AND_RXDP_COUNTERS * capability only. */ -#define MC_CMD_MAC_PM_DISCARD_MAPPING 0x42 +#define MC_CMD_MAC_PM_DISCARD_MAPPING 0x42 /* enum: RXDP counter: Number of packets dropped due to the queue being * disabled. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only. */ -#define MC_CMD_MAC_RXDP_Q_DISABLED_PKTS 0x43 +#define MC_CMD_MAC_RXDP_Q_DISABLED_PKTS 0x43 /* enum: RXDP counter: Number of packets dropped by the DICPU. Valid for EF10 * with PM_AND_RXDP_COUNTERS capability only. */ -#define MC_CMD_MAC_RXDP_DI_DROPPED_PKTS 0x45 +#define MC_CMD_MAC_RXDP_DI_DROPPED_PKTS 0x45 /* enum: RXDP counter: Number of non-host packets. Valid for EF10 with * PM_AND_RXDP_COUNTERS capability only. */ -#define MC_CMD_MAC_RXDP_STREAMING_PKTS 0x46 +#define MC_CMD_MAC_RXDP_STREAMING_PKTS 0x46 /* enum: RXDP counter: Number of times an hlb descriptor fetch was performed. * Valid for EF10 with PM_AND_RXDP_COUNTERS capability only. */ -#define MC_CMD_MAC_RXDP_HLB_FETCH_CONDITIONS 0x47 +#define MC_CMD_MAC_RXDP_HLB_FETCH_CONDITIONS 0x47 /* enum: RXDP counter: Number of times the DPCPU waited for an existing * descriptor fetch. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only. 
*/ -#define MC_CMD_MAC_RXDP_HLB_WAIT_CONDITIONS 0x48 -#define MC_CMD_MAC_VADAPTER_RX_DMABUF_START 0x4c /* enum */ -#define MC_CMD_MAC_VADAPTER_RX_UNICAST_PACKETS 0x4c /* enum */ -#define MC_CMD_MAC_VADAPTER_RX_UNICAST_BYTES 0x4d /* enum */ -#define MC_CMD_MAC_VADAPTER_RX_MULTICAST_PACKETS 0x4e /* enum */ -#define MC_CMD_MAC_VADAPTER_RX_MULTICAST_BYTES 0x4f /* enum */ -#define MC_CMD_MAC_VADAPTER_RX_BROADCAST_PACKETS 0x50 /* enum */ -#define MC_CMD_MAC_VADAPTER_RX_BROADCAST_BYTES 0x51 /* enum */ -#define MC_CMD_MAC_VADAPTER_RX_BAD_PACKETS 0x52 /* enum */ -#define MC_CMD_MAC_VADAPTER_RX_BAD_BYTES 0x53 /* enum */ -#define MC_CMD_MAC_VADAPTER_RX_OVERFLOW 0x54 /* enum */ -#define MC_CMD_MAC_VADAPTER_TX_DMABUF_START 0x57 /* enum */ -#define MC_CMD_MAC_VADAPTER_TX_UNICAST_PACKETS 0x57 /* enum */ -#define MC_CMD_MAC_VADAPTER_TX_UNICAST_BYTES 0x58 /* enum */ -#define MC_CMD_MAC_VADAPTER_TX_MULTICAST_PACKETS 0x59 /* enum */ -#define MC_CMD_MAC_VADAPTER_TX_MULTICAST_BYTES 0x5a /* enum */ -#define MC_CMD_MAC_VADAPTER_TX_BROADCAST_PACKETS 0x5b /* enum */ -#define MC_CMD_MAC_VADAPTER_TX_BROADCAST_BYTES 0x5c /* enum */ -#define MC_CMD_MAC_VADAPTER_TX_BAD_PACKETS 0x5d /* enum */ -#define MC_CMD_MAC_VADAPTER_TX_BAD_BYTES 0x5e /* enum */ -#define MC_CMD_MAC_VADAPTER_TX_OVERFLOW 0x5f /* enum */ +#define MC_CMD_MAC_RXDP_HLB_WAIT_CONDITIONS 0x48 +#define MC_CMD_MAC_VADAPTER_RX_DMABUF_START 0x4c /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_UNICAST_PACKETS 0x4c /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_UNICAST_BYTES 0x4d /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_MULTICAST_PACKETS 0x4e /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_MULTICAST_BYTES 0x4f /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_BROADCAST_PACKETS 0x50 /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_BROADCAST_BYTES 0x51 /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_BAD_PACKETS 0x52 /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_BAD_BYTES 0x53 /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_OVERFLOW 0x54 /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_DMABUF_START 0x57 /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_UNICAST_PACKETS 0x57 /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_UNICAST_BYTES 0x58 /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_MULTICAST_PACKETS 0x59 /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_MULTICAST_BYTES 0x5a /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_BROADCAST_PACKETS 0x5b /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_BROADCAST_BYTES 0x5c /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_BAD_PACKETS 0x5d /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_BAD_BYTES 0x5e /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_OVERFLOW 0x5f /* enum */ /* enum: Start of GMAC stats buffer space, for Siena only. */ -#define MC_CMD_GMAC_DMABUF_START 0x40 +#define MC_CMD_GMAC_DMABUF_START 0x40 /* enum: End of GMAC stats buffer space, for Siena only. */ -#define MC_CMD_GMAC_DMABUF_END 0x5f -#define MC_CMD_MAC_GENERATION_END 0x60 /* enum */ -#define MC_CMD_MAC_NSTATS 0x61 /* enum */ +#define MC_CMD_GMAC_DMABUF_END 0x5f +/* enum: GENERATION_END value, used together with GENERATION_START to verify + * consistency of DMAd data. For legacy firmware / drivers without extended + * stats (more precisely, when DMA_LEN == MC_CMD_MAC_NSTATS * + * sizeof(uint64_t)), this entry holds the GENERATION_END value. Otherwise, + * this value is invalid/ reserved and GENERATION_END is written as the last + * 64-bit word of the DMA buffer (at DMA_LEN - sizeof(uint64_t)). 
Note that + * this is consistent with the legacy behaviour, in the sense that entry 96 is + * the last 64-bit word in the buffer when DMA_LEN == MC_CMD_MAC_NSTATS * + * sizeof(uint64_t). See SF-109306-TC, Section 9.2 for details. + */ +#define MC_CMD_MAC_GENERATION_END 0x60 +#define MC_CMD_MAC_NSTATS 0x61 /* enum */ + +/* MC_CMD_MAC_STATS_V2_OUT_DMA msgresponse */ +#define MC_CMD_MAC_STATS_V2_OUT_DMA_LEN 0 + +/* MC_CMD_MAC_STATS_V2_OUT_NO_DMA msgresponse */ +#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS_V2*64))>>3) +#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_OFST 0 +#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_LEN 8 +#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_LO_OFST 0 +#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_HI_OFST 4 +#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V2 +/* enum: Start of FEC stats buffer space, Medford2 and up */ +#define MC_CMD_MAC_FEC_DMABUF_START 0x61 +/* enum: Number of uncorrected FEC codewords on link (RS-FEC only for Medford2) + */ +#define MC_CMD_MAC_FEC_UNCORRECTED_ERRORS 0x61 +/* enum: Number of corrected FEC codewords on link (RS-FEC only for Medford2) + */ +#define MC_CMD_MAC_FEC_CORRECTED_ERRORS 0x62 +/* enum: Number of corrected 10-bit symbol errors, lane 0 (RS-FEC only) */ +#define MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE0 0x63 +/* enum: Number of corrected 10-bit symbol errors, lane 1 (RS-FEC only) */ +#define MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE1 0x64 +/* enum: Number of corrected 10-bit symbol errors, lane 2 (RS-FEC only) */ +#define MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE2 0x65 +/* enum: Number of corrected 10-bit symbol errors, lane 3 (RS-FEC only) */ +#define MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE3 0x66 +/* enum: This includes the space at offset 103 which is the final + * GENERATION_END in a MAC_STATS_V2 response and otherwise unused. 
+ */ +#define MC_CMD_MAC_NSTATS_V2 0x68 +/* Other enum values, see field(s): */ +/* MC_CMD_MAC_STATS_OUT_NO_DMA/STATISTICS */ + +/* MC_CMD_MAC_STATS_V3_OUT_DMA msgresponse */ +#define MC_CMD_MAC_STATS_V3_OUT_DMA_LEN 0 + +/* MC_CMD_MAC_STATS_V3_OUT_NO_DMA msgresponse */ +#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS_V3*64))>>3) +#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_OFST 0 +#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_LEN 8 +#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_LO_OFST 0 +#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_HI_OFST 4 +#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V3 +/* enum: Start of CTPIO stats buffer space, Medford2 and up */ +#define MC_CMD_MAC_CTPIO_DMABUF_START 0x68 +/* enum: Number of CTPIO fallbacks because a DMA packet was in progress on the + * target VI + */ +#define MC_CMD_MAC_CTPIO_VI_BUSY_FALLBACK 0x68 +/* enum: Number of times a CTPIO send wrote beyond frame end (informational + * only) + */ +#define MC_CMD_MAC_CTPIO_LONG_WRITE_SUCCESS 0x69 +/* enum: Number of CTPIO failures because the TX doorbell was written before + * the end of the frame data + */ +#define MC_CMD_MAC_CTPIO_MISSING_DBELL_FAIL 0x6a +/* enum: Number of CTPIO failures because the internal FIFO overflowed */ +#define MC_CMD_MAC_CTPIO_OVERFLOW_FAIL 0x6b +/* enum: Number of CTPIO failures because the host did not deliver data fast + * enough to avoid MAC underflow + */ +#define MC_CMD_MAC_CTPIO_UNDERFLOW_FAIL 0x6c +/* enum: Number of CTPIO failures because the host did not deliver all the + * frame data within the timeout + */ +#define MC_CMD_MAC_CTPIO_TIMEOUT_FAIL 0x6d +/* enum: Number of CTPIO failures because the frame data arrived out of order + * or with gaps + */ +#define MC_CMD_MAC_CTPIO_NONCONTIG_WR_FAIL 0x6e +/* enum: Number of CTPIO failures because the host started a new frame before + * completing the previous one + */ +#define MC_CMD_MAC_CTPIO_FRM_CLOBBER_FAIL 0x6f +/* enum: Number of CTPIO failures because a write was not a multiple of 32 bits + * or not 32-bit aligned + */ +#define MC_CMD_MAC_CTPIO_INVALID_WR_FAIL 0x70 +/* enum: Number of CTPIO fallbacks because another VI on the same port was + * sending a CTPIO frame + */ +#define MC_CMD_MAC_CTPIO_VI_CLOBBER_FALLBACK 0x71 +/* enum: Number of CTPIO fallbacks because target VI did not have CTPIO enabled + */ +#define MC_CMD_MAC_CTPIO_UNQUALIFIED_FALLBACK 0x72 +/* enum: Number of CTPIO fallbacks because length in header was less than 29 + * bytes + */ +#define MC_CMD_MAC_CTPIO_RUNT_FALLBACK 0x73 +/* enum: Total number of successful CTPIO sends on this port */ +#define MC_CMD_MAC_CTPIO_SUCCESS 0x74 +/* enum: Total number of CTPIO fallbacks on this port */ +#define MC_CMD_MAC_CTPIO_FALLBACK 0x75 +/* enum: Total number of CTPIO poisoned frames on this port, whether erased or + * not + */ +#define MC_CMD_MAC_CTPIO_POISON 0x76 +/* enum: Total number of CTPIO erased frames on this port */ +#define MC_CMD_MAC_CTPIO_ERASE 0x77 +/* enum: This includes the space at offset 120 which is the final + * GENERATION_END in a MAC_STATS_V3 response and otherwise unused. 
+ */ +#define MC_CMD_MAC_NSTATS_V3 0x79 +/* Other enum values, see field(s): */ +/* MC_CMD_MAC_STATS_V2_OUT_NO_DMA/STATISTICS */ + +/* MC_CMD_MAC_STATS_V4_OUT_DMA msgresponse */ +#define MC_CMD_MAC_STATS_V4_OUT_DMA_LEN 0 + +/* MC_CMD_MAC_STATS_V4_OUT_NO_DMA msgresponse */ +#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS_V4*64))>>3) +#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_OFST 0 +#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_LEN 8 +#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_LO_OFST 0 +#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_HI_OFST 4 +#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V4 +/* enum: Start of V4 stats buffer space */ +#define MC_CMD_MAC_V4_DMABUF_START 0x79 +/* enum: RXDP counter: Number of packets truncated because scattering was + * disabled. + */ +#define MC_CMD_MAC_RXDP_SCATTER_DISABLED_TRUNC 0x79 +/* enum: RXDP counter: Number of times the RXDP head of line blocked waiting + * for descriptors. Will be zero unless RXDP_HLB_IDLE capability is set. + */ +#define MC_CMD_MAC_RXDP_HLB_IDLE 0x7a +/* enum: RXDP counter: Number of times the RXDP timed out while head of line + * blocking. Will be zero unless RXDP_HLB_IDLE capability is set. + */ +#define MC_CMD_MAC_RXDP_HLB_TIMEOUT 0x7b +/* enum: This includes the space at offset 124 which is the final + * GENERATION_END in a MAC_STATS_V4 response and otherwise unused. + */ +#define MC_CMD_MAC_NSTATS_V4 0x7d +/* Other enum values, see field(s): */ +/* MC_CMD_MAC_STATS_V3_OUT_NO_DMA/STATISTICS */ /***********************************/ @@ -5821,21 +4414,28 @@ /* MC_CMD_SRIOV_IN msgrequest */ #define MC_CMD_SRIOV_IN_LEN 12 #define MC_CMD_SRIOV_IN_ENABLE_OFST 0 +#define MC_CMD_SRIOV_IN_ENABLE_LEN 4 #define MC_CMD_SRIOV_IN_VI_BASE_OFST 4 +#define MC_CMD_SRIOV_IN_VI_BASE_LEN 4 #define MC_CMD_SRIOV_IN_VF_COUNT_OFST 8 +#define MC_CMD_SRIOV_IN_VF_COUNT_LEN 4 /* MC_CMD_SRIOV_OUT msgresponse */ #define MC_CMD_SRIOV_OUT_LEN 8 #define MC_CMD_SRIOV_OUT_VI_SCALE_OFST 0 +#define MC_CMD_SRIOV_OUT_VI_SCALE_LEN 4 #define MC_CMD_SRIOV_OUT_VF_TOTAL_OFST 4 +#define MC_CMD_SRIOV_OUT_VF_TOTAL_LEN 4 /* MC_CMD_MEMCPY_RECORD_TYPEDEF structuredef */ #define MC_CMD_MEMCPY_RECORD_TYPEDEF_LEN 32 /* this is only used for the first record */ #define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_OFST 0 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LEN 4 #define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LBN 0 #define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_WIDTH 32 #define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_OFST 4 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_LEN 4 #define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_LBN 32 #define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_WIDTH 32 #define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_OFST 8 @@ -5845,6 +4445,7 @@ #define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LBN 64 #define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_WIDTH 64 #define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_OFST 16 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_LEN 4 #define MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE 0x100 /* enum */ #define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_LBN 128 #define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_WIDTH 32 @@ -5855,6 +4456,7 @@ #define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LBN 160 #define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_WIDTH 64 #define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_OFST 28 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_LEN 4 #define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_LBN 224 #define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_WIDTH 32 @@ -5907,24 +4509,26 @@ /* 
MC_CMD_WOL_FILTER_SET_IN msgrequest */ #define MC_CMD_WOL_FILTER_SET_IN_LEN 192 #define MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 -#define MC_CMD_FILTER_MODE_SIMPLE 0x0 /* enum */ +#define MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 +#define MC_CMD_FILTER_MODE_SIMPLE 0x0 /* enum */ #define MC_CMD_FILTER_MODE_STRUCTURED 0xffffffff /* enum */ /* A type value of 1 is unused. */ #define MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 +#define MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 /* enum: Magic */ -#define MC_CMD_WOL_TYPE_MAGIC 0x0 +#define MC_CMD_WOL_TYPE_MAGIC 0x0 /* enum: MS Windows Magic */ #define MC_CMD_WOL_TYPE_WIN_MAGIC 0x2 /* enum: IPv4 Syn */ -#define MC_CMD_WOL_TYPE_IPV4_SYN 0x3 +#define MC_CMD_WOL_TYPE_IPV4_SYN 0x3 /* enum: IPv6 Syn */ -#define MC_CMD_WOL_TYPE_IPV6_SYN 0x4 +#define MC_CMD_WOL_TYPE_IPV6_SYN 0x4 /* enum: Bitmap */ -#define MC_CMD_WOL_TYPE_BITMAP 0x5 +#define MC_CMD_WOL_TYPE_BITMAP 0x5 /* enum: Link */ -#define MC_CMD_WOL_TYPE_LINK 0x6 +#define MC_CMD_WOL_TYPE_LINK 0x6 /* enum: (Above this for future use) */ -#define MC_CMD_WOL_TYPE_MAX 0x7 +#define MC_CMD_WOL_TYPE_MAX 0x7 #define MC_CMD_WOL_FILTER_SET_IN_DATA_OFST 8 #define MC_CMD_WOL_FILTER_SET_IN_DATA_LEN 4 #define MC_CMD_WOL_FILTER_SET_IN_DATA_NUM 46 @@ -5932,7 +4536,9 @@ /* MC_CMD_WOL_FILTER_SET_IN_MAGIC msgrequest */ #define MC_CMD_WOL_FILTER_SET_IN_MAGIC_LEN 16 /* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */ +/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */ /* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */ +/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */ #define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_OFST 8 #define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LEN 8 #define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LO_OFST 8 @@ -5941,9 +4547,13 @@ /* MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN msgrequest */ #define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_LEN 20 /* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */ +/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */ /* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */ +/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */ #define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_IP_OFST 8 +#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_IP_LEN 4 #define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_IP_OFST 12 +#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_IP_LEN 4 #define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_OFST 16 #define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_LEN 2 #define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_PORT_OFST 18 @@ -5952,7 +4562,9 @@ /* MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN msgrequest */ #define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_LEN 44 /* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */ +/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */ /* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */ +/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */ #define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_IP_OFST 8 #define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_IP_LEN 16 #define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_IP_OFST 24 @@ -5965,7 +4577,9 @@ /* MC_CMD_WOL_FILTER_SET_IN_BITMAP msgrequest */ #define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN 187 /* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */ +/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */ /* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */ +/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */ #define MC_CMD_WOL_FILTER_SET_IN_BITMAP_MASK_OFST 8 #define MC_CMD_WOL_FILTER_SET_IN_BITMAP_MASK_LEN 48 #define MC_CMD_WOL_FILTER_SET_IN_BITMAP_BITMAP_OFST 56 @@ -5980,8 +4594,11 @@ /* MC_CMD_WOL_FILTER_SET_IN_LINK msgrequest */ #define MC_CMD_WOL_FILTER_SET_IN_LINK_LEN 12 /* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */ +/* 
MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */ /* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */ +/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */ #define MC_CMD_WOL_FILTER_SET_IN_LINK_MASK_OFST 8 +#define MC_CMD_WOL_FILTER_SET_IN_LINK_MASK_LEN 4 #define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_LBN 0 #define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_WIDTH 1 #define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_LBN 1 @@ -5990,6 +4607,7 @@ /* MC_CMD_WOL_FILTER_SET_OUT msgresponse */ #define MC_CMD_WOL_FILTER_SET_OUT_LEN 4 #define MC_CMD_WOL_FILTER_SET_OUT_FILTER_ID_OFST 0 +#define MC_CMD_WOL_FILTER_SET_OUT_FILTER_ID_LEN 4 /***********************************/ @@ -6004,6 +4622,7 @@ /* MC_CMD_WOL_FILTER_REMOVE_IN msgrequest */ #define MC_CMD_WOL_FILTER_REMOVE_IN_LEN 4 #define MC_CMD_WOL_FILTER_REMOVE_IN_FILTER_ID_OFST 0 +#define MC_CMD_WOL_FILTER_REMOVE_IN_FILTER_ID_LEN 4 /* MC_CMD_WOL_FILTER_REMOVE_OUT msgresponse */ #define MC_CMD_WOL_FILTER_REMOVE_OUT_LEN 0 @@ -6022,6 +4641,7 @@ /* MC_CMD_WOL_FILTER_RESET_IN msgrequest */ #define MC_CMD_WOL_FILTER_RESET_IN_LEN 4 #define MC_CMD_WOL_FILTER_RESET_IN_MASK_OFST 0 +#define MC_CMD_WOL_FILTER_RESET_IN_MASK_LEN 4 #define MC_CMD_WOL_FILTER_RESET_IN_WAKE_FILTERS 0x1 /* enum */ #define MC_CMD_WOL_FILTER_RESET_IN_LIGHTSOUT_OFFLOADS 0x2 /* enum */ @@ -6063,6 +4683,7 @@ #define MC_CMD_NVRAM_TYPES_OUT_LEN 4 /* Bit mask of supported types. */ #define MC_CMD_NVRAM_TYPES_OUT_TYPES_OFST 0 +#define MC_CMD_NVRAM_TYPES_OUT_TYPES_LEN 4 /* enum: Disabled callisto. */ #define MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO 0x0 /* enum: MC firmware. */ @@ -6120,21 +4741,28 @@ /* MC_CMD_NVRAM_INFO_IN msgrequest */ #define MC_CMD_NVRAM_INFO_IN_LEN 4 #define MC_CMD_NVRAM_INFO_IN_TYPE_OFST 0 +#define MC_CMD_NVRAM_INFO_IN_TYPE_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ /* MC_CMD_NVRAM_INFO_OUT msgresponse */ #define MC_CMD_NVRAM_INFO_OUT_LEN 24 #define MC_CMD_NVRAM_INFO_OUT_TYPE_OFST 0 +#define MC_CMD_NVRAM_INFO_OUT_TYPE_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ #define MC_CMD_NVRAM_INFO_OUT_SIZE_OFST 4 +#define MC_CMD_NVRAM_INFO_OUT_SIZE_LEN 4 #define MC_CMD_NVRAM_INFO_OUT_ERASESIZE_OFST 8 +#define MC_CMD_NVRAM_INFO_OUT_ERASESIZE_LEN 4 #define MC_CMD_NVRAM_INFO_OUT_FLAGS_OFST 12 +#define MC_CMD_NVRAM_INFO_OUT_FLAGS_LEN 4 #define MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN 0 #define MC_CMD_NVRAM_INFO_OUT_PROTECTED_WIDTH 1 #define MC_CMD_NVRAM_INFO_OUT_TLV_LBN 1 #define MC_CMD_NVRAM_INFO_OUT_TLV_WIDTH 1 +#define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_IF_TSA_BOUND_LBN 2 +#define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_IF_TSA_BOUND_WIDTH 1 #define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_LBN 5 #define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_WIDTH 1 #define MC_CMD_NVRAM_INFO_OUT_CMAC_LBN 6 @@ -6142,36 +4770,51 @@ #define MC_CMD_NVRAM_INFO_OUT_A_B_LBN 7 #define MC_CMD_NVRAM_INFO_OUT_A_B_WIDTH 1 #define MC_CMD_NVRAM_INFO_OUT_PHYSDEV_OFST 16 +#define MC_CMD_NVRAM_INFO_OUT_PHYSDEV_LEN 4 #define MC_CMD_NVRAM_INFO_OUT_PHYSADDR_OFST 20 +#define MC_CMD_NVRAM_INFO_OUT_PHYSADDR_LEN 4 /* MC_CMD_NVRAM_INFO_V2_OUT msgresponse */ #define MC_CMD_NVRAM_INFO_V2_OUT_LEN 28 #define MC_CMD_NVRAM_INFO_V2_OUT_TYPE_OFST 0 +#define MC_CMD_NVRAM_INFO_V2_OUT_TYPE_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ #define MC_CMD_NVRAM_INFO_V2_OUT_SIZE_OFST 4 +#define MC_CMD_NVRAM_INFO_V2_OUT_SIZE_LEN 4 #define MC_CMD_NVRAM_INFO_V2_OUT_ERASESIZE_OFST 8 +#define MC_CMD_NVRAM_INFO_V2_OUT_ERASESIZE_LEN 4 #define MC_CMD_NVRAM_INFO_V2_OUT_FLAGS_OFST 12 
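[Editor's note, not part of the patch: throughout this header each *_LBN/*_WIDTH pair names, for flag words such as the NVRAM_INFO FLAGS field here, the least-significant bit position within a single 32-bit field and the width of the flag in bits. A minimal sketch of decoding such a flag, under that assumed convention; mcdi_dword_field() is a hypothetical helper, and the 64-bit shift merely keeps the mask well defined when width is 32:

#include <stdint.h>

/* Extract the bit-field [lbn, lbn + width) from a host-endian dword. */
static inline uint32_t
mcdi_dword_field(uint32_t dword, unsigned int lbn, unsigned int width)
{
	return (dword >> lbn) & (uint32_t)((1ull << width) - 1);
}

/* Usage, e.g. testing the TLV flag in the NVRAM_INFO_V2 FLAGS dword:
 * tlv = mcdi_dword_field(flags, MC_CMD_NVRAM_INFO_V2_OUT_TLV_LBN,
 *                        MC_CMD_NVRAM_INFO_V2_OUT_TLV_WIDTH);
 * Note this applies to per-dword LBNs only; structuredef LBNs (e.g. the
 * MEMCPY_RECORD fields above) are relative to the start of the structure
 * and can exceed 31. End of editor's note.]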
+#define MC_CMD_NVRAM_INFO_V2_OUT_FLAGS_LEN 4 #define MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_LBN 0 #define MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_WIDTH 1 #define MC_CMD_NVRAM_INFO_V2_OUT_TLV_LBN 1 #define MC_CMD_NVRAM_INFO_V2_OUT_TLV_WIDTH 1 +#define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_IF_TSA_BOUND_LBN 2 +#define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_IF_TSA_BOUND_WIDTH 1 #define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_LBN 5 #define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_WIDTH 1 #define MC_CMD_NVRAM_INFO_V2_OUT_A_B_LBN 7 #define MC_CMD_NVRAM_INFO_V2_OUT_A_B_WIDTH 1 #define MC_CMD_NVRAM_INFO_V2_OUT_PHYSDEV_OFST 16 +#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSDEV_LEN 4 #define MC_CMD_NVRAM_INFO_V2_OUT_PHYSADDR_OFST 20 +#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSADDR_LEN 4 /* Writes must be multiples of this size. Added to support the MUM on Sorrento. */ #define MC_CMD_NVRAM_INFO_V2_OUT_WRITESIZE_OFST 24 +#define MC_CMD_NVRAM_INFO_V2_OUT_WRITESIZE_LEN 4 /***********************************/ /* MC_CMD_NVRAM_UPDATE_START * Start a group of update operations on a virtual NVRAM partition. Locks * required: PHY_LOCK if type==*PHY*. Returns: 0, EINVAL (bad type), EACCES (if - * PHY_LOCK required and not held). + * PHY_LOCK required and not held). In an adapter bound to a TSA controller, + * MC_CMD_NVRAM_UPDATE_START can only be used on a subset of partition types + * i.e. static config, dynamic config and expansion ROM config. Attempting to + * perform this operation on a restricted partition will return the error + * EPERM. */ #define MC_CMD_NVRAM_UPDATE_START 0x38 #undef MC_CMD_0x38_PRIVILEGE_CTG @@ -6183,6 +4826,7 @@ */ #define MC_CMD_NVRAM_UPDATE_START_IN_LEN 4 #define MC_CMD_NVRAM_UPDATE_START_IN_TYPE_OFST 0 +#define MC_CMD_NVRAM_UPDATE_START_IN_TYPE_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ @@ -6193,9 +4837,11 @@ */ #define MC_CMD_NVRAM_UPDATE_START_V2_IN_LEN 8 #define MC_CMD_NVRAM_UPDATE_START_V2_IN_TYPE_OFST 0 +#define MC_CMD_NVRAM_UPDATE_START_V2_IN_TYPE_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ #define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAGS_OFST 4 +#define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAGS_LEN 4 #define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT_LBN 0 #define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT_WIDTH 1 @@ -6217,20 +4863,26 @@ /* MC_CMD_NVRAM_READ_IN msgrequest */ #define MC_CMD_NVRAM_READ_IN_LEN 12 #define MC_CMD_NVRAM_READ_IN_TYPE_OFST 0 +#define MC_CMD_NVRAM_READ_IN_TYPE_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ #define MC_CMD_NVRAM_READ_IN_OFFSET_OFST 4 +#define MC_CMD_NVRAM_READ_IN_OFFSET_LEN 4 /* amount to read in bytes */ #define MC_CMD_NVRAM_READ_IN_LENGTH_OFST 8 +#define MC_CMD_NVRAM_READ_IN_LENGTH_LEN 4 /* MC_CMD_NVRAM_READ_IN_V2 msgrequest */ #define MC_CMD_NVRAM_READ_IN_V2_LEN 16 #define MC_CMD_NVRAM_READ_IN_V2_TYPE_OFST 0 +#define MC_CMD_NVRAM_READ_IN_V2_TYPE_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ #define MC_CMD_NVRAM_READ_IN_V2_OFFSET_OFST 4 +#define MC_CMD_NVRAM_READ_IN_V2_OFFSET_LEN 4 /* amount to read in bytes */ #define MC_CMD_NVRAM_READ_IN_V2_LENGTH_OFST 8 +#define MC_CMD_NVRAM_READ_IN_V2_LENGTH_LEN 4 /* Optional control info. If a partition is stored with an A/B versioning * scheme (i.e. 
in more than one physical partition in NVRAM) the host can set * this to control which underlying physical partition is used to read data @@ -6240,6 +4892,7 @@ * verifying by reading with MODE=TARGET_BACKUP. */ #define MC_CMD_NVRAM_READ_IN_V2_MODE_OFST 12 +#define MC_CMD_NVRAM_READ_IN_V2_MODE_LEN 4 /* enum: Same as omitting MODE: caller sees data in current partition unless it * holds the write lock in which case it sees data in the partition it is * updating. @@ -6280,10 +4933,13 @@ #define MC_CMD_NVRAM_WRITE_IN_LENMAX 252 #define MC_CMD_NVRAM_WRITE_IN_LEN(num) (12+1*(num)) #define MC_CMD_NVRAM_WRITE_IN_TYPE_OFST 0 +#define MC_CMD_NVRAM_WRITE_IN_TYPE_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ #define MC_CMD_NVRAM_WRITE_IN_OFFSET_OFST 4 +#define MC_CMD_NVRAM_WRITE_IN_OFFSET_LEN 4 #define MC_CMD_NVRAM_WRITE_IN_LENGTH_OFST 8 +#define MC_CMD_NVRAM_WRITE_IN_LENGTH_LEN 4 #define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_OFST 12 #define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_LEN 1 #define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MINNUM 1 @@ -6307,10 +4963,13 @@ /* MC_CMD_NVRAM_ERASE_IN msgrequest */ #define MC_CMD_NVRAM_ERASE_IN_LEN 12 #define MC_CMD_NVRAM_ERASE_IN_TYPE_OFST 0 +#define MC_CMD_NVRAM_ERASE_IN_TYPE_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ #define MC_CMD_NVRAM_ERASE_IN_OFFSET_OFST 4 +#define MC_CMD_NVRAM_ERASE_IN_OFFSET_LEN 4 #define MC_CMD_NVRAM_ERASE_IN_LENGTH_OFST 8 +#define MC_CMD_NVRAM_ERASE_IN_LENGTH_LEN 4 /* MC_CMD_NVRAM_ERASE_OUT msgresponse */ #define MC_CMD_NVRAM_ERASE_OUT_LEN 0 @@ -6319,8 +4978,12 @@ /***********************************/ /* MC_CMD_NVRAM_UPDATE_FINISH * Finish a group of update operations on a virtual NVRAM partition. Locks - * required: PHY_LOCK if type==*PHY*. Returns: 0, EINVAL (bad - * type/offset/length), EACCES (if PHY_LOCK required and not held) + * required: PHY_LOCK if type==*PHY*. Returns: 0, EINVAL (bad type/offset/ + * length), EACCES (if PHY_LOCK required and not held). In an adapter bound to + * a TSA controller, MC_CMD_NVRAM_UPDATE_FINISH can only be used on a subset of + * partition types i.e. static config, dynamic config and expansion ROM config. + * Attempting to perform this operation on a restricted partition will return + * the error EPERM. 
*/ #define MC_CMD_NVRAM_UPDATE_FINISH 0x3c #undef MC_CMD_0x3c_PRIVILEGE_CTG @@ -6332,9 +4995,11 @@ */ #define MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN 8 #define MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_OFST 0 +#define MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ #define MC_CMD_NVRAM_UPDATE_FINISH_IN_REBOOT_OFST 4 +#define MC_CMD_NVRAM_UPDATE_FINISH_IN_REBOOT_LEN 4 /* MC_CMD_NVRAM_UPDATE_FINISH_V2_IN msgrequest: Extended NVRAM_UPDATE_FINISH * request with additional flags indicating version of NVRAM_UPDATE commands in @@ -6343,10 +5008,13 @@ */ #define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_LEN 12 #define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_TYPE_OFST 0 +#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_TYPE_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ #define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_REBOOT_OFST 4 +#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_REBOOT_LEN 4 #define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAGS_OFST 8 +#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAGS_LEN 4 #define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT_LBN 0 #define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT_WIDTH 1 @@ -6373,6 +5041,7 @@ #define MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN 4 /* Result of nvram update completion processing */ #define MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE_OFST 0 +#define MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE_LEN 4 /* enum: Invalid return code; only non-zero values are defined. Defined as * unknown for backwards compatibility with NVRAM_UPDATE_FINISH_OUT. */ @@ -6407,6 +5076,8 @@ * only production signed images. */ #define MC_CMD_NVRAM_VERIFY_RC_REJECT_TEST_SIGNED 0xc +/* enum: The image has a lower security level than the current firmware. */ +#define MC_CMD_NVRAM_VERIFY_RC_SECURITY_LEVEL_DOWNGRADE 0xd /***********************************/ @@ -6430,11 +5101,12 @@ #define MC_CMD_REBOOT 0x3d #undef MC_CMD_0x3d_PRIVILEGE_CTG -#define MC_CMD_0x3d_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x3d_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND /* MC_CMD_REBOOT_IN msgrequest */ #define MC_CMD_REBOOT_IN_LEN 4 #define MC_CMD_REBOOT_IN_FLAGS_OFST 0 +#define MC_CMD_REBOOT_IN_FLAGS_LEN 4 #define MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION 0x1 /* enum */ /* MC_CMD_REBOOT_OUT msgresponse */ @@ -6473,11 +5145,12 @@ #define MC_CMD_REBOOT_MODE 0x3f #undef MC_CMD_0x3f_PRIVILEGE_CTG -#define MC_CMD_0x3f_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x3f_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_REBOOT_MODE_IN msgrequest */ #define MC_CMD_REBOOT_MODE_IN_LEN 4 #define MC_CMD_REBOOT_MODE_IN_VALUE_OFST 0 +#define MC_CMD_REBOOT_MODE_IN_VALUE_LEN 4 /* enum: Normal. */ #define MC_CMD_REBOOT_MODE_NORMAL 0x0 /* enum: Power-on Reset. */ @@ -6492,6 +5165,7 @@ /* MC_CMD_REBOOT_MODE_OUT msgresponse */ #define MC_CMD_REBOOT_MODE_OUT_LEN 4 #define MC_CMD_REBOOT_MODE_OUT_VALUE_OFST 0 +#define MC_CMD_REBOOT_MODE_OUT_VALUE_LEN 4 /***********************************/ @@ -6528,7 +5202,7 @@ #define MC_CMD_SENSOR_INFO 0x41 #undef MC_CMD_0x41_PRIVILEGE_CTG -#define MC_CMD_0x41_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x41_PRIVILEGE_CTG SRIOV_CTG_GENERAL /* MC_CMD_SENSOR_INFO_IN msgrequest */ #define MC_CMD_SENSOR_INFO_IN_LEN 0 @@ -6542,174 +5216,190 @@ * Page 1 contains sensors 32 to 62 (sensor 63 is the next page bit). etc. 
*/ #define MC_CMD_SENSOR_INFO_EXT_IN_PAGE_OFST 0 +#define MC_CMD_SENSOR_INFO_EXT_IN_PAGE_LEN 4 /* MC_CMD_SENSOR_INFO_OUT msgresponse */ #define MC_CMD_SENSOR_INFO_OUT_LENMIN 4 #define MC_CMD_SENSOR_INFO_OUT_LENMAX 252 #define MC_CMD_SENSOR_INFO_OUT_LEN(num) (4+8*(num)) #define MC_CMD_SENSOR_INFO_OUT_MASK_OFST 0 +#define MC_CMD_SENSOR_INFO_OUT_MASK_LEN 4 /* enum: Controller temperature: degC */ -#define MC_CMD_SENSOR_CONTROLLER_TEMP 0x0 +#define MC_CMD_SENSOR_CONTROLLER_TEMP 0x0 /* enum: Phy common temperature: degC */ -#define MC_CMD_SENSOR_PHY_COMMON_TEMP 0x1 +#define MC_CMD_SENSOR_PHY_COMMON_TEMP 0x1 /* enum: Controller cooling: bool */ -#define MC_CMD_SENSOR_CONTROLLER_COOLING 0x2 +#define MC_CMD_SENSOR_CONTROLLER_COOLING 0x2 /* enum: Phy 0 temperature: degC */ -#define MC_CMD_SENSOR_PHY0_TEMP 0x3 +#define MC_CMD_SENSOR_PHY0_TEMP 0x3 /* enum: Phy 0 cooling: bool */ -#define MC_CMD_SENSOR_PHY0_COOLING 0x4 +#define MC_CMD_SENSOR_PHY0_COOLING 0x4 /* enum: Phy 1 temperature: degC */ -#define MC_CMD_SENSOR_PHY1_TEMP 0x5 +#define MC_CMD_SENSOR_PHY1_TEMP 0x5 /* enum: Phy 1 cooling: bool */ -#define MC_CMD_SENSOR_PHY1_COOLING 0x6 +#define MC_CMD_SENSOR_PHY1_COOLING 0x6 /* enum: 1.0v power: mV */ -#define MC_CMD_SENSOR_IN_1V0 0x7 +#define MC_CMD_SENSOR_IN_1V0 0x7 /* enum: 1.2v power: mV */ -#define MC_CMD_SENSOR_IN_1V2 0x8 +#define MC_CMD_SENSOR_IN_1V2 0x8 /* enum: 1.8v power: mV */ -#define MC_CMD_SENSOR_IN_1V8 0x9 +#define MC_CMD_SENSOR_IN_1V8 0x9 /* enum: 2.5v power: mV */ -#define MC_CMD_SENSOR_IN_2V5 0xa +#define MC_CMD_SENSOR_IN_2V5 0xa /* enum: 3.3v power: mV */ -#define MC_CMD_SENSOR_IN_3V3 0xb +#define MC_CMD_SENSOR_IN_3V3 0xb /* enum: 12v power: mV */ -#define MC_CMD_SENSOR_IN_12V0 0xc +#define MC_CMD_SENSOR_IN_12V0 0xc /* enum: 1.2v analogue power: mV */ -#define MC_CMD_SENSOR_IN_1V2A 0xd +#define MC_CMD_SENSOR_IN_1V2A 0xd /* enum: reference voltage: mV */ -#define MC_CMD_SENSOR_IN_VREF 0xe +#define MC_CMD_SENSOR_IN_VREF 0xe /* enum: AOE FPGA power: mV */ -#define MC_CMD_SENSOR_OUT_VAOE 0xf +#define MC_CMD_SENSOR_OUT_VAOE 0xf /* enum: AOE FPGA temperature: degC */ -#define MC_CMD_SENSOR_AOE_TEMP 0x10 +#define MC_CMD_SENSOR_AOE_TEMP 0x10 /* enum: AOE FPGA PSU temperature: degC */ -#define MC_CMD_SENSOR_PSU_AOE_TEMP 0x11 +#define MC_CMD_SENSOR_PSU_AOE_TEMP 0x11 /* enum: AOE PSU temperature: degC */ -#define MC_CMD_SENSOR_PSU_TEMP 0x12 +#define MC_CMD_SENSOR_PSU_TEMP 0x12 /* enum: Fan 0 speed: RPM */ -#define MC_CMD_SENSOR_FAN_0 0x13 +#define MC_CMD_SENSOR_FAN_0 0x13 /* enum: Fan 1 speed: RPM */ -#define MC_CMD_SENSOR_FAN_1 0x14 +#define MC_CMD_SENSOR_FAN_1 0x14 /* enum: Fan 2 speed: RPM */ -#define MC_CMD_SENSOR_FAN_2 0x15 +#define MC_CMD_SENSOR_FAN_2 0x15 /* enum: Fan 3 speed: RPM */ -#define MC_CMD_SENSOR_FAN_3 0x16 +#define MC_CMD_SENSOR_FAN_3 0x16 /* enum: Fan 4 speed: RPM */ -#define MC_CMD_SENSOR_FAN_4 0x17 +#define MC_CMD_SENSOR_FAN_4 0x17 /* enum: AOE FPGA input power: mV */ -#define MC_CMD_SENSOR_IN_VAOE 0x18 +#define MC_CMD_SENSOR_IN_VAOE 0x18 /* enum: AOE FPGA current: mA */ -#define MC_CMD_SENSOR_OUT_IAOE 0x19 +#define MC_CMD_SENSOR_OUT_IAOE 0x19 /* enum: AOE FPGA input current: mA */ -#define MC_CMD_SENSOR_IN_IAOE 0x1a +#define MC_CMD_SENSOR_IN_IAOE 0x1a /* enum: NIC power consumption: W */ -#define MC_CMD_SENSOR_NIC_POWER 0x1b +#define MC_CMD_SENSOR_NIC_POWER 0x1b /* enum: 0.9v power voltage: mV */ -#define MC_CMD_SENSOR_IN_0V9 0x1c +#define MC_CMD_SENSOR_IN_0V9 0x1c /* enum: 0.9v power current: mA */ -#define MC_CMD_SENSOR_IN_I0V9 0x1d +#define MC_CMD_SENSOR_IN_I0V9 0x1d 
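[Editor's note, not part of the patch: the paging rule described in the MC_CMD_SENSOR_INFO_EXT comment above (page 0 holds sensors 0 to 30, bit 31 is the next-page flag, page 1 holds sensors 32 to 62, and so on) means sensor N lives on page N / 32 at bit N % 32. A sketch of testing support for one sensor under that rule; read_sensor_page_mask() is a hypothetical wrapper that issues MC_CMD_SENSOR_INFO_EXT with the given PAGE and returns the MASK dword:

#include <stdint.h>

uint32_t read_sensor_page_mask(unsigned int page); /* hypothetical MCDI wrapper */

static int sensor_is_supported(unsigned int sensor)
{
	unsigned int page = sensor / 32;
	unsigned int bit = sensor % 32;

	if (bit == 31)
		return 0; /* bit 31 is the NEXT_PAGE flag, not a sensor */
	return (read_sensor_page_mask(page) >> bit) & 1;
}

A full enumeration would keep requesting pages while bit 31 (MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_LBN) of the current page's mask is set. End of editor's note.]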
/* enum: 1.2v power current: mA */ -#define MC_CMD_SENSOR_IN_I1V2 0x1e +#define MC_CMD_SENSOR_IN_I1V2 0x1e /* enum: Not a sensor: reserved for the next page flag */ -#define MC_CMD_SENSOR_PAGE0_NEXT 0x1f +#define MC_CMD_SENSOR_PAGE0_NEXT 0x1f /* enum: 0.9v power voltage (at ADC): mV */ -#define MC_CMD_SENSOR_IN_0V9_ADC 0x20 +#define MC_CMD_SENSOR_IN_0V9_ADC 0x20 /* enum: Controller temperature 2: degC */ -#define MC_CMD_SENSOR_CONTROLLER_2_TEMP 0x21 +#define MC_CMD_SENSOR_CONTROLLER_2_TEMP 0x21 /* enum: Voltage regulator internal temperature: degC */ -#define MC_CMD_SENSOR_VREG_INTERNAL_TEMP 0x22 +#define MC_CMD_SENSOR_VREG_INTERNAL_TEMP 0x22 /* enum: 0.9V voltage regulator temperature: degC */ -#define MC_CMD_SENSOR_VREG_0V9_TEMP 0x23 +#define MC_CMD_SENSOR_VREG_0V9_TEMP 0x23 /* enum: 1.2V voltage regulator temperature: degC */ -#define MC_CMD_SENSOR_VREG_1V2_TEMP 0x24 +#define MC_CMD_SENSOR_VREG_1V2_TEMP 0x24 /* enum: controller internal temperature sensor voltage (internal ADC): mV */ -#define MC_CMD_SENSOR_CONTROLLER_VPTAT 0x25 +#define MC_CMD_SENSOR_CONTROLLER_VPTAT 0x25 /* enum: controller internal temperature (internal ADC): degC */ -#define MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP 0x26 +#define MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP 0x26 /* enum: controller internal temperature sensor voltage (external ADC): mV */ -#define MC_CMD_SENSOR_CONTROLLER_VPTAT_EXTADC 0x27 +#define MC_CMD_SENSOR_CONTROLLER_VPTAT_EXTADC 0x27 /* enum: controller internal temperature (external ADC): degC */ -#define MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP_EXTADC 0x28 +#define MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP_EXTADC 0x28 /* enum: ambient temperature: degC */ -#define MC_CMD_SENSOR_AMBIENT_TEMP 0x29 +#define MC_CMD_SENSOR_AMBIENT_TEMP 0x29 /* enum: air flow: bool */ -#define MC_CMD_SENSOR_AIRFLOW 0x2a +#define MC_CMD_SENSOR_AIRFLOW 0x2a /* enum: voltage between VSS08D and VSS08D at CSR: mV */ -#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR 0x2b +#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR 0x2b /* enum: voltage between VSS08D and VSS08D at CSR (external ADC): mV */ -#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR_EXTADC 0x2c +#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR_EXTADC 0x2c /* enum: Hotpoint temperature: degC */ -#define MC_CMD_SENSOR_HOTPOINT_TEMP 0x2d +#define MC_CMD_SENSOR_HOTPOINT_TEMP 0x2d /* enum: Port 0 PHY power switch over-current: bool */ -#define MC_CMD_SENSOR_PHY_POWER_PORT0 0x2e +#define MC_CMD_SENSOR_PHY_POWER_PORT0 0x2e /* enum: Port 1 PHY power switch over-current: bool */ -#define MC_CMD_SENSOR_PHY_POWER_PORT1 0x2f -/* enum: Mop-up microcontroller reference voltage (millivolts) */ -#define MC_CMD_SENSOR_MUM_VCC 0x30 +#define MC_CMD_SENSOR_PHY_POWER_PORT1 0x2f +/* enum: Mop-up microcontroller reference voltage: mV */ +#define MC_CMD_SENSOR_MUM_VCC 0x30 /* enum: 0.9v power phase A voltage: mV */ -#define MC_CMD_SENSOR_IN_0V9_A 0x31 +#define MC_CMD_SENSOR_IN_0V9_A 0x31 /* enum: 0.9v power phase A current: mA */ -#define MC_CMD_SENSOR_IN_I0V9_A 0x32 +#define MC_CMD_SENSOR_IN_I0V9_A 0x32 /* enum: 0.9V voltage regulator phase A temperature: degC */ -#define MC_CMD_SENSOR_VREG_0V9_A_TEMP 0x33 +#define MC_CMD_SENSOR_VREG_0V9_A_TEMP 0x33 /* enum: 0.9v power phase B voltage: mV */ -#define MC_CMD_SENSOR_IN_0V9_B 0x34 +#define MC_CMD_SENSOR_IN_0V9_B 0x34 /* enum: 0.9v power phase B current: mA */ -#define MC_CMD_SENSOR_IN_I0V9_B 0x35 +#define MC_CMD_SENSOR_IN_I0V9_B 0x35 /* enum: 0.9V voltage regulator phase B temperature: degC */ -#define MC_CMD_SENSOR_VREG_0V9_B_TEMP 0x36 +#define MC_CMD_SENSOR_VREG_0V9_B_TEMP 0x36 
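[Editor's note, not part of the patch: the sensor numbers in this list index readings DMAd by MC_CMD_READ_SENSORS, each reading being a 4-byte MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF (its STATE_OFST 2 and TYPE_OFST 3 fields appear later in this hunk; the 16-bit VALUE at offset 0 is inferred here from STATE_LBN 16 and is an assumption). A sketch of decoding one such entry, assuming MCDI's little-endian byte order:

#include <stdint.h>

struct sensor_reading {
	uint16_t value; /* units depend on TYPE: degC, mV, mA, bool, RPM */
	uint8_t state;  /* MC_CMD_SENSOR_STATE_OK / _WARNING / _FATAL / ... */
	uint8_t type;   /* one of the MC_CMD_SENSOR_* enum values above */
};

static struct sensor_reading decode_sensor_entry(const uint8_t *p)
{
	struct sensor_reading r;

	r.value = (uint16_t)(p[0] | (p[1] << 8)); /* assumed VALUE at offset 0, 2 bytes */
	r.state = p[2];                           /* STATE_OFST 2, STATE_LEN 1 */
	r.type = p[3];                            /* TYPE_OFST 3 */
	return r;
}

End of editor's note.]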
/* enum: CCOM AVREG 1v2 supply (interval ADC): mV */ -#define MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY 0x37 +#define MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY 0x37 /* enum: CCOM AVREG 1v2 supply (external ADC): mV */ -#define MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY_EXTADC 0x38 +#define MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY_EXTADC 0x38 /* enum: CCOM AVREG 1v8 supply (interval ADC): mV */ -#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY 0x39 +#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY 0x39 /* enum: CCOM AVREG 1v8 supply (external ADC): mV */ -#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY_EXTADC 0x3a +#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY_EXTADC 0x3a /* enum: CCOM RTS temperature: degC */ -#define MC_CMD_SENSOR_CONTROLLER_RTS 0x3b +#define MC_CMD_SENSOR_CONTROLLER_RTS 0x3b /* enum: Not a sensor: reserved for the next page flag */ -#define MC_CMD_SENSOR_PAGE1_NEXT 0x3f +#define MC_CMD_SENSOR_PAGE1_NEXT 0x3f /* enum: controller internal temperature sensor voltage on master core * (internal ADC): mV */ -#define MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT 0x40 +#define MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT 0x40 /* enum: controller internal temperature on master core (internal ADC): degC */ -#define MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP 0x41 +#define MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP 0x41 /* enum: controller internal temperature sensor voltage on master core * (external ADC): mV */ -#define MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT_EXTADC 0x42 +#define MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT_EXTADC 0x42 /* enum: controller internal temperature on master core (external ADC): degC */ -#define MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP_EXTADC 0x43 +#define MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP_EXTADC 0x43 /* enum: controller internal temperature on slave core sensor voltage (internal * ADC): mV */ -#define MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT 0x44 +#define MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT 0x44 /* enum: controller internal temperature on slave core (internal ADC): degC */ -#define MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP 0x45 +#define MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP 0x45 /* enum: controller internal temperature on slave core sensor voltage (external * ADC): mV */ -#define MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT_EXTADC 0x46 +#define MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT_EXTADC 0x46 /* enum: controller internal temperature on slave core (external ADC): degC */ -#define MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP_EXTADC 0x47 +#define MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP_EXTADC 0x47 /* enum: Voltage supplied to the SODIMMs from their power supply: mV */ -#define MC_CMD_SENSOR_SODIMM_VOUT 0x49 +#define MC_CMD_SENSOR_SODIMM_VOUT 0x49 /* enum: Temperature of SODIMM 0 (if installed): degC */ -#define MC_CMD_SENSOR_SODIMM_0_TEMP 0x4a +#define MC_CMD_SENSOR_SODIMM_0_TEMP 0x4a /* enum: Temperature of SODIMM 1 (if installed): degC */ -#define MC_CMD_SENSOR_SODIMM_1_TEMP 0x4b +#define MC_CMD_SENSOR_SODIMM_1_TEMP 0x4b /* enum: Voltage supplied to the QSFP #0 from their power supply: mV */ -#define MC_CMD_SENSOR_PHY0_VCC 0x4c +#define MC_CMD_SENSOR_PHY0_VCC 0x4c /* enum: Voltage supplied to the QSFP #1 from their power supply: mV */ -#define MC_CMD_SENSOR_PHY1_VCC 0x4d +#define MC_CMD_SENSOR_PHY1_VCC 0x4d /* enum: Controller die temperature (TDIODE): degC */ -#define MC_CMD_SENSOR_CONTROLLER_TDIODE_TEMP 0x4e +#define MC_CMD_SENSOR_CONTROLLER_TDIODE_TEMP 0x4e /* enum: Board temperature (front): degC */ -#define MC_CMD_SENSOR_BOARD_FRONT_TEMP 0x4f +#define MC_CMD_SENSOR_BOARD_FRONT_TEMP 
0x4f /* enum: Board temperature (back): degC */ -#define MC_CMD_SENSOR_BOARD_BACK_TEMP 0x50 +#define MC_CMD_SENSOR_BOARD_BACK_TEMP 0x50 +/* enum: 1.8v power current: mA */ +#define MC_CMD_SENSOR_IN_I1V8 0x51 +/* enum: 2.5v power current: mA */ +#define MC_CMD_SENSOR_IN_I2V5 0x52 +/* enum: 3.3v power current: mA */ +#define MC_CMD_SENSOR_IN_I3V3 0x53 +/* enum: 12v power current: mA */ +#define MC_CMD_SENSOR_IN_I12V0 0x54 +/* enum: 1.3v power: mV */ +#define MC_CMD_SENSOR_IN_1V3 0x55 +/* enum: 1.3v power current: mA */ +#define MC_CMD_SENSOR_IN_I1V3 0x56 +/* enum: Not a sensor: reserved for the next page flag */ +#define MC_CMD_SENSOR_PAGE2_NEXT 0x5f /* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */ #define MC_CMD_SENSOR_ENTRY_OFST 4 #define MC_CMD_SENSOR_ENTRY_LEN 8 @@ -6723,6 +5413,7 @@ #define MC_CMD_SENSOR_INFO_EXT_OUT_LENMAX 252 #define MC_CMD_SENSOR_INFO_EXT_OUT_LEN(num) (4+8*(num)) #define MC_CMD_SENSOR_INFO_EXT_OUT_MASK_OFST 0 +#define MC_CMD_SENSOR_INFO_EXT_OUT_MASK_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_SENSOR_INFO_OUT */ #define MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_LBN 31 @@ -6775,7 +5466,7 @@ #define MC_CMD_READ_SENSORS 0x42 #undef MC_CMD_0x42_PRIVILEGE_CTG -#define MC_CMD_0x42_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x42_PRIVILEGE_CTG SRIOV_CTG_GENERAL /* MC_CMD_READ_SENSORS_IN msgrequest */ #define MC_CMD_READ_SENSORS_IN_LEN 8 @@ -6794,6 +5485,7 @@ #define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_HI_OFST 4 /* Size in bytes of host buffer. */ #define MC_CMD_READ_SENSORS_EXT_IN_LENGTH_OFST 8 +#define MC_CMD_READ_SENSORS_EXT_IN_LENGTH_LEN 4 /* MC_CMD_READ_SENSORS_OUT msgresponse */ #define MC_CMD_READ_SENSORS_OUT_LEN 0 @@ -6810,17 +5502,17 @@ #define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_OFST 2 #define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LEN 1 /* enum: Ok. */ -#define MC_CMD_SENSOR_STATE_OK 0x0 +#define MC_CMD_SENSOR_STATE_OK 0x0 /* enum: Breached warning threshold. */ -#define MC_CMD_SENSOR_STATE_WARNING 0x1 +#define MC_CMD_SENSOR_STATE_WARNING 0x1 /* enum: Breached fatal threshold. */ -#define MC_CMD_SENSOR_STATE_FATAL 0x2 +#define MC_CMD_SENSOR_STATE_FATAL 0x2 /* enum: Fault with sensor. */ -#define MC_CMD_SENSOR_STATE_BROKEN 0x3 +#define MC_CMD_SENSOR_STATE_BROKEN 0x3 /* enum: Sensor is working but does not currently have a reading. */ -#define MC_CMD_SENSOR_STATE_NO_READING 0x4 +#define MC_CMD_SENSOR_STATE_NO_READING 0x4 /* enum: Sensor initialisation failed. */ -#define MC_CMD_SENSOR_STATE_INIT_FAILED 0x5 +#define MC_CMD_SENSOR_STATE_INIT_FAILED 0x5 #define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LBN 16 #define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_WIDTH 8 #define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_OFST 3 @@ -6848,6 +5540,7 @@ /* MC_CMD_GET_PHY_STATE_OUT msgresponse */ #define MC_CMD_GET_PHY_STATE_OUT_LEN 4 #define MC_CMD_GET_PHY_STATE_OUT_STATE_OFST 0 +#define MC_CMD_GET_PHY_STATE_OUT_STATE_LEN 4 /* enum: Ok. */ #define MC_CMD_PHY_STATE_OK 0x1 /* enum: Faulty. 
*/ @@ -6885,6 +5578,7 @@ /* MC_CMD_WOL_FILTER_GET_OUT msgresponse */ #define MC_CMD_WOL_FILTER_GET_OUT_LEN 4 #define MC_CMD_WOL_FILTER_GET_OUT_FILTER_ID_OFST 0 +#define MC_CMD_WOL_FILTER_GET_OUT_FILTER_ID_LEN 4 /***********************************/ @@ -6902,8 +5596,9 @@ #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMAX 252 #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LEN(num) (4+4*(num)) #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 #define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_ARP 0x1 /* enum */ -#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_NS 0x2 /* enum */ +#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_NS 0x2 /* enum */ #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_OFST 4 #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_LEN 4 #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MINNUM 1 @@ -6912,13 +5607,16 @@ /* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP msgrequest */ #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_LEN 14 /* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */ +/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 */ #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_OFST 4 #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_LEN 6 #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP_OFST 10 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP_LEN 4 /* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS msgrequest */ #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_LEN 42 /* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */ +/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 */ #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_OFST 4 #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_LEN 6 #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_SNIPV6_OFST 10 @@ -6929,6 +5627,7 @@ /* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT msgresponse */ #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_LEN 4 #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_OFST 0 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_LEN 4 /***********************************/ @@ -6944,7 +5643,9 @@ /* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN msgrequest */ #define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_LEN 8 #define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 +#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 #define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_OFST 4 +#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_LEN 4 /* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT msgresponse */ #define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT_LEN 0 @@ -6972,7 +5673,7 @@ #define MC_CMD_TESTASSERT 0x49 #undef MC_CMD_0x49_PRIVILEGE_CTG -#define MC_CMD_0x49_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x49_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND /* MC_CMD_TESTASSERT_IN msgrequest */ #define MC_CMD_TESTASSERT_IN_LEN 0 @@ -6984,20 +5685,21 @@ #define MC_CMD_TESTASSERT_V2_IN_LEN 4 /* How to provoke the assertion */ #define MC_CMD_TESTASSERT_V2_IN_TYPE_OFST 0 +#define MC_CMD_TESTASSERT_V2_IN_TYPE_LEN 4 /* enum: Assert using the FAIL_ASSERTION_WITH_USEFUL_VALUES macro. Unless * you're testing firmware, this is what you want. 
*/ -#define MC_CMD_TESTASSERT_V2_IN_FAIL_ASSERTION_WITH_USEFUL_VALUES 0x0 +#define MC_CMD_TESTASSERT_V2_IN_FAIL_ASSERTION_WITH_USEFUL_VALUES 0x0 /* enum: Assert using assert(0); */ -#define MC_CMD_TESTASSERT_V2_IN_ASSERT_FALSE 0x1 +#define MC_CMD_TESTASSERT_V2_IN_ASSERT_FALSE 0x1 /* enum: Deliberately trigger a watchdog */ -#define MC_CMD_TESTASSERT_V2_IN_WATCHDOG 0x2 +#define MC_CMD_TESTASSERT_V2_IN_WATCHDOG 0x2 /* enum: Deliberately trigger a trap by loading from an invalid address */ -#define MC_CMD_TESTASSERT_V2_IN_LOAD_TRAP 0x3 +#define MC_CMD_TESTASSERT_V2_IN_LOAD_TRAP 0x3 /* enum: Deliberately trigger a trap by storing to an invalid address */ -#define MC_CMD_TESTASSERT_V2_IN_STORE_TRAP 0x4 +#define MC_CMD_TESTASSERT_V2_IN_STORE_TRAP 0x4 /* enum: Jump to an invalid address */ -#define MC_CMD_TESTASSERT_V2_IN_JUMP_TRAP 0x5 +#define MC_CMD_TESTASSERT_V2_IN_JUMP_TRAP 0x5 /* MC_CMD_TESTASSERT_V2_OUT msgresponse */ #define MC_CMD_TESTASSERT_V2_OUT_LEN 0 @@ -7020,6 +5722,7 @@ #define MC_CMD_WORKAROUND_IN_LEN 8 /* The enums here must correspond with those in MC_CMD_GET_WORKAROUND. */ #define MC_CMD_WORKAROUND_IN_TYPE_OFST 0 +#define MC_CMD_WORKAROUND_IN_TYPE_LEN 4 /* enum: Bug 17230 work around. */ #define MC_CMD_WORKAROUND_BUG17230 0x1 /* enum: Bug 35388 work around (unsafe EVQ writes). */ @@ -7048,6 +5751,7 @@ * the workaround */ #define MC_CMD_WORKAROUND_IN_ENABLED_OFST 4 +#define MC_CMD_WORKAROUND_IN_ENABLED_LEN 4 /* MC_CMD_WORKAROUND_OUT msgresponse */ #define MC_CMD_WORKAROUND_OUT_LEN 0 @@ -7057,6 +5761,7 @@ */ #define MC_CMD_WORKAROUND_EXT_OUT_LEN 4 #define MC_CMD_WORKAROUND_EXT_OUT_FLAGS_OFST 0 +#define MC_CMD_WORKAROUND_EXT_OUT_FLAGS_LEN 4 #define MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN 0 #define MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_WIDTH 1 @@ -7078,6 +5783,7 @@ /* MC_CMD_GET_PHY_MEDIA_INFO_IN msgrequest */ #define MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN 4 #define MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_OFST 0 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_LEN 4 /* MC_CMD_GET_PHY_MEDIA_INFO_OUT msgresponse */ #define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMIN 5 @@ -7085,6 +5791,7 @@ #define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(num) (4+1*(num)) /* in bytes */ #define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_OFST 0 +#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_LEN 4 #define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST 4 #define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_LEN 1 #define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MINNUM 1 @@ -7099,17 +5806,19 @@ #define MC_CMD_NVRAM_TEST 0x4c #undef MC_CMD_0x4c_PRIVILEGE_CTG -#define MC_CMD_0x4c_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x4c_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND /* MC_CMD_NVRAM_TEST_IN msgrequest */ #define MC_CMD_NVRAM_TEST_IN_LEN 4 #define MC_CMD_NVRAM_TEST_IN_TYPE_OFST 0 +#define MC_CMD_NVRAM_TEST_IN_TYPE_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ /* MC_CMD_NVRAM_TEST_OUT msgresponse */ #define MC_CMD_NVRAM_TEST_OUT_LEN 4 #define MC_CMD_NVRAM_TEST_OUT_RESULT_OFST 0 +#define MC_CMD_NVRAM_TEST_OUT_RESULT_LEN 4 /* enum: Passed. */ #define MC_CMD_NVRAM_TEST_PASS 0x0 /* enum: Failed. */ @@ -7130,12 +5839,16 @@ #define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_LEN 16 /* 0-6 low->high de-emph. 
*/ #define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_OFST 0 +#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_LEN 4 /* 0-8 low->high ref.V */ #define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_OFST 4 +#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_LEN 4 /* 0-8 low->high boost */ #define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_OFST 8 +#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_LEN 4 /* 0-8 low->high ref.V */ #define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_OFST 12 +#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_LEN 4 /* MC_CMD_MRSFP_TWEAK_IN_READ_ONLY msgrequest */ #define MC_CMD_MRSFP_TWEAK_IN_READ_ONLY_LEN 0 @@ -7144,10 +5857,13 @@ #define MC_CMD_MRSFP_TWEAK_OUT_LEN 12 /* input bits */ #define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_OFST 0 +#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_LEN 4 /* output bits */ #define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_OFST 4 +#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_LEN 4 /* direction */ #define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OFST 8 +#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_LEN 4 /* enum: Out. */ #define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OUT 0x0 /* enum: In. */ @@ -7163,21 +5879,26 @@ #define MC_CMD_SENSOR_SET_LIMS 0x4e #undef MC_CMD_0x4e_PRIVILEGE_CTG -#define MC_CMD_0x4e_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x4e_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_SENSOR_SET_LIMS_IN msgrequest */ #define MC_CMD_SENSOR_SET_LIMS_IN_LEN 20 #define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_OFST 0 +#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */ /* interpretation is sensor-specific. */ #define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_OFST 4 +#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_LEN 4 /* interpretation is sensor-specific. */ #define MC_CMD_SENSOR_SET_LIMS_IN_HI0_OFST 8 +#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_LEN 4 /* interpretation is sensor-specific. */ #define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_OFST 12 +#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_LEN 4 /* interpretation is sensor-specific.
*/ #define MC_CMD_SENSOR_SET_LIMS_IN_HI1_OFST 16 +#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_LEN 4 /* MC_CMD_SENSOR_SET_LIMS_OUT msgresponse */ #define MC_CMD_SENSOR_SET_LIMS_OUT_LEN 0 @@ -7194,9 +5915,13 @@ /* MC_CMD_GET_RESOURCE_LIMITS_OUT msgresponse */ #define MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN 16 #define MC_CMD_GET_RESOURCE_LIMITS_OUT_BUFTBL_OFST 0 +#define MC_CMD_GET_RESOURCE_LIMITS_OUT_BUFTBL_LEN 4 #define MC_CMD_GET_RESOURCE_LIMITS_OUT_EVQ_OFST 4 +#define MC_CMD_GET_RESOURCE_LIMITS_OUT_EVQ_LEN 4 #define MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_OFST 8 +#define MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_LEN 4 #define MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_OFST 12 +#define MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_LEN 4 /***********************************/ @@ -7218,6 +5943,7 @@ #define MC_CMD_NVRAM_PARTITIONS_OUT_LEN(num) (4+4*(num)) /* total number of partitions */ #define MC_CMD_NVRAM_PARTITIONS_OUT_NUM_PARTITIONS_OFST 0 +#define MC_CMD_NVRAM_PARTITIONS_OUT_NUM_PARTITIONS_LEN 4 /* type ID code for each of NUM_PARTITIONS partitions */ #define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_OFST 4 #define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_LEN 4 @@ -7239,6 +5965,7 @@ #define MC_CMD_NVRAM_METADATA_IN_LEN 4 /* Partition type ID code */ #define MC_CMD_NVRAM_METADATA_IN_TYPE_OFST 0 +#define MC_CMD_NVRAM_METADATA_IN_TYPE_LEN 4 /* MC_CMD_NVRAM_METADATA_OUT msgresponse */ #define MC_CMD_NVRAM_METADATA_OUT_LENMIN 20 @@ -7246,7 +5973,9 @@ #define MC_CMD_NVRAM_METADATA_OUT_LEN(num) (20+1*(num)) /* Partition type ID code */ #define MC_CMD_NVRAM_METADATA_OUT_TYPE_OFST 0 +#define MC_CMD_NVRAM_METADATA_OUT_TYPE_LEN 4 #define MC_CMD_NVRAM_METADATA_OUT_FLAGS_OFST 4 +#define MC_CMD_NVRAM_METADATA_OUT_FLAGS_LEN 4 #define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN 0 #define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_WIDTH 1 #define MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_LBN 1 @@ -7255,6 +5984,7 @@ #define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_WIDTH 1 /* Subtype ID code for content of this partition */ #define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_OFST 8 +#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_LEN 4 /* 1st component of W.X.Y.Z version number for content of this partition */ #define MC_CMD_NVRAM_METADATA_OUT_VERSION_W_OFST 12 #define MC_CMD_NVRAM_METADATA_OUT_VERSION_W_LEN 2 @@ -7296,8 +6026,10 @@ #define MC_CMD_GET_MAC_ADDRESSES_OUT_RESERVED_LEN 2 /* Number of allocated MAC addresses */ #define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_COUNT_OFST 8 +#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_COUNT_LEN 4 /* Spacing of allocated MAC addresses */ #define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_OFST 12 +#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_LEN 4 /***********************************/ @@ -7307,12 +6039,13 @@ #define MC_CMD_CLP 0x56 #undef MC_CMD_0x56_PRIVILEGE_CTG -#define MC_CMD_0x56_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x56_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND /* MC_CMD_CLP_IN msgrequest */ #define MC_CMD_CLP_IN_LEN 4 /* Sub operation */ #define MC_CMD_CLP_IN_OP_OFST 0 +#define MC_CMD_CLP_IN_OP_LEN 4 /* enum: Return to factory default settings */ #define MC_CMD_CLP_OP_DEFAULT 0x1 /* enum: Set MAC address */ @@ -7330,6 +6063,7 @@ /* MC_CMD_CLP_IN_DEFAULT msgrequest */ #define MC_CMD_CLP_IN_DEFAULT_LEN 4 /* MC_CMD_CLP_IN_OP_OFST 0 */ +/* MC_CMD_CLP_IN_OP_LEN 4 */ /* MC_CMD_CLP_OUT_DEFAULT msgresponse */ #define MC_CMD_CLP_OUT_DEFAULT_LEN 0 @@ -7337,6 +6071,7 @@ /* MC_CMD_CLP_IN_SET_MAC msgrequest */ #define MC_CMD_CLP_IN_SET_MAC_LEN 12 /* MC_CMD_CLP_IN_OP_OFST 0 */ +/* MC_CMD_CLP_IN_OP_LEN 4 */
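MC_CMD_NVRAM_PARTITIONS_OUT above is a typical variable-length MCDI response: a 32-bit partition count followed by one 32-bit type ID per partition, which is why its length macro is (4+4*(num)). A hedged sketch of walking such a response, with mcdi_read_dword() as a hypothetical accessor for the little-endian MCDI buffer:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* Hypothetical little-endian dword read from an MCDI response buffer
     * (assumes a little-endian host). */
    static uint32_t mcdi_read_dword(const uint8_t *buf, size_t ofst)
    {
            uint32_t v;

            memcpy(&v, buf + ofst, sizeof(v));
            return v;
    }

    static void list_nvram_partitions(const uint8_t *resp, size_t resp_len)
    {
            uint32_t num = mcdi_read_dword(resp, 0); /* NUM_PARTITIONS_OFST */

            for (uint32_t i = 0; i < num && 4 + 4 * (i + 1) <= resp_len; i++) {
                    uint32_t type = mcdi_read_dword(resp, 4 + 4 * i); /* TYPE_ID */
                    /* type is one of the NVRAM_PARTITION_TYPE_* values below */
                    (void)type;
            }
    }

/* MAC address assigned to port */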
#define MC_CMD_CLP_IN_SET_MAC_ADDR_OFST 4 #define MC_CMD_CLP_IN_SET_MAC_ADDR_LEN 6 @@ -7350,6 +6085,7 @@ /* MC_CMD_CLP_IN_GET_MAC msgrequest */ #define MC_CMD_CLP_IN_GET_MAC_LEN 4 /* MC_CMD_CLP_IN_OP_OFST 0 */ +/* MC_CMD_CLP_IN_OP_LEN 4 */ /* MC_CMD_CLP_OUT_GET_MAC msgresponse */ #define MC_CMD_CLP_OUT_GET_MAC_LEN 8 @@ -7363,6 +6099,7 @@ /* MC_CMD_CLP_IN_SET_BOOT msgrequest */ #define MC_CMD_CLP_IN_SET_BOOT_LEN 5 /* MC_CMD_CLP_IN_OP_OFST 0 */ +/* MC_CMD_CLP_IN_OP_LEN 4 */ /* Boot flag */ #define MC_CMD_CLP_IN_SET_BOOT_FLAG_OFST 4 #define MC_CMD_CLP_IN_SET_BOOT_FLAG_LEN 1 @@ -7373,6 +6110,7 @@ /* MC_CMD_CLP_IN_GET_BOOT msgrequest */ #define MC_CMD_CLP_IN_GET_BOOT_LEN 4 /* MC_CMD_CLP_IN_OP_OFST 0 */ +/* MC_CMD_CLP_IN_OP_LEN 4 */ /* MC_CMD_CLP_OUT_GET_BOOT msgresponse */ #define MC_CMD_CLP_OUT_GET_BOOT_LEN 4 @@ -7391,11 +6129,12 @@ #define MC_CMD_MUM 0x57 #undef MC_CMD_0x57_PRIVILEGE_CTG -#define MC_CMD_0x57_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x57_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_MUM_IN msgrequest */ #define MC_CMD_MUM_IN_LEN 4 #define MC_CMD_MUM_IN_OP_HDR_OFST 0 +#define MC_CMD_MUM_IN_OP_HDR_LEN 4 #define MC_CMD_MUM_IN_OP_LBN 0 #define MC_CMD_MUM_IN_OP_WIDTH 8 /* enum: NULL MCDI command to MUM */ @@ -7435,26 +6174,32 @@ #define MC_CMD_MUM_IN_NULL_LEN 4 /* MUM cmd header */ #define MC_CMD_MUM_IN_CMD_OFST 0 +#define MC_CMD_MUM_IN_CMD_LEN 4 /* MC_CMD_MUM_IN_GET_VERSION msgrequest */ #define MC_CMD_MUM_IN_GET_VERSION_LEN 4 /* MUM cmd header */ /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ /* MC_CMD_MUM_IN_READ msgrequest */ #define MC_CMD_MUM_IN_READ_LEN 16 /* MUM cmd header */ /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ /* ID of (device connected to MUM) to read from registers of */ #define MC_CMD_MUM_IN_READ_DEVICE_OFST 4 +#define MC_CMD_MUM_IN_READ_DEVICE_LEN 4 /* enum: Hittite HMC1035 clock generator on Sorrento board */ #define MC_CMD_MUM_DEV_HITTITE 0x1 /* enum: Hittite HMC1035 clock generator for NIC-side on Sorrento board */ #define MC_CMD_MUM_DEV_HITTITE_NIC 0x2 /* 32-bit address to read from */ #define MC_CMD_MUM_IN_READ_ADDR_OFST 8 +#define MC_CMD_MUM_IN_READ_ADDR_LEN 4 /* Number of words to read. 
*/ #define MC_CMD_MUM_IN_READ_NUMWORDS_OFST 12 +#define MC_CMD_MUM_IN_READ_NUMWORDS_LEN 4 /* MC_CMD_MUM_IN_WRITE msgrequest */ #define MC_CMD_MUM_IN_WRITE_LENMIN 16 @@ -7462,12 +6207,15 @@ #define MC_CMD_MUM_IN_WRITE_LEN(num) (12+4*(num)) /* MUM cmd header */ /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ /* ID of (device connected to MUM) to write to registers of */ #define MC_CMD_MUM_IN_WRITE_DEVICE_OFST 4 +#define MC_CMD_MUM_IN_WRITE_DEVICE_LEN 4 /* enum: Hittite HMC1035 clock generator on Sorrento board */ /* MC_CMD_MUM_DEV_HITTITE 0x1 */ /* 32-bit address to write to */ #define MC_CMD_MUM_IN_WRITE_ADDR_OFST 8 +#define MC_CMD_MUM_IN_WRITE_ADDR_LEN 4 /* Words to write */ #define MC_CMD_MUM_IN_WRITE_BUFFER_OFST 12 #define MC_CMD_MUM_IN_WRITE_BUFFER_LEN 4 @@ -7480,12 +6228,16 @@ #define MC_CMD_MUM_IN_RAW_CMD_LEN(num) (16+1*(num)) /* MUM cmd header */ /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ /* MUM I2C cmd code */ #define MC_CMD_MUM_IN_RAW_CMD_CMD_CODE_OFST 4 +#define MC_CMD_MUM_IN_RAW_CMD_CMD_CODE_LEN 4 /* Number of bytes to write */ #define MC_CMD_MUM_IN_RAW_CMD_NUM_WRITE_OFST 8 +#define MC_CMD_MUM_IN_RAW_CMD_NUM_WRITE_LEN 4 /* Number of bytes to read */ #define MC_CMD_MUM_IN_RAW_CMD_NUM_READ_OFST 12 +#define MC_CMD_MUM_IN_RAW_CMD_NUM_READ_LEN 4 /* Bytes to write */ #define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_OFST 16 #define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_LEN 1 @@ -7496,21 +6248,28 @@ #define MC_CMD_MUM_IN_LOG_LEN 8 /* MUM cmd header */ /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ #define MC_CMD_MUM_IN_LOG_OP_OFST 4 -#define MC_CMD_MUM_IN_LOG_OP_UART 0x1 /* enum */ +#define MC_CMD_MUM_IN_LOG_OP_LEN 4 +#define MC_CMD_MUM_IN_LOG_OP_UART 0x1 /* enum */ /* MC_CMD_MUM_IN_LOG_OP_UART msgrequest */ #define MC_CMD_MUM_IN_LOG_OP_UART_LEN 12 /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ /* MC_CMD_MUM_IN_LOG_OP_OFST 4 */ +/* MC_CMD_MUM_IN_LOG_OP_LEN 4 */ /* Enable/disable debug output to UART */ #define MC_CMD_MUM_IN_LOG_OP_UART_ENABLE_OFST 8 +#define MC_CMD_MUM_IN_LOG_OP_UART_ENABLE_LEN 4 /* MC_CMD_MUM_IN_GPIO msgrequest */ #define MC_CMD_MUM_IN_GPIO_LEN 8 /* MUM cmd header */ /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ #define MC_CMD_MUM_IN_GPIO_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_HDR_LEN 4 #define MC_CMD_MUM_IN_GPIO_OPCODE_LBN 0 #define MC_CMD_MUM_IN_GPIO_OPCODE_WIDTH 8 #define MC_CMD_MUM_IN_GPIO_IN_READ 0x0 /* enum */ @@ -7523,40 +6282,56 @@ /* MC_CMD_MUM_IN_GPIO_IN_READ msgrequest */ #define MC_CMD_MUM_IN_GPIO_IN_READ_LEN 8 /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ #define MC_CMD_MUM_IN_GPIO_IN_READ_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_IN_READ_HDR_LEN 4 /* MC_CMD_MUM_IN_GPIO_OUT_WRITE msgrequest */ #define MC_CMD_MUM_IN_GPIO_OUT_WRITE_LEN 16 /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ #define MC_CMD_MUM_IN_GPIO_OUT_WRITE_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_HDR_LEN 4 /* The first 32-bit word to be written to the GPIO OUT register. */ #define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK1_OFST 8 +#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK1_LEN 4 /* The second 32-bit word to be written to the GPIO OUT register. 
*/ #define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK2_OFST 12 +#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK2_LEN 4 /* MC_CMD_MUM_IN_GPIO_OUT_READ msgrequest */ #define MC_CMD_MUM_IN_GPIO_OUT_READ_LEN 8 /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ #define MC_CMD_MUM_IN_GPIO_OUT_READ_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OUT_READ_HDR_LEN 4 /* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE msgrequest */ #define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_LEN 16 /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ #define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_HDR_LEN 4 /* The first 32-bit word to be written to the GPIO OUT ENABLE register. */ #define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK1_OFST 8 +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK1_LEN 4 /* The second 32-bit word to be written to the GPIO OUT ENABLE register. */ #define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK2_OFST 12 +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK2_LEN 4 /* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ msgrequest */ #define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_LEN 8 /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ #define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_HDR_LEN 4 /* MC_CMD_MUM_IN_GPIO_OP msgrequest */ #define MC_CMD_MUM_IN_GPIO_OP_LEN 8 /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ #define MC_CMD_MUM_IN_GPIO_OP_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OP_HDR_LEN 4 #define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_LBN 8 #define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_WIDTH 8 #define MC_CMD_MUM_IN_GPIO_OP_OUT_READ 0x0 /* enum */ @@ -7569,26 +6344,34 @@ /* MC_CMD_MUM_IN_GPIO_OP_OUT_READ msgrequest */ #define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_LEN 8 /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ #define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_HDR_LEN 4 /* MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE msgrequest */ #define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_LEN 8 /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ #define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_LEN 4 #define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_LBN 24 #define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_WIDTH 8 /* MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG msgrequest */ #define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_LEN 8 /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ #define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_LEN 4 #define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_LBN 24 #define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_WIDTH 8 /* MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE msgrequest */ #define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_LEN 8 /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ #define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_LEN 4 #define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_LBN 24 #define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_WIDTH 8 @@ -7596,7 +6379,9 @@ #define MC_CMD_MUM_IN_READ_SENSORS_LEN 8 /* MUM cmd header */ /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ #define MC_CMD_MUM_IN_READ_SENSORS_PARAMS_OFST 4 +#define MC_CMD_MUM_IN_READ_SENSORS_PARAMS_LEN 4 #define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_LBN 0 #define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_WIDTH 8 #define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_LBN 8 @@ -7606,13 +6391,16 @@ #define MC_CMD_MUM_IN_PROGRAM_CLOCKS_LEN 12 /* MUM cmd header 
*/ /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ /* Bit-mask of clocks to be programmed */ #define MC_CMD_MUM_IN_PROGRAM_CLOCKS_MASK_OFST 4 +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_MASK_LEN 4 #define MC_CMD_MUM_CLOCK_ID_FPGA 0x0 /* enum */ #define MC_CMD_MUM_CLOCK_ID_DDR 0x1 /* enum */ #define MC_CMD_MUM_CLOCK_ID_NIC 0x2 /* enum */ /* Control flags for clock programming */ #define MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_OFST 8 +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_LEN 4 #define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_LBN 0 #define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_WIDTH 1 #define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_LBN 1 @@ -7624,19 +6412,24 @@ #define MC_CMD_MUM_IN_FPGA_LOAD_LEN 8 /* MUM cmd header */ /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ /* Enable/Disable FPGA config from flash */ #define MC_CMD_MUM_IN_FPGA_LOAD_ENABLE_OFST 4 +#define MC_CMD_MUM_IN_FPGA_LOAD_ENABLE_LEN 4 /* MC_CMD_MUM_IN_READ_ATB_SENSOR msgrequest */ #define MC_CMD_MUM_IN_READ_ATB_SENSOR_LEN 4 /* MUM cmd header */ /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ /* MC_CMD_MUM_IN_QSFP msgrequest */ #define MC_CMD_MUM_IN_QSFP_LEN 12 /* MUM cmd header */ /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ #define MC_CMD_MUM_IN_QSFP_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_HDR_LEN 4 #define MC_CMD_MUM_IN_QSFP_OPCODE_LBN 0 #define MC_CMD_MUM_IN_QSFP_OPCODE_WIDTH 4 #define MC_CMD_MUM_IN_QSFP_INIT 0x0 /* enum */ @@ -7646,52 +6439,77 @@ #define MC_CMD_MUM_IN_QSFP_FILL_STATS 0x4 /* enum */ #define MC_CMD_MUM_IN_QSFP_POLL_BIST 0x5 /* enum */ #define MC_CMD_MUM_IN_QSFP_IDX_OFST 8 +#define MC_CMD_MUM_IN_QSFP_IDX_LEN 4 /* MC_CMD_MUM_IN_QSFP_INIT msgrequest */ #define MC_CMD_MUM_IN_QSFP_INIT_LEN 16 /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ #define MC_CMD_MUM_IN_QSFP_INIT_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_INIT_HDR_LEN 4 #define MC_CMD_MUM_IN_QSFP_INIT_IDX_OFST 8 +#define MC_CMD_MUM_IN_QSFP_INIT_IDX_LEN 4 #define MC_CMD_MUM_IN_QSFP_INIT_CAGE_OFST 12 +#define MC_CMD_MUM_IN_QSFP_INIT_CAGE_LEN 4 /* MC_CMD_MUM_IN_QSFP_RECONFIGURE msgrequest */ #define MC_CMD_MUM_IN_QSFP_RECONFIGURE_LEN 24 /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ #define MC_CMD_MUM_IN_QSFP_RECONFIGURE_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_HDR_LEN 4 #define MC_CMD_MUM_IN_QSFP_RECONFIGURE_IDX_OFST 8 +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_IDX_LEN 4 #define MC_CMD_MUM_IN_QSFP_RECONFIGURE_TX_DISABLE_OFST 12 +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_TX_DISABLE_LEN 4 #define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LANES_OFST 16 +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LANES_LEN 4 #define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LINK_SPEED_OFST 20 +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LINK_SPEED_LEN 4 /* MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP msgrequest */ #define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_LEN 12 /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ #define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_HDR_LEN 4 #define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_IDX_OFST 8 +#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_IDX_LEN 4 /* MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO msgrequest */ #define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_LEN 16 /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ #define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_HDR_LEN 4 #define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_IDX_OFST 8 +#define 
MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_IDX_LEN 4 #define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_PAGE_OFST 12 +#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_PAGE_LEN 4 /* MC_CMD_MUM_IN_QSFP_FILL_STATS msgrequest */ #define MC_CMD_MUM_IN_QSFP_FILL_STATS_LEN 12 /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ #define MC_CMD_MUM_IN_QSFP_FILL_STATS_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_FILL_STATS_HDR_LEN 4 #define MC_CMD_MUM_IN_QSFP_FILL_STATS_IDX_OFST 8 +#define MC_CMD_MUM_IN_QSFP_FILL_STATS_IDX_LEN 4 /* MC_CMD_MUM_IN_QSFP_POLL_BIST msgrequest */ #define MC_CMD_MUM_IN_QSFP_POLL_BIST_LEN 12 /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ #define MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_LEN 4 #define MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_OFST 8 +#define MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_LEN 4 /* MC_CMD_MUM_IN_READ_DDR_INFO msgrequest */ #define MC_CMD_MUM_IN_READ_DDR_INFO_LEN 4 /* MUM cmd header */ /* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ /* MC_CMD_MUM_OUT msgresponse */ #define MC_CMD_MUM_OUT_LEN 0 @@ -7702,6 +6520,7 @@ /* MC_CMD_MUM_OUT_GET_VERSION msgresponse */ #define MC_CMD_MUM_OUT_GET_VERSION_LEN 12 #define MC_CMD_MUM_OUT_GET_VERSION_FIRMWARE_OFST 0 +#define MC_CMD_MUM_OUT_GET_VERSION_FIRMWARE_LEN 4 #define MC_CMD_MUM_OUT_GET_VERSION_VERSION_OFST 4 #define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LEN 8 #define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_OFST 4 @@ -7739,8 +6558,10 @@ #define MC_CMD_MUM_OUT_GPIO_IN_READ_LEN 8 /* The first 32-bit word read from the GPIO IN register. */ #define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK1_OFST 0 +#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK1_LEN 4 /* The second 32-bit word read from the GPIO IN register. */ #define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK2_OFST 4 +#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK2_LEN 4 /* MC_CMD_MUM_OUT_GPIO_OUT_WRITE msgresponse */ #define MC_CMD_MUM_OUT_GPIO_OUT_WRITE_LEN 0 @@ -7749,8 +6570,10 @@ #define MC_CMD_MUM_OUT_GPIO_OUT_READ_LEN 8 /* The first 32-bit word read from the GPIO OUT register. */ #define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK1_OFST 0 +#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK1_LEN 4 /* The second 32-bit word read from the GPIO OUT register. 
*/ #define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK2_OFST 4 +#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK2_LEN 4 /* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE msgresponse */ #define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE_LEN 0 @@ -7758,11 +6581,14 @@ /* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ msgresponse */ #define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_LEN 8 #define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK1_OFST 0 +#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK1_LEN 4 #define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK2_OFST 4 +#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK2_LEN 4 /* MC_CMD_MUM_OUT_GPIO_OP_OUT_READ msgresponse */ #define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_LEN 4 #define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_BIT_READ_OFST 0 +#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_BIT_READ_LEN 4 /* MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE msgresponse */ #define MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE_LEN 0 @@ -7791,6 +6617,7 @@ /* MC_CMD_MUM_OUT_PROGRAM_CLOCKS msgresponse */ #define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_LEN 4 #define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_OK_MASK_OFST 0 +#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_OK_MASK_LEN 4 /* MC_CMD_MUM_OUT_FPGA_LOAD msgresponse */ #define MC_CMD_MUM_OUT_FPGA_LOAD_LEN 0 @@ -7798,6 +6625,7 @@ /* MC_CMD_MUM_OUT_READ_ATB_SENSOR msgresponse */ #define MC_CMD_MUM_OUT_READ_ATB_SENSOR_LEN 4 #define MC_CMD_MUM_OUT_READ_ATB_SENSOR_RESULT_OFST 0 +#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_RESULT_LEN 4 /* MC_CMD_MUM_OUT_QSFP_INIT msgresponse */ #define MC_CMD_MUM_OUT_QSFP_INIT_LEN 0 @@ -7805,7 +6633,9 @@ /* MC_CMD_MUM_OUT_QSFP_RECONFIGURE msgresponse */ #define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_LEN 8 #define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_OFST 0 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_LEN 4 #define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_OFST 4 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_LEN 4 #define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_LBN 0 #define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_WIDTH 1 #define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_LBN 1 @@ -7814,6 +6644,7 @@ /* MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP msgresponse */ #define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_LEN 4 #define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_PORT_PHY_LP_CAP_OFST 0 +#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_PORT_PHY_LP_CAP_LEN 4 /* MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO msgresponse */ #define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMIN 5 @@ -7821,6 +6652,7 @@ #define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LEN(num) (4+1*(num)) /* in bytes */ #define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_OFST 0 +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_LEN 4 #define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_OFST 4 #define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_LEN 1 #define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MINNUM 1 @@ -7829,11 +6661,14 @@ /* MC_CMD_MUM_OUT_QSFP_FILL_STATS msgresponse */ #define MC_CMD_MUM_OUT_QSFP_FILL_STATS_LEN 8 #define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PMA_PMD_LINK_UP_OFST 0 +#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PMA_PMD_LINK_UP_LEN 4 #define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PCS_LINK_UP_OFST 4 +#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PCS_LINK_UP_LEN 4 /* MC_CMD_MUM_OUT_QSFP_POLL_BIST msgresponse */ #define MC_CMD_MUM_OUT_QSFP_POLL_BIST_LEN 4 #define MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_OFST 0 +#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_LEN 4 /* MC_CMD_MUM_OUT_READ_DDR_INFO msgresponse */ #define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMIN 24 @@ -7841,12 +6676,14 @@ #define 
MC_CMD_MUM_OUT_READ_DDR_INFO_LEN(num) (8+8*(num)) /* Discrete (soldered) DDR resistor strap info */ #define MC_CMD_MUM_OUT_READ_DDR_INFO_DISCRETE_DDR_INFO_OFST 0 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_DISCRETE_DDR_INFO_LEN 4 #define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_LBN 0 #define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_WIDTH 16 #define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_LBN 16 #define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_WIDTH 16 /* Number of SODIMM info records */ #define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_RECORDS_OFST 4 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_RECORDS_LEN 4 /* Array of SODIMM info records */ #define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_OFST 8 #define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LEN 8 @@ -7907,18 +6744,19 @@ /* EVB_PORT_ID structuredef */ #define EVB_PORT_ID_LEN 4 #define EVB_PORT_ID_PORT_ID_OFST 0 +#define EVB_PORT_ID_PORT_ID_LEN 4 /* enum: An invalid port handle. */ -#define EVB_PORT_ID_NULL 0x0 +#define EVB_PORT_ID_NULL 0x0 /* enum: The port assigned to this function. */ -#define EVB_PORT_ID_ASSIGNED 0x1000000 +#define EVB_PORT_ID_ASSIGNED 0x1000000 /* enum: External network port 0 */ -#define EVB_PORT_ID_MAC0 0x2000000 +#define EVB_PORT_ID_MAC0 0x2000000 /* enum: External network port 1 */ -#define EVB_PORT_ID_MAC1 0x2000001 +#define EVB_PORT_ID_MAC1 0x2000001 /* enum: External network port 2 */ -#define EVB_PORT_ID_MAC2 0x2000002 +#define EVB_PORT_ID_MAC2 0x2000002 /* enum: External network port 3 */ -#define EVB_PORT_ID_MAC3 0x2000003 +#define EVB_PORT_ID_MAC3 0x2000003 #define EVB_PORT_ID_PORT_ID_LBN 0 #define EVB_PORT_ID_PORT_ID_WIDTH 32 @@ -7930,7 +6768,7 @@ #define EVB_VLAN_TAG_MODE_LBN 12 #define EVB_VLAN_TAG_MODE_WIDTH 4 /* enum: Insert the VLAN. */ -#define EVB_VLAN_TAG_INSERT 0x0 +#define EVB_VLAN_TAG_INSERT 0x0
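EVB_VLAN_TAG packs a tag word with the mode in bits 12-15 (EVB_VLAN_TAG_MODE_LBN/WIDTH above). A small sketch of composing one, assuming the VLAN ID field occupies bits 0-11 (its LBN/WIDTH defines fall outside this hunk):

    #include <stdint.h>

    /* Sketch: build an EVB VLAN tag; mode is EVB_VLAN_TAG_INSERT (0x0),
     * EVB_VLAN_TAG_REPLACE (0x1), etc. */
    static uint16_t evb_vlan_tag(uint16_t vlan_id, uint16_t mode)
    {
            return (uint16_t)((vlan_id & 0xfff) |
                              (mode << 12 /* EVB_VLAN_TAG_MODE_LBN */));
    }

/* enum: Replace the VLAN if already present.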
*/ #define EVB_VLAN_TAG_REPLACE 0x1 @@ -7959,121 +6797,149 @@ #define NVRAM_PARTITION_TYPE_ID_OFST 0 #define NVRAM_PARTITION_TYPE_ID_LEN 2 /* enum: Primary MC firmware partition */ -#define NVRAM_PARTITION_TYPE_MC_FIRMWARE 0x100 +#define NVRAM_PARTITION_TYPE_MC_FIRMWARE 0x100 /* enum: Secondary MC firmware partition */ -#define NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP 0x200 +#define NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP 0x200 /* enum: Expansion ROM partition */ -#define NVRAM_PARTITION_TYPE_EXPANSION_ROM 0x300 +#define NVRAM_PARTITION_TYPE_EXPANSION_ROM 0x300 /* enum: Static configuration TLV partition */ -#define NVRAM_PARTITION_TYPE_STATIC_CONFIG 0x400 +#define NVRAM_PARTITION_TYPE_STATIC_CONFIG 0x400 /* enum: Dynamic configuration TLV partition */ -#define NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG 0x500 +#define NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG 0x500 /* enum: Expansion ROM configuration data for port 0 */ -#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0 0x600 +#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0 0x600 /* enum: Synonym for EXPROM_CONFIG_PORT0 as used in pmap files */ -#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG 0x600 +#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG 0x600 /* enum: Expansion ROM configuration data for port 1 */ -#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1 0x601 +#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1 0x601 /* enum: Expansion ROM configuration data for port 2 */ -#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2 0x602 +#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2 0x602 /* enum: Expansion ROM configuration data for port 3 */ -#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3 0x603 +#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3 0x603 /* enum: Non-volatile log output partition */ -#define NVRAM_PARTITION_TYPE_LOG 0x700 +#define NVRAM_PARTITION_TYPE_LOG 0x700 /* enum: Non-volatile log output of second core on dual-core device */ -#define NVRAM_PARTITION_TYPE_LOG_SLAVE 0x701 +#define NVRAM_PARTITION_TYPE_LOG_SLAVE 0x701 /* enum: Device state dump output partition */ -#define NVRAM_PARTITION_TYPE_DUMP 0x800 +#define NVRAM_PARTITION_TYPE_DUMP 0x800 /* enum: Application license key storage partition */ -#define NVRAM_PARTITION_TYPE_LICENSE 0x900 +#define NVRAM_PARTITION_TYPE_LICENSE 0x900 /* enum: Start of range used for PHY partitions (low 8 bits are the PHY ID) */ -#define NVRAM_PARTITION_TYPE_PHY_MIN 0xa00 +#define NVRAM_PARTITION_TYPE_PHY_MIN 0xa00 /* enum: End of range used for PHY partitions (low 8 bits are the PHY ID) */ -#define NVRAM_PARTITION_TYPE_PHY_MAX 0xaff +#define NVRAM_PARTITION_TYPE_PHY_MAX 0xaff /* enum: Primary FPGA partition */ -#define NVRAM_PARTITION_TYPE_FPGA 0xb00 +#define NVRAM_PARTITION_TYPE_FPGA 0xb00 /* enum: Secondary FPGA partition */ -#define NVRAM_PARTITION_TYPE_FPGA_BACKUP 0xb01 +#define NVRAM_PARTITION_TYPE_FPGA_BACKUP 0xb01 /* enum: FC firmware partition */ -#define NVRAM_PARTITION_TYPE_FC_FIRMWARE 0xb02 +#define NVRAM_PARTITION_TYPE_FC_FIRMWARE 0xb02 /* enum: FC License partition */ -#define NVRAM_PARTITION_TYPE_FC_LICENSE 0xb03 +#define NVRAM_PARTITION_TYPE_FC_LICENSE 0xb03 /* enum: Non-volatile log output partition for FC */ -#define NVRAM_PARTITION_TYPE_FC_LOG 0xb04 +#define NVRAM_PARTITION_TYPE_FC_LOG 0xb04 /* enum: MUM firmware partition */ -#define NVRAM_PARTITION_TYPE_MUM_FIRMWARE 0xc00 +#define NVRAM_PARTITION_TYPE_MUM_FIRMWARE 0xc00 +/* enum: SUC firmware partition (this is intentionally an alias of + * MUM_FIRMWARE) + */ +#define NVRAM_PARTITION_TYPE_SUC_FIRMWARE 0xc00 /* enum: MUM Non-volatile log output 
partition. */ -#define NVRAM_PARTITION_TYPE_MUM_LOG 0xc01 +#define NVRAM_PARTITION_TYPE_MUM_LOG 0xc01 /* enum: MUM Application table partition. */ -#define NVRAM_PARTITION_TYPE_MUM_APPTABLE 0xc02 +#define NVRAM_PARTITION_TYPE_MUM_APPTABLE 0xc02 /* enum: MUM boot rom partition. */ -#define NVRAM_PARTITION_TYPE_MUM_BOOT_ROM 0xc03 +#define NVRAM_PARTITION_TYPE_MUM_BOOT_ROM 0xc03 /* enum: MUM production signatures & calibration rom partition. */ -#define NVRAM_PARTITION_TYPE_MUM_PROD_ROM 0xc04 +#define NVRAM_PARTITION_TYPE_MUM_PROD_ROM 0xc04 /* enum: MUM user signatures & calibration rom partition. */ -#define NVRAM_PARTITION_TYPE_MUM_USER_ROM 0xc05 +#define NVRAM_PARTITION_TYPE_MUM_USER_ROM 0xc05 /* enum: MUM fuses and lockbits partition. */ -#define NVRAM_PARTITION_TYPE_MUM_FUSELOCK 0xc06 +#define NVRAM_PARTITION_TYPE_MUM_FUSELOCK 0xc06 /* enum: UEFI expansion ROM if separate from PXE */ -#define NVRAM_PARTITION_TYPE_EXPANSION_UEFI 0xd00 -/* enum: Spare partition 0 */ -#define NVRAM_PARTITION_TYPE_SPARE_0 0x1000 +#define NVRAM_PARTITION_TYPE_EXPANSION_UEFI 0xd00 +/* enum: Used by the expansion ROM for logging */ +#define NVRAM_PARTITION_TYPE_PXE_LOG 0x1000 /* enum: Used for XIP code of shmbooted images */ -#define NVRAM_PARTITION_TYPE_XIP_SCRATCH 0x1100 +#define NVRAM_PARTITION_TYPE_XIP_SCRATCH 0x1100 /* enum: Spare partition 2 */ -#define NVRAM_PARTITION_TYPE_SPARE_2 0x1200 +#define NVRAM_PARTITION_TYPE_SPARE_2 0x1200 /* enum: Manufacturing partition. Used during manufacture to pass information * between XJTAG and Manftest. */ -#define NVRAM_PARTITION_TYPE_MANUFACTURING 0x1300 +#define NVRAM_PARTITION_TYPE_MANUFACTURING 0x1300 /* enum: Spare partition 4 */ -#define NVRAM_PARTITION_TYPE_SPARE_4 0x1400 +#define NVRAM_PARTITION_TYPE_SPARE_4 0x1400 /* enum: Spare partition 5 */ -#define NVRAM_PARTITION_TYPE_SPARE_5 0x1500 +#define NVRAM_PARTITION_TYPE_SPARE_5 0x1500 /* enum: Partition for reporting MC status. See mc_flash_layout.h * medford_mc_status_hdr_t for layout on Medford. */ -#define NVRAM_PARTITION_TYPE_STATUS 0x1600 +#define NVRAM_PARTITION_TYPE_STATUS 0x1600 +/* enum: Spare partition 13 */ +#define NVRAM_PARTITION_TYPE_SPARE_13 0x1700 +/* enum: Spare partition 14 */ +#define NVRAM_PARTITION_TYPE_SPARE_14 0x1800 +/* enum: Spare partition 15 */ +#define NVRAM_PARTITION_TYPE_SPARE_15 0x1900 +/* enum: Spare partition 16 */ +#define NVRAM_PARTITION_TYPE_SPARE_16 0x1a00 +/* enum: Factory defaults for dynamic configuration */ +#define NVRAM_PARTITION_TYPE_DYNCONFIG_DEFAULTS 0x1b00 +/* enum: Factory defaults for expansion ROM configuration */ +#define NVRAM_PARTITION_TYPE_ROMCONFIG_DEFAULTS 0x1c00 +/* enum: Field Replaceable Unit inventory information for use on IPMI + * platforms. See SF-119124-PS. The STATIC_CONFIG partition may contain a + * subset of the information stored in this partition. 
+ */ +#define NVRAM_PARTITION_TYPE_FRU_INFORMATION 0x1d00 /* enum: Start of reserved value range (firmware may use for any purpose) */ -#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN 0xff00 +#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN 0xff00 /* enum: End of reserved value range (firmware may use for any purpose) */ -#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MAX 0xfffd +#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MAX 0xfffd /* enum: Recovery partition map (provided if real map is missing or corrupt) */ -#define NVRAM_PARTITION_TYPE_RECOVERY_MAP 0xfffe +#define NVRAM_PARTITION_TYPE_RECOVERY_MAP 0xfffe /* enum: Partition map (real map as stored in flash) */ -#define NVRAM_PARTITION_TYPE_PARTITION_MAP 0xffff +#define NVRAM_PARTITION_TYPE_PARTITION_MAP 0xffff #define NVRAM_PARTITION_TYPE_ID_LBN 0 #define NVRAM_PARTITION_TYPE_ID_WIDTH 16 /* LICENSED_APP_ID structuredef */ #define LICENSED_APP_ID_LEN 4 #define LICENSED_APP_ID_ID_OFST 0 +#define LICENSED_APP_ID_ID_LEN 4 /* enum: OpenOnload */ -#define LICENSED_APP_ID_ONLOAD 0x1 +#define LICENSED_APP_ID_ONLOAD 0x1 /* enum: PTP timestamping */ -#define LICENSED_APP_ID_PTP 0x2 +#define LICENSED_APP_ID_PTP 0x2 /* enum: SolarCapture Pro */ -#define LICENSED_APP_ID_SOLARCAPTURE_PRO 0x4 +#define LICENSED_APP_ID_SOLARCAPTURE_PRO 0x4 /* enum: SolarSecure filter engine */ -#define LICENSED_APP_ID_SOLARSECURE 0x8 +#define LICENSED_APP_ID_SOLARSECURE 0x8 /* enum: Performance monitor */ -#define LICENSED_APP_ID_PERF_MONITOR 0x10 +#define LICENSED_APP_ID_PERF_MONITOR 0x10 /* enum: SolarCapture Live */ -#define LICENSED_APP_ID_SOLARCAPTURE_LIVE 0x20 +#define LICENSED_APP_ID_SOLARCAPTURE_LIVE 0x20 /* enum: Capture SolarSystem */ -#define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM 0x40 +#define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM 0x40 /* enum: Network Access Control */ -#define LICENSED_APP_ID_NETWORK_ACCESS_CONTROL 0x80 +#define LICENSED_APP_ID_NETWORK_ACCESS_CONTROL 0x80 /* enum: TCP Direct */ -#define LICENSED_APP_ID_TCP_DIRECT 0x100 +#define LICENSED_APP_ID_TCP_DIRECT 0x100 /* enum: Low Latency */ -#define LICENSED_APP_ID_LOW_LATENCY 0x200 +#define LICENSED_APP_ID_LOW_LATENCY 0x200 /* enum: SolarCapture Tap */ -#define LICENSED_APP_ID_SOLARCAPTURE_TAP 0x400 +#define LICENSED_APP_ID_SOLARCAPTURE_TAP 0x400 /* enum: Capture SolarSystem 40G */ #define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM_40G 0x800 /* enum: Capture SolarSystem 1G */ -#define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM_1G 0x1000 +#define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM_1G 0x1000 +/* enum: ScaleOut Onload */ +#define LICENSED_APP_ID_SCALEOUT_ONLOAD 0x2000 +/* enum: SCS Network Analytics Dashboard */ +#define LICENSED_APP_ID_DSHBRD 0x4000 +/* enum: SolarCapture Trading Analytics */ +#define LICENSED_APP_ID_SCATRD 0x8000 #define LICENSED_APP_ID_ID_LBN 0 #define LICENSED_APP_ID_ID_WIDTH 32 @@ -8140,6 +7006,12 @@ #define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_WIDTH 1 #define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_LBN 12 #define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_WIDTH 1 +#define LICENSED_V3_APPS_SCALEOUT_ONLOAD_LBN 13 +#define LICENSED_V3_APPS_SCALEOUT_ONLOAD_WIDTH 1 +#define LICENSED_V3_APPS_DSHBRD_LBN 14 +#define LICENSED_V3_APPS_DSHBRD_WIDTH 1 +#define LICENSED_V3_APPS_SCATRD_LBN 15 +#define LICENSED_V3_APPS_SCATRD_WIDTH 1 #define LICENSED_V3_APPS_MASK_LBN 0 #define LICENSED_V3_APPS_MASK_WIDTH 64 @@ -8185,11 +7057,23 @@ #define TX_TIMESTAMP_EVENT_TX_EV_TYPE_OFST 3 #define TX_TIMESTAMP_EVENT_TX_EV_TYPE_LEN 1 /* enum: This is a TX completion event, not a timestamp */ -#define 
TX_TIMESTAMP_EVENT_TX_EV_COMPLETION 0x0 +#define TX_TIMESTAMP_EVENT_TX_EV_COMPLETION 0x0 +/* enum: This is a TX completion event for a CTPIO transmit. The event format + * is the same as for TX_EV_COMPLETION. + */ +#define TX_TIMESTAMP_EVENT_TX_EV_CTPIO_COMPLETION 0x11 +/* enum: This is the low part of a TX timestamp for a CTPIO transmission. The + * event format is the same as for TX_EV_TSTAMP_LO + */ +#define TX_TIMESTAMP_EVENT_TX_EV_CTPIO_TS_LO 0x12 +/* enum: This is the high part of a TX timestamp for a CTPIO transmission. The + * event format is the same as for TX_EV_TSTAMP_HI + */ +#define TX_TIMESTAMP_EVENT_TX_EV_CTPIO_TS_HI 0x13 /* enum: This is the low part of a TX timestamp event */ -#define TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO 0x51 +#define TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO 0x51 /* enum: This is the high part of a TX timestamp event */ -#define TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_HI 0x52 +#define TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_HI 0x52 #define TX_TIMESTAMP_EVENT_TX_EV_TYPE_LBN 24 #define TX_TIMESTAMP_EVENT_TX_EV_TYPE_WIDTH 8 /* upper 16 bits of timestamp data */ @@ -8232,6 +7116,42 @@ #define CTPIO_STATS_MAP_BUCKET_LBN 16 #define CTPIO_STATS_MAP_BUCKET_WIDTH 16 +/* MESSAGE_TYPE structuredef: When present this defines the meaning of a + * message, and is used to protect against chosen message attacks in signed + * messages, regardless of their origin. The message type also defines the + * signature cryptographic algorithm, encoding, and message fields included in + * the signature. The values are used in different commands but must be unique + * across all commands, e.g. MC_CMD_TSA_BIND_IN_SECURE_UNBIND uses a different + * message type than MC_CMD_SECURE_NIC_INFO_IN_STATUS. + */ +#define MESSAGE_TYPE_LEN 4 +#define MESSAGE_TYPE_MESSAGE_TYPE_OFST 0 +#define MESSAGE_TYPE_MESSAGE_TYPE_LEN 4 +#define MESSAGE_TYPE_UNUSED 0x0 /* enum */ +/* enum: Message type value for the response to a + * MC_CMD_TSA_BIND_IN_SECURE_UNBIND message. TSA_SECURE_UNBIND messages are + * ECDSA SECP384R1 signed using SHA384 message digest algorithm over fields + * MESSAGE_TYPE, TSANID, TSAID, and UNBINDTOKEN, and encoded as suggested by + * RFC6979 (section 2.4). + */ +#define MESSAGE_TYPE_TSA_SECURE_UNBIND 0x1 +/* enum: Message type value for the response to a + * MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION message. TSA_SECURE_DECOMMISSION + * messages are ECDSA SECP384R1 signed using SHA384 message digest algorithm + * over fields MESSAGE_TYPE, TSAID, USER, and REASON, and encoded as suggested + * by RFC6979 (section 2.4). + */ +#define MESSAGE_TYPE_TSA_SECURE_DECOMMISSION 0x2 +/* enum: Message type value for the response to a + * MC_CMD_SECURE_NIC_INFO_IN_STATUS message. This enum value is not sequential + * to other message types for backwards compatibility as the message type for + * MC_CMD_SECURE_NIC_INFO_IN_STATUS was defined before the existence of this + * global enum. + */ +#define MESSAGE_TYPE_SECURE_NIC_INFO_STATUS 0xdb4 +#define MESSAGE_TYPE_MESSAGE_TYPE_LBN 0 +#define MESSAGE_TYPE_MESSAGE_TYPE_WIDTH 32 + /***********************************/ /* MC_CMD_READ_REGS @@ -8240,7 +7160,7 @@ #define MC_CMD_READ_REGS 0x50 #undef MC_CMD_0x50_PRIVILEGE_CTG -#define MC_CMD_0x50_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x50_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_READ_REGS_IN msgrequest */ #define MC_CMD_READ_REGS_IN_LEN 0 @@ -8274,17 +7194,22 @@ #define MC_CMD_INIT_EVQ_IN_LEN(num) (36+8*(num)) /* Size, in entries */ #define MC_CMD_INIT_EVQ_IN_SIZE_OFST 0 +#define MC_CMD_INIT_EVQ_IN_SIZE_LEN 4
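MC_CMD_INIT_EVQ_IN is laid out as fixed dwords (SIZE at offset 0, then INSTANCE, the timer and flag words, and counter configuration, per the defines here and just below), followed by an array of 64-bit DMA addresses at offset 36. A hedged sketch of filling the fixed part of the request for a non-interrupting, untimed queue, with mcdi_write_dword() as a hypothetical little-endian store:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* Hypothetical little-endian dword store into an MCDI request buffer
     * (assumes a little-endian host). */
    static void mcdi_write_dword(uint8_t *buf, size_t ofst, uint32_t v)
    {
            memcpy(buf + ofst, &v, sizeof(v));
    }

    static void init_evq_request(uint8_t *req, uint32_t nentries,
                                 uint32_t instance)
    {
            mcdi_write_dword(req, 0, nentries); /* ..._IN_SIZE_OFST */
            mcdi_write_dword(req, 4, instance); /* ..._IN_INSTANCE_OFST */
            mcdi_write_dword(req, 8, 0);        /* ..._IN_TMR_LOAD_OFST */
            mcdi_write_dword(req, 12, 0);       /* ..._IN_TMR_RELOAD_OFST */
            mcdi_write_dword(req, 16, 0);       /* ..._IN_FLAGS_OFST, none set */
            mcdi_write_dword(req, 20, 0x0);     /* ..._IN_TMR_MODE_OFST = DIS */
            mcdi_write_dword(req, 24, 0);       /* ..._IN_TARGET_EVQ_OFST */
            mcdi_write_dword(req, 28, 0x0);     /* ..._IN_COUNT_MODE_OFST = DIS */
            mcdi_write_dword(req, 32, 0);       /* ..._IN_COUNT_THRSHLD_OFST */
            /* 64-bit buffer addresses follow at offset 36 (DMA_ADDR). */
    }

/* Desired instance.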
Must be set to a specific instance, which is a function * local queue index. */ #define MC_CMD_INIT_EVQ_IN_INSTANCE_OFST 4 +#define MC_CMD_INIT_EVQ_IN_INSTANCE_LEN 4 /* The initial timer value. The load value is ignored if the timer mode is DIS. */ #define MC_CMD_INIT_EVQ_IN_TMR_LOAD_OFST 8 +#define MC_CMD_INIT_EVQ_IN_TMR_LOAD_LEN 4 /* The reload value is ignored in one-shot modes */ #define MC_CMD_INIT_EVQ_IN_TMR_RELOAD_OFST 12 +#define MC_CMD_INIT_EVQ_IN_TMR_RELOAD_LEN 4 /* tbd */ #define MC_CMD_INIT_EVQ_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_EVQ_IN_FLAGS_LEN 4 #define MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_LBN 0 #define MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_WIDTH 1 #define MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_LBN 1 @@ -8300,6 +7225,7 @@ #define MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_LBN 6 #define MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_WIDTH 1 #define MC_CMD_INIT_EVQ_IN_TMR_MODE_OFST 20 +#define MC_CMD_INIT_EVQ_IN_TMR_MODE_LEN 4 /* enum: Disabled */ #define MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS 0x0 /* enum: Immediate */ @@ -8310,13 +7236,16 @@ #define MC_CMD_INIT_EVQ_IN_TMR_INT_HLDOFF 0x3 /* Target EVQ for wakeups if in wakeup mode. */ #define MC_CMD_INIT_EVQ_IN_TARGET_EVQ_OFST 24 +#define MC_CMD_INIT_EVQ_IN_TARGET_EVQ_LEN 4 /* Target interrupt if in interrupting mode (note union with target EVQ). Use * MC_CMD_RESOURCE_INSTANCE_ANY unless a specific one required for test * purposes. */ #define MC_CMD_INIT_EVQ_IN_IRQ_NUM_OFST 24 +#define MC_CMD_INIT_EVQ_IN_IRQ_NUM_LEN 4 /* Event Counter Mode. */ #define MC_CMD_INIT_EVQ_IN_COUNT_MODE_OFST 28 +#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_LEN 4 /* enum: Disabled */ #define MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS 0x0 /* enum: Disabled */ @@ -8327,6 +7256,7 @@ #define MC_CMD_INIT_EVQ_IN_COUNT_MODE_RXTX 0x3 /* Event queue packet count threshold. */ #define MC_CMD_INIT_EVQ_IN_COUNT_THRSHLD_OFST 32 +#define MC_CMD_INIT_EVQ_IN_COUNT_THRSHLD_LEN 4 /* 64-bit address of 4k of 4k-aligned host memory buffer */ #define MC_CMD_INIT_EVQ_IN_DMA_ADDR_OFST 36 #define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LEN 8 @@ -8339,6 +7269,7 @@ #define MC_CMD_INIT_EVQ_OUT_LEN 4 /* Only valid if INTRFLAG was true */ #define MC_CMD_INIT_EVQ_OUT_IRQ_OFST 0 +#define MC_CMD_INIT_EVQ_OUT_IRQ_LEN 4 /* MC_CMD_INIT_EVQ_V2_IN msgrequest */ #define MC_CMD_INIT_EVQ_V2_IN_LENMIN 44 @@ -8346,17 +7277,22 @@ #define MC_CMD_INIT_EVQ_V2_IN_LEN(num) (36+8*(num)) /* Size, in entries */ #define MC_CMD_INIT_EVQ_V2_IN_SIZE_OFST 0 +#define MC_CMD_INIT_EVQ_V2_IN_SIZE_LEN 4 /* Desired instance. Must be set to a specific instance, which is a function * local queue index. */ #define MC_CMD_INIT_EVQ_V2_IN_INSTANCE_OFST 4 +#define MC_CMD_INIT_EVQ_V2_IN_INSTANCE_LEN 4 /* The initial timer value. The load value is ignored if the timer mode is DIS. 
*/ #define MC_CMD_INIT_EVQ_V2_IN_TMR_LOAD_OFST 8 +#define MC_CMD_INIT_EVQ_V2_IN_TMR_LOAD_LEN 4 /* The reload value is ignored in one-shot modes */ #define MC_CMD_INIT_EVQ_V2_IN_TMR_RELOAD_OFST 12 +#define MC_CMD_INIT_EVQ_V2_IN_TMR_RELOAD_LEN 4 /* tbd */ #define MC_CMD_INIT_EVQ_V2_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_EVQ_V2_IN_FLAGS_LEN 4 #define MC_CMD_INIT_EVQ_V2_IN_FLAG_INTERRUPTING_LBN 0 #define MC_CMD_INIT_EVQ_V2_IN_FLAG_INTERRUPTING_WIDTH 1 #define MC_CMD_INIT_EVQ_V2_IN_FLAG_RPTR_DOS_LBN 1 @@ -8393,6 +7329,7 @@ */ #define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO 0x3 #define MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_OFST 20 +#define MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_LEN 4 /* enum: Disabled */ #define MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_DIS 0x0 /* enum: Immediate */ @@ -8403,13 +7340,16 @@ #define MC_CMD_INIT_EVQ_V2_IN_TMR_INT_HLDOFF 0x3 /* Target EVQ for wakeups if in wakeup mode. */ #define MC_CMD_INIT_EVQ_V2_IN_TARGET_EVQ_OFST 24 +#define MC_CMD_INIT_EVQ_V2_IN_TARGET_EVQ_LEN 4 /* Target interrupt if in interrupting mode (note union with target EVQ). Use * MC_CMD_RESOURCE_INSTANCE_ANY unless a specific one required for test * purposes. */ #define MC_CMD_INIT_EVQ_V2_IN_IRQ_NUM_OFST 24 +#define MC_CMD_INIT_EVQ_V2_IN_IRQ_NUM_LEN 4 /* Event Counter Mode. */ #define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_OFST 28 +#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_LEN 4 /* enum: Disabled */ #define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_DIS 0x0 /* enum: Disabled */ @@ -8420,6 +7360,7 @@ #define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_RXTX 0x3 /* Event queue packet count threshold. */ #define MC_CMD_INIT_EVQ_V2_IN_COUNT_THRSHLD_OFST 32 +#define MC_CMD_INIT_EVQ_V2_IN_COUNT_THRSHLD_LEN 4 /* 64-bit address of 4k of 4k-aligned host memory buffer */ #define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_OFST 36 #define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_LEN 8 @@ -8432,8 +7373,10 @@ #define MC_CMD_INIT_EVQ_V2_OUT_LEN 8 /* Only valid if INTRFLAG was true */ #define MC_CMD_INIT_EVQ_V2_OUT_IRQ_OFST 0 +#define MC_CMD_INIT_EVQ_V2_OUT_IRQ_LEN 4 /* Actual configuration applied on the card */ #define MC_CMD_INIT_EVQ_V2_OUT_FLAGS_OFST 4 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAGS_LEN 4 #define MC_CMD_INIT_EVQ_V2_OUT_FLAG_CUT_THRU_LBN 0 #define MC_CMD_INIT_EVQ_V2_OUT_FLAG_CUT_THRU_WIDTH 1 #define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RX_MERGE_LBN 1 @@ -8448,17 +7391,17 @@ #define QUEUE_CRC_MODE_MODE_LBN 0 #define QUEUE_CRC_MODE_MODE_WIDTH 4 /* enum: No CRC. */ -#define QUEUE_CRC_MODE_NONE 0x0 +#define QUEUE_CRC_MODE_NONE 0x0 /* enum: CRC Fiber channel over ethernet. */ -#define QUEUE_CRC_MODE_FCOE 0x1 +#define QUEUE_CRC_MODE_FCOE 0x1 /* enum: CRC (digest) iSCSI header only. */ -#define QUEUE_CRC_MODE_ISCSI_HDR 0x2 +#define QUEUE_CRC_MODE_ISCSI_HDR 0x2 /* enum: CRC (digest) iSCSI header and payload. */ -#define QUEUE_CRC_MODE_ISCSI 0x3 +#define QUEUE_CRC_MODE_ISCSI 0x3 /* enum: CRC Fiber channel over IP over ethernet. */ -#define QUEUE_CRC_MODE_FCOIPOE 0x4 +#define QUEUE_CRC_MODE_FCOIPOE 0x4 /* enum: CRC MPA. */ -#define QUEUE_CRC_MODE_MPA 0x5 +#define QUEUE_CRC_MODE_MPA 0x5 #define QUEUE_CRC_MODE_SPARE_LBN 4 #define QUEUE_CRC_MODE_SPARE_WIDTH 4 @@ -8482,17 +7425,22 @@ #define MC_CMD_INIT_RXQ_IN_LEN(num) (28+8*(num)) /* Size, in entries */ #define MC_CMD_INIT_RXQ_IN_SIZE_OFST 0 +#define MC_CMD_INIT_RXQ_IN_SIZE_LEN 4 /* The EVQ to send events to. This is an index originally specified to INIT_EVQ */ #define MC_CMD_INIT_RXQ_IN_TARGET_EVQ_OFST 4 +#define MC_CMD_INIT_RXQ_IN_TARGET_EVQ_LEN 4 /* The value to put in the event data. Check hardware spec. for valid range. 
*/ #define MC_CMD_INIT_RXQ_IN_LABEL_OFST 8 +#define MC_CMD_INIT_RXQ_IN_LABEL_LEN 4 /* Desired instance. Must be set to a specific instance, which is a function * local queue index. */ #define MC_CMD_INIT_RXQ_IN_INSTANCE_OFST 12 +#define MC_CMD_INIT_RXQ_IN_INSTANCE_LEN 4 /* There will be more flags here. */ #define MC_CMD_INIT_RXQ_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_RXQ_IN_FLAGS_LEN 4 #define MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_LBN 0 #define MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_WIDTH 1 #define MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_LBN 1 @@ -8511,8 +7459,10 @@ #define MC_CMD_INIT_RXQ_IN_UNUSED_WIDTH 1 /* Owner ID to use if in buffer mode (zero if physical) */ #define MC_CMD_INIT_RXQ_IN_OWNER_ID_OFST 20 +#define MC_CMD_INIT_RXQ_IN_OWNER_ID_LEN 4 /* The port ID associated with the v-adaptor which should contain this DMAQ. */ #define MC_CMD_INIT_RXQ_IN_PORT_ID_OFST 24 +#define MC_CMD_INIT_RXQ_IN_PORT_ID_LEN 4 /* 64-bit address of 4k of 4k-aligned host memory buffer */ #define MC_CMD_INIT_RXQ_IN_DMA_ADDR_OFST 28 #define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LEN 8 @@ -8527,17 +7477,26 @@ #define MC_CMD_INIT_RXQ_EXT_IN_LEN 544 /* Size, in entries */ #define MC_CMD_INIT_RXQ_EXT_IN_SIZE_OFST 0 -/* The EVQ to send events to. This is an index originally specified to INIT_EVQ +#define MC_CMD_INIT_RXQ_EXT_IN_SIZE_LEN 4 +/* The EVQ to send events to. This is an index originally specified to + * INIT_EVQ. If DMA_MODE == PACKED_STREAM this must be equal to INSTANCE. */ #define MC_CMD_INIT_RXQ_EXT_IN_TARGET_EVQ_OFST 4 -/* The value to put in the event data. Check hardware spec. for valid range. */ +#define MC_CMD_INIT_RXQ_EXT_IN_TARGET_EVQ_LEN 4 +/* The value to put in the event data. Check hardware spec. for valid range. + * This field is ignored if DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER or DMA_MODE + * == PACKED_STREAM. + */ #define MC_CMD_INIT_RXQ_EXT_IN_LABEL_OFST 8 +#define MC_CMD_INIT_RXQ_EXT_IN_LABEL_LEN 4 /* Desired instance. Must be set to a specific instance, which is a function * local queue index. */ #define MC_CMD_INIT_RXQ_EXT_IN_INSTANCE_OFST 12 +#define MC_CMD_INIT_RXQ_EXT_IN_INSTANCE_LEN 4 /* There will be more flags here. */ #define MC_CMD_INIT_RXQ_EXT_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAGS_LEN 4 #define MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0 #define MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1 #define MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_LBN 1 @@ -8555,26 +7514,37 @@ #define MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_LBN 10 #define MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_WIDTH 4 /* enum: One packet per descriptor (for normal networking) */ -#define MC_CMD_INIT_RXQ_EXT_IN_SINGLE_PACKET 0x0 +#define MC_CMD_INIT_RXQ_EXT_IN_SINGLE_PACKET 0x0 /* enum: Pack multiple packets into large descriptors (for SolarCapture) */ -#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM 0x1 +#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM 0x1 +/* enum: Pack multiple packets into large descriptors using the format designed + * to maximise packet rate. This mode uses 1 "bucket" per descriptor with + * multiple fixed-size packet buffers within each bucket. For a full + * description see SF-119419-TC. This mode is only supported by "dpdk" datapath + * firmware. + */ +#define MC_CMD_INIT_RXQ_EXT_IN_EQUAL_STRIDE_SUPER_BUFFER 0x2 +/* enum: Deprecated name for EQUAL_STRIDE_SUPER_BUFFER. 
*/ +#define MC_CMD_INIT_RXQ_EXT_IN_EQUAL_STRIDE_PACKED_STREAM 0x2 #define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_LBN 14 #define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_WIDTH 1 #define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_LBN 15 #define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_WIDTH 3 -#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_1M 0x0 /* enum */ -#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_512K 0x1 /* enum */ -#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_256K 0x2 /* enum */ -#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_128K 0x3 /* enum */ -#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_64K 0x4 /* enum */ +#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_1M 0x0 /* enum */ +#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_512K 0x1 /* enum */ +#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_256K 0x2 /* enum */ +#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_128K 0x3 /* enum */ +#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_64K 0x4 /* enum */ #define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_LBN 18 #define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1 #define MC_CMD_INIT_RXQ_EXT_IN_FLAG_FORCE_EV_MERGING_LBN 19 #define MC_CMD_INIT_RXQ_EXT_IN_FLAG_FORCE_EV_MERGING_WIDTH 1 /* Owner ID to use if in buffer mode (zero if physical) */ #define MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_OFST 20 +#define MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_LEN 4 /* The port ID associated with the v-adaptor which should contain this DMAQ. */ #define MC_CMD_INIT_RXQ_EXT_IN_PORT_ID_OFST 24 +#define MC_CMD_INIT_RXQ_EXT_IN_PORT_ID_LEN 4 /* 64-bit address of 4k of 4k-aligned host memory buffer */ #define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_OFST 28 #define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LEN 8 @@ -8583,6 +7553,116 @@ #define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_NUM 64 /* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */ #define MC_CMD_INIT_RXQ_EXT_IN_SNAPSHOT_LENGTH_OFST 540 +#define MC_CMD_INIT_RXQ_EXT_IN_SNAPSHOT_LENGTH_LEN 4 + +/* MC_CMD_INIT_RXQ_V3_IN msgrequest */ +#define MC_CMD_INIT_RXQ_V3_IN_LEN 560 +/* Size, in entries */ +#define MC_CMD_INIT_RXQ_V3_IN_SIZE_OFST 0 +#define MC_CMD_INIT_RXQ_V3_IN_SIZE_LEN 4 +/* The EVQ to send events to. This is an index originally specified to + * INIT_EVQ. If DMA_MODE == PACKED_STREAM this must be equal to INSTANCE. + */ +#define MC_CMD_INIT_RXQ_V3_IN_TARGET_EVQ_OFST 4 +#define MC_CMD_INIT_RXQ_V3_IN_TARGET_EVQ_LEN 4 +/* The value to put in the event data. Check hardware spec. for valid range. + * This field is ignored if DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER or DMA_MODE + * == PACKED_STREAM. + */ +#define MC_CMD_INIT_RXQ_V3_IN_LABEL_OFST 8 +#define MC_CMD_INIT_RXQ_V3_IN_LABEL_LEN 4 +/* Desired instance. Must be set to a specific instance, which is a function + * local queue index. + */ +#define MC_CMD_INIT_RXQ_V3_IN_INSTANCE_OFST 12 +#define MC_CMD_INIT_RXQ_V3_IN_INSTANCE_LEN 4 +/* There will be more flags here. 
*/ +#define MC_CMD_INIT_RXQ_V3_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_RXQ_V3_IN_FLAGS_LEN 4 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_BUFF_MODE_LBN 0 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_BUFF_MODE_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_HDR_SPLIT_LBN 1 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_HDR_SPLIT_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_TIMESTAMP_LBN 2 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_TIMESTAMP_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_CRC_MODE_LBN 3 +#define MC_CMD_INIT_RXQ_V3_IN_CRC_MODE_WIDTH 4 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_CHAIN_LBN 7 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_CHAIN_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_PREFIX_LBN 8 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_PREFIX_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_DISABLE_SCATTER_LBN 9 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_DMA_MODE_LBN 10 +#define MC_CMD_INIT_RXQ_V3_IN_DMA_MODE_WIDTH 4 +/* enum: One packet per descriptor (for normal networking) */ +#define MC_CMD_INIT_RXQ_V3_IN_SINGLE_PACKET 0x0 +/* enum: Pack multiple packets into large descriptors (for SolarCapture) */ +#define MC_CMD_INIT_RXQ_V3_IN_PACKED_STREAM 0x1 +/* enum: Pack multiple packets into large descriptors using the format designed + * to maximise packet rate. This mode uses 1 "bucket" per descriptor with + * multiple fixed-size packet buffers within each bucket. For a full + * description see SF-119419-TC. This mode is only supported by "dpdk" datapath + * firmware. + */ +#define MC_CMD_INIT_RXQ_V3_IN_EQUAL_STRIDE_SUPER_BUFFER 0x2 +/* enum: Deprecated name for EQUAL_STRIDE_SUPER_BUFFER. */ +#define MC_CMD_INIT_RXQ_V3_IN_EQUAL_STRIDE_PACKED_STREAM 0x2 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_SNAPSHOT_MODE_LBN 14 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_SNAPSHOT_MODE_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_PACKED_STREAM_BUFF_SIZE_LBN 15 +#define MC_CMD_INIT_RXQ_V3_IN_PACKED_STREAM_BUFF_SIZE_WIDTH 3 +#define MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_1M 0x0 /* enum */ +#define MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_512K 0x1 /* enum */ +#define MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_256K 0x2 /* enum */ +#define MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_128K 0x3 /* enum */ +#define MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_64K 0x4 /* enum */ +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_WANT_OUTER_CLASSES_LBN 18 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_FORCE_EV_MERGING_LBN 19 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_FORCE_EV_MERGING_WIDTH 1 +/* Owner ID to use if in buffer mode (zero if physical) */ +#define MC_CMD_INIT_RXQ_V3_IN_OWNER_ID_OFST 20 +#define MC_CMD_INIT_RXQ_V3_IN_OWNER_ID_LEN 4 +/* The port ID associated with the v-adaptor which should contain this DMAQ. */ +#define MC_CMD_INIT_RXQ_V3_IN_PORT_ID_OFST 24 +#define MC_CMD_INIT_RXQ_V3_IN_PORT_ID_LEN 4 +/* 64-bit address of 4k of 4k-aligned host memory buffer */ +#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_OFST 28 +#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_LEN 8 +#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_LO_OFST 28 +#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_HI_OFST 32 +#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_NUM 64 +/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */ +#define MC_CMD_INIT_RXQ_V3_IN_SNAPSHOT_LENGTH_OFST 540 +#define MC_CMD_INIT_RXQ_V3_IN_SNAPSHOT_LENGTH_LEN 4 +/* The number of packet buffers that will be contained within each + * EQUAL_STRIDE_SUPER_BUFFER format bucket supplied by the driver. This field + * is ignored unless DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER. 
+ */ +#define MC_CMD_INIT_RXQ_V3_IN_ES_PACKET_BUFFERS_PER_BUCKET_OFST 544 +#define MC_CMD_INIT_RXQ_V3_IN_ES_PACKET_BUFFERS_PER_BUCKET_LEN 4 +/* The length in bytes of the area in each packet buffer that can be written to + * by the adapter. This is used to store the packet prefix and the packet + * payload. This length does not include any end padding added by the driver. + * This field is ignored unless DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER. + */ +#define MC_CMD_INIT_RXQ_V3_IN_ES_MAX_DMA_LEN_OFST 548 +#define MC_CMD_INIT_RXQ_V3_IN_ES_MAX_DMA_LEN_LEN 4 +/* The length in bytes of a single packet buffer within a + * EQUAL_STRIDE_SUPER_BUFFER format bucket. This field is ignored unless + * DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER. + */ +#define MC_CMD_INIT_RXQ_V3_IN_ES_PACKET_STRIDE_OFST 552 +#define MC_CMD_INIT_RXQ_V3_IN_ES_PACKET_STRIDE_LEN 4 +/* The maximum time in nanoseconds that the datapath will be backpressured if + * there are no RX descriptors available. If the timeout is reached and there + * are still no descriptors then the packet will be dropped. A timeout of 0 + * means the datapath will never be blocked. This field is ignored unless + * DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER. + */ +#define MC_CMD_INIT_RXQ_V3_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT_OFST 556 +#define MC_CMD_INIT_RXQ_V3_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT_LEN 4 /* MC_CMD_INIT_RXQ_OUT msgresponse */ #define MC_CMD_INIT_RXQ_OUT_LEN 0 @@ -8590,6 +7670,9 @@ /* MC_CMD_INIT_RXQ_EXT_OUT msgresponse */ #define MC_CMD_INIT_RXQ_EXT_OUT_LEN 0 +/* MC_CMD_INIT_RXQ_V3_OUT msgresponse */ +#define MC_CMD_INIT_RXQ_V3_OUT_LEN 0 + /***********************************/ /* MC_CMD_INIT_TXQ @@ -8607,18 +7690,23 @@ #define MC_CMD_INIT_TXQ_IN_LEN(num) (28+8*(num)) /* Size, in entries */ #define MC_CMD_INIT_TXQ_IN_SIZE_OFST 0 +#define MC_CMD_INIT_TXQ_IN_SIZE_LEN 4 /* The EVQ to send events to. This is an index originally specified to * INIT_EVQ. */ #define MC_CMD_INIT_TXQ_IN_TARGET_EVQ_OFST 4 +#define MC_CMD_INIT_TXQ_IN_TARGET_EVQ_LEN 4 /* The value to put in the event data. Check hardware spec. for valid range. */ #define MC_CMD_INIT_TXQ_IN_LABEL_OFST 8 +#define MC_CMD_INIT_TXQ_IN_LABEL_LEN 4 /* Desired instance. Must be set to a specific instance, which is a function * local queue index. */ #define MC_CMD_INIT_TXQ_IN_INSTANCE_OFST 12 +#define MC_CMD_INIT_TXQ_IN_INSTANCE_LEN 4 /* There will be more flags here. */ #define MC_CMD_INIT_TXQ_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_TXQ_IN_FLAGS_LEN 4 #define MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_LBN 0 #define MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_WIDTH 1 #define MC_CMD_INIT_TXQ_IN_FLAG_IP_CSUM_DIS_LBN 1 @@ -8639,8 +7727,10 @@ #define MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1 /* Owner ID to use if in buffer mode (zero if physical) */ #define MC_CMD_INIT_TXQ_IN_OWNER_ID_OFST 20 +#define MC_CMD_INIT_TXQ_IN_OWNER_ID_LEN 4 /* The port ID associated with the v-adaptor which should contain this DMAQ. */ #define MC_CMD_INIT_TXQ_IN_PORT_ID_OFST 24 +#define MC_CMD_INIT_TXQ_IN_PORT_ID_LEN 4 /* 64-bit address of 4k of 4k-aligned host memory buffer */ #define MC_CMD_INIT_TXQ_IN_DMA_ADDR_OFST 28 #define MC_CMD_INIT_TXQ_IN_DMA_ADDR_LEN 8 @@ -8655,18 +7745,23 @@ #define MC_CMD_INIT_TXQ_EXT_IN_LEN 544 /* Size, in entries */ #define MC_CMD_INIT_TXQ_EXT_IN_SIZE_OFST 0 +#define MC_CMD_INIT_TXQ_EXT_IN_SIZE_LEN 4 /* The EVQ to send events to. This is an index originally specified to * INIT_EVQ. 
*/ #define MC_CMD_INIT_TXQ_EXT_IN_TARGET_EVQ_OFST 4 +#define MC_CMD_INIT_TXQ_EXT_IN_TARGET_EVQ_LEN 4 /* The value to put in the event data. Check hardware spec. for valid range. */ #define MC_CMD_INIT_TXQ_EXT_IN_LABEL_OFST 8 +#define MC_CMD_INIT_TXQ_EXT_IN_LABEL_LEN 4 /* Desired instance. Must be set to a specific instance, which is a function * local queue index. */ #define MC_CMD_INIT_TXQ_EXT_IN_INSTANCE_OFST 12 +#define MC_CMD_INIT_TXQ_EXT_IN_INSTANCE_LEN 4 /* There will be more flags here. */ #define MC_CMD_INIT_TXQ_EXT_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAGS_LEN 4 #define MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0 #define MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1 #define MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_LBN 1 @@ -8689,10 +7784,14 @@ #define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TSOV2_EN_WIDTH 1 #define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_LBN 13 #define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_UTHRESH_LBN 14 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_UTHRESH_WIDTH 1 /* Owner ID to use if in buffer mode (zero if physical) */ #define MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_OFST 20 +#define MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_LEN 4 /* The port ID associated with the v-adaptor which should contain this DMAQ. */ #define MC_CMD_INIT_TXQ_EXT_IN_PORT_ID_OFST 24 +#define MC_CMD_INIT_TXQ_EXT_IN_PORT_ID_LEN 4 /* 64-bit address of 4k of 4k-aligned host memory buffer */ #define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_OFST 28 #define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LEN 8 @@ -8702,6 +7801,7 @@ #define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MAXNUM 64 /* Flags related to Qbb flow control mode. */ #define MC_CMD_INIT_TXQ_EXT_IN_QBB_FLAGS_OFST 540 +#define MC_CMD_INIT_TXQ_EXT_IN_QBB_FLAGS_LEN 4 #define MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_LBN 0 #define MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_WIDTH 1 #define MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_LBN 1 @@ -8729,6 +7829,7 @@ * passed to INIT_EVQ */ #define MC_CMD_FINI_EVQ_IN_INSTANCE_OFST 0 +#define MC_CMD_FINI_EVQ_IN_INSTANCE_LEN 4 /* MC_CMD_FINI_EVQ_OUT msgresponse */ #define MC_CMD_FINI_EVQ_OUT_LEN 0 @@ -8747,6 +7848,7 @@ #define MC_CMD_FINI_RXQ_IN_LEN 4 /* Instance of RXQ to destroy */ #define MC_CMD_FINI_RXQ_IN_INSTANCE_OFST 0 +#define MC_CMD_FINI_RXQ_IN_INSTANCE_LEN 4 /* MC_CMD_FINI_RXQ_OUT msgresponse */ #define MC_CMD_FINI_RXQ_OUT_LEN 0 @@ -8765,6 +7867,7 @@ #define MC_CMD_FINI_TXQ_IN_LEN 4 /* Instance of TXQ to destroy */ #define MC_CMD_FINI_TXQ_IN_INSTANCE_OFST 0 +#define MC_CMD_FINI_TXQ_IN_INSTANCE_LEN 4 /* MC_CMD_FINI_TXQ_OUT msgresponse */ #define MC_CMD_FINI_TXQ_OUT_LEN 0 @@ -8783,6 +7886,7 @@ #define MC_CMD_DRIVER_EVENT_IN_LEN 12 /* Handle of target EVQ */ #define MC_CMD_DRIVER_EVENT_IN_EVQ_OFST 0 +#define MC_CMD_DRIVER_EVENT_IN_EVQ_LEN 4 /* Bits 0 - 63 of event */ #define MC_CMD_DRIVER_EVENT_IN_DATA_OFST 4 #define MC_CMD_DRIVER_EVENT_IN_DATA_LEN 8 @@ -8809,11 +7913,12 @@ #define MC_CMD_PROXY_CMD_IN_LEN 4 /* The handle of the target function. 
*/ #define MC_CMD_PROXY_CMD_IN_TARGET_OFST 0 +#define MC_CMD_PROXY_CMD_IN_TARGET_LEN 4 #define MC_CMD_PROXY_CMD_IN_TARGET_PF_LBN 0 #define MC_CMD_PROXY_CMD_IN_TARGET_PF_WIDTH 16 #define MC_CMD_PROXY_CMD_IN_TARGET_VF_LBN 16 #define MC_CMD_PROXY_CMD_IN_TARGET_VF_WIDTH 16 -#define MC_CMD_PROXY_CMD_IN_VF_NULL 0xffff /* enum */ +#define MC_CMD_PROXY_CMD_IN_VF_NULL 0xffff /* enum */ /* MC_CMD_PROXY_CMD_OUT msgresponse */ #define MC_CMD_PROXY_CMD_OUT_LEN 0 @@ -8824,8 +7929,9 @@ #define MC_PROXY_STATUS_BUFFER_LEN 16 /* Handle allocated by the firmware for this proxy transaction */ #define MC_PROXY_STATUS_BUFFER_HANDLE_OFST 0 +#define MC_PROXY_STATUS_BUFFER_HANDLE_LEN 4 /* enum: An invalid handle. */ -#define MC_PROXY_STATUS_BUFFER_HANDLE_INVALID 0x0 +#define MC_PROXY_STATUS_BUFFER_HANDLE_INVALID 0x0 #define MC_PROXY_STATUS_BUFFER_HANDLE_LBN 0 #define MC_PROXY_STATUS_BUFFER_HANDLE_WIDTH 32 /* The requesting physical function number */ @@ -8854,6 +7960,7 @@ * elevated privilege mask granted to the requesting function. */ #define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_OFST 12 +#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_LEN 4 #define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_LBN 96 #define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_WIDTH 32 @@ -8871,6 +7978,7 @@ /* MC_CMD_PROXY_CONFIGURE_IN msgrequest */ #define MC_CMD_PROXY_CONFIGURE_IN_LEN 108 #define MC_CMD_PROXY_CONFIGURE_IN_FLAGS_OFST 0 +#define MC_CMD_PROXY_CONFIGURE_IN_FLAGS_LEN 4 #define MC_CMD_PROXY_CONFIGURE_IN_ENABLE_LBN 0 #define MC_CMD_PROXY_CONFIGURE_IN_ENABLE_WIDTH 1 /* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS @@ -8882,6 +7990,7 @@ #define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_OFST 8 /* Must be a power of 2 */ #define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_OFST 12 +#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_LEN 4 /* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS * of blocks, each of the size REPLY_BLOCK_SIZE. */ @@ -8891,6 +8000,7 @@ #define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_OFST 20 /* Must be a power of 2 */ #define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE_OFST 24 +#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE_LEN 4 /* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS * of blocks, each of the size STATUS_BLOCK_SIZE. This buffer is only needed if * host intends to complete proxied operations by using MC_CMD_PROXY_CMD. 
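The recurring pattern in this update is that every 32-bit MCDI field gains a _LEN 4 macro alongside its existing _OFST macro. A minimal sketch of what that pairing enables, assuming MCDI's little-endian field encoding and a plain byte-buffer request; the mcdi_put_dword() helper and the example values are hypothetical and not part of the sfc/base API (the real driver uses its own EFX_POPULATE_* machinery):

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* Values copied from the MC_CMD_PROXY_CONFIGURE_IN definitions above,
 * repeated here only so the sketch is self-contained. */
#define MC_CMD_PROXY_CONFIGURE_IN_LEN 108
#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_OFST 12
#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_LEN 4
#define MC_CMD_PROXY_CONFIGURE_IN_NUM_BLOCKS_OFST 40
#define MC_CMD_PROXY_CONFIGURE_IN_NUM_BLOCKS_LEN 4

/* Hypothetical helper: write one little-endian dword field into an MCDI
 * request buffer, using the _OFST/_LEN macro pair to bounds-check the
 * access before touching the buffer. */
static void
mcdi_put_dword(uint8_t *buf, size_t buf_len, unsigned int ofst,
	       unsigned int len, uint32_t value)
{
	assert(len == 4);              /* every field below is a dword */
	assert(ofst + len <= buf_len); /* checkable now that _LEN exists */
	buf[ofst + 0] = (uint8_t)(value & 0xff);
	buf[ofst + 1] = (uint8_t)((value >> 8) & 0xff);
	buf[ofst + 2] = (uint8_t)((value >> 16) & 0xff);
	buf[ofst + 3] = (uint8_t)((value >> 24) & 0xff);
}

int
main(void)
{
	uint8_t req[MC_CMD_PROXY_CONFIGURE_IN_LEN];
	uint32_t block_size = 64; /* "Must be a power of 2" per the field doc */

	assert(block_size != 0 && (block_size & (block_size - 1)) == 0);
	memset(req, 0, sizeof(req));
	mcdi_put_dword(req, sizeof(req),
	    MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_OFST,
	    MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_LEN, block_size);
	mcdi_put_dword(req, sizeof(req),
	    MC_CMD_PROXY_CONFIGURE_IN_NUM_BLOCKS_OFST,
	    MC_CMD_PROXY_CONFIGURE_IN_NUM_BLOCKS_LEN, 16);
	return 0;
}

Before this change only the _OFST half existed for dword fields, so a generic accessor had to hard-code the 4-byte size; with the added _LEN macros both halves of each access can be taken from the header, as sketched above.
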
@@ -8901,8 +8011,10 @@ #define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_OFST 32 /* Must be a power of 2, or zero if this buffer is not provided */ #define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE_OFST 36 +#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE_LEN 4 /* Applies to all three buffers */ #define MC_CMD_PROXY_CONFIGURE_IN_NUM_BLOCKS_OFST 40 +#define MC_CMD_PROXY_CONFIGURE_IN_NUM_BLOCKS_LEN 4 /* A bit mask defining which MCDI operations may be proxied */ #define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_OFST 44 #define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_LEN 64 @@ -8910,6 +8022,7 @@ /* MC_CMD_PROXY_CONFIGURE_EXT_IN msgrequest */ #define MC_CMD_PROXY_CONFIGURE_EXT_IN_LEN 112 #define MC_CMD_PROXY_CONFIGURE_EXT_IN_FLAGS_OFST 0 +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_FLAGS_LEN 4 #define MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_LBN 0 #define MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_WIDTH 1 /* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS @@ -8921,6 +8034,7 @@ #define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_HI_OFST 8 /* Must be a power of 2 */ #define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BLOCK_SIZE_OFST 12 +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BLOCK_SIZE_LEN 4 /* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS * of blocks, each of the size REPLY_BLOCK_SIZE. */ @@ -8930,6 +8044,7 @@ #define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_HI_OFST 20 /* Must be a power of 2 */ #define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BLOCK_SIZE_OFST 24 +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BLOCK_SIZE_LEN 4 /* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS * of blocks, each of the size STATUS_BLOCK_SIZE. This buffer is only needed if * host intends to complete proxied operations by using MC_CMD_PROXY_CMD. @@ -8940,12 +8055,15 @@ #define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_HI_OFST 32 /* Must be a power of 2, or zero if this buffer is not provided */ #define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BLOCK_SIZE_OFST 36 +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BLOCK_SIZE_LEN 4 /* Applies to all three buffers */ #define MC_CMD_PROXY_CONFIGURE_EXT_IN_NUM_BLOCKS_OFST 40 +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_NUM_BLOCKS_LEN 4 /* A bit mask defining which MCDI operations may be proxied */ #define MC_CMD_PROXY_CONFIGURE_EXT_IN_ALLOWED_MCDI_MASK_OFST 44 #define MC_CMD_PROXY_CONFIGURE_EXT_IN_ALLOWED_MCDI_MASK_LEN 64 #define MC_CMD_PROXY_CONFIGURE_EXT_IN_RESERVED_OFST 108 +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_RESERVED_LEN 4 /* MC_CMD_PROXY_CONFIGURE_OUT msgresponse */ #define MC_CMD_PROXY_CONFIGURE_OUT_LEN 0 @@ -8966,7 +8084,9 @@ /* MC_CMD_PROXY_COMPLETE_IN msgrequest */ #define MC_CMD_PROXY_COMPLETE_IN_LEN 12 #define MC_CMD_PROXY_COMPLETE_IN_BLOCK_INDEX_OFST 0 +#define MC_CMD_PROXY_COMPLETE_IN_BLOCK_INDEX_LEN 4 #define MC_CMD_PROXY_COMPLETE_IN_STATUS_OFST 4 +#define MC_CMD_PROXY_COMPLETE_IN_STATUS_LEN 4 /* enum: The operation has been completed by using MC_CMD_PROXY_CMD, the reply * is stored in the REPLY_BUFF. 
*/ @@ -8982,6 +8102,7 @@ */ #define MC_CMD_PROXY_COMPLETE_IN_TIMEDOUT 0x3 #define MC_CMD_PROXY_COMPLETE_IN_HANDLE_OFST 8 +#define MC_CMD_PROXY_COMPLETE_IN_HANDLE_LEN 4 /* MC_CMD_PROXY_COMPLETE_OUT msgresponse */ #define MC_CMD_PROXY_COMPLETE_OUT_LEN 0 @@ -9002,17 +8123,22 @@ #define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_LEN 8 /* Owner ID to use */ #define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_OWNER_OFST 0 +#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_OWNER_LEN 4 /* Size of buffer table pages to use, in bytes (note that only a few values are * legal on any specific hardware). */ #define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE_OFST 4 +#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE_LEN 4 /* MC_CMD_ALLOC_BUFTBL_CHUNK_OUT msgresponse */ #define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_LEN 12 #define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_HANDLE_OFST 0 +#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_HANDLE_LEN 4 #define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_NUMENTRIES_OFST 4 +#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_NUMENTRIES_LEN 4 /* Buffer table IDs for use in DMA descriptors. */ #define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_ID_OFST 8 +#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_ID_LEN 4 /***********************************/ @@ -9029,10 +8155,13 @@ #define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX 268 #define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LEN(num) (12+8*(num)) #define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_OFST 0 +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_LEN 4 /* ID */ #define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_OFST 4 +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_LEN 4 /* Num entries */ #define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 8 +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_LEN 4 /* Buffer table entry address */ #define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_OFST 12 #define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LEN 8 @@ -9056,48 +8185,11 @@ /* MC_CMD_FREE_BUFTBL_CHUNK_IN msgrequest */ #define MC_CMD_FREE_BUFTBL_CHUNK_IN_LEN 4 #define MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_OFST 0 +#define MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_LEN 4 /* MC_CMD_FREE_BUFTBL_CHUNK_OUT msgresponse */ #define MC_CMD_FREE_BUFTBL_CHUNK_OUT_LEN 0 -/* PORT_CONFIG_ENTRY structuredef */ -#define PORT_CONFIG_ENTRY_LEN 16 -/* External port number (label) */ -#define PORT_CONFIG_ENTRY_EXT_NUMBER_OFST 0 -#define PORT_CONFIG_ENTRY_EXT_NUMBER_LEN 1 -#define PORT_CONFIG_ENTRY_EXT_NUMBER_LBN 0 -#define PORT_CONFIG_ENTRY_EXT_NUMBER_WIDTH 8 -/* Port core location */ -#define PORT_CONFIG_ENTRY_CORE_OFST 1 -#define PORT_CONFIG_ENTRY_CORE_LEN 1 -#define PORT_CONFIG_ENTRY_STANDALONE 0x0 /* enum */ -#define PORT_CONFIG_ENTRY_MASTER 0x1 /* enum */ -#define PORT_CONFIG_ENTRY_SLAVE 0x2 /* enum */ -#define PORT_CONFIG_ENTRY_CORE_LBN 8 -#define PORT_CONFIG_ENTRY_CORE_WIDTH 8 -/* Internal number (HW resource) relative to the core */ -#define PORT_CONFIG_ENTRY_INT_NUMBER_OFST 2 -#define PORT_CONFIG_ENTRY_INT_NUMBER_LEN 1 -#define PORT_CONFIG_ENTRY_INT_NUMBER_LBN 16 -#define PORT_CONFIG_ENTRY_INT_NUMBER_WIDTH 8 -/* Reserved */ -#define PORT_CONFIG_ENTRY_RSVD_OFST 3 -#define PORT_CONFIG_ENTRY_RSVD_LEN 1 -#define PORT_CONFIG_ENTRY_RSVD_LBN 24 -#define PORT_CONFIG_ENTRY_RSVD_WIDTH 8 -/* Bitmask of KR lanes used by the port */ -#define PORT_CONFIG_ENTRY_LANES_OFST 4 -#define PORT_CONFIG_ENTRY_LANES_LBN 32 -#define PORT_CONFIG_ENTRY_LANES_WIDTH 32 -/* Port capabilities (MC_CMD_PHY_CAP_*) */ -#define PORT_CONFIG_ENTRY_SUPPORTED_CAPS_OFST 8 -#define PORT_CONFIG_ENTRY_SUPPORTED_CAPS_LBN 64 -#define PORT_CONFIG_ENTRY_SUPPORTED_CAPS_WIDTH 32 -/* Reserved (align to 16 bytes) */ -#define 
PORT_CONFIG_ENTRY_RSVD2_OFST 12 -#define PORT_CONFIG_ENTRY_RSVD2_LBN 96 -#define PORT_CONFIG_ENTRY_RSVD2_WIDTH 32 - /***********************************/ /* MC_CMD_FILTER_OP @@ -9112,18 +8204,19 @@ #define MC_CMD_FILTER_OP_IN_LEN 108 /* identifies the type of operation requested */ #define MC_CMD_FILTER_OP_IN_OP_OFST 0 +#define MC_CMD_FILTER_OP_IN_OP_LEN 4 /* enum: single-recipient filter insert */ -#define MC_CMD_FILTER_OP_IN_OP_INSERT 0x0 +#define MC_CMD_FILTER_OP_IN_OP_INSERT 0x0 /* enum: single-recipient filter remove */ -#define MC_CMD_FILTER_OP_IN_OP_REMOVE 0x1 +#define MC_CMD_FILTER_OP_IN_OP_REMOVE 0x1 /* enum: multi-recipient filter subscribe */ -#define MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE 0x2 +#define MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE 0x2 /* enum: multi-recipient filter unsubscribe */ -#define MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE 0x3 +#define MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE 0x3 /* enum: replace one recipient with another (warning - the filter handle may * change) */ -#define MC_CMD_FILTER_OP_IN_OP_REPLACE 0x4 +#define MC_CMD_FILTER_OP_IN_OP_REPLACE 0x4 /* filter handle (for remove / unsubscribe operations) */ #define MC_CMD_FILTER_OP_IN_HANDLE_OFST 4 #define MC_CMD_FILTER_OP_IN_HANDLE_LEN 8 @@ -9132,8 +8225,10 @@ /* The port ID associated with the v-adaptor which should contain this filter. */ #define MC_CMD_FILTER_OP_IN_PORT_ID_OFST 12 +#define MC_CMD_FILTER_OP_IN_PORT_ID_LEN 4 /* fields to include in match criteria */ #define MC_CMD_FILTER_OP_IN_MATCH_FIELDS_OFST 16 +#define MC_CMD_FILTER_OP_IN_MATCH_FIELDS_LEN 4 #define MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_LBN 0 #define MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_WIDTH 1 #define MC_CMD_FILTER_OP_IN_MATCH_DST_IP_LBN 1 @@ -9164,43 +8259,49 @@ #define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1 /* receive destination */ #define MC_CMD_FILTER_OP_IN_RX_DEST_OFST 20 +#define MC_CMD_FILTER_OP_IN_RX_DEST_LEN 4 /* enum: drop packets */ -#define MC_CMD_FILTER_OP_IN_RX_DEST_DROP 0x0 +#define MC_CMD_FILTER_OP_IN_RX_DEST_DROP 0x0 /* enum: receive to host */ -#define MC_CMD_FILTER_OP_IN_RX_DEST_HOST 0x1 +#define MC_CMD_FILTER_OP_IN_RX_DEST_HOST 0x1 /* enum: receive to MC */ -#define MC_CMD_FILTER_OP_IN_RX_DEST_MC 0x2 +#define MC_CMD_FILTER_OP_IN_RX_DEST_MC 0x2 /* enum: loop back to TXDP 0 */ -#define MC_CMD_FILTER_OP_IN_RX_DEST_TX0 0x3 +#define MC_CMD_FILTER_OP_IN_RX_DEST_TX0 0x3 /* enum: loop back to TXDP 1 */ -#define MC_CMD_FILTER_OP_IN_RX_DEST_TX1 0x4 +#define MC_CMD_FILTER_OP_IN_RX_DEST_TX1 0x4 /* receive queue handle (for multiple queue modes, this is the base queue) */ #define MC_CMD_FILTER_OP_IN_RX_QUEUE_OFST 24 +#define MC_CMD_FILTER_OP_IN_RX_QUEUE_LEN 4 /* receive mode */ #define MC_CMD_FILTER_OP_IN_RX_MODE_OFST 28 +#define MC_CMD_FILTER_OP_IN_RX_MODE_LEN 4 /* enum: receive to just the specified queue */ -#define MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE 0x0 +#define MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE 0x0 /* enum: receive to multiple queues using RSS context */ -#define MC_CMD_FILTER_OP_IN_RX_MODE_RSS 0x1 +#define MC_CMD_FILTER_OP_IN_RX_MODE_RSS 0x1 /* enum: receive to multiple queues using .1p mapping */ -#define MC_CMD_FILTER_OP_IN_RX_MODE_DOT1P_MAPPING 0x2 +#define MC_CMD_FILTER_OP_IN_RX_MODE_DOT1P_MAPPING 0x2 /* enum: install a filter entry that will never match; for test purposes only */ -#define MC_CMD_FILTER_OP_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000 +#define MC_CMD_FILTER_OP_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000 /* RSS context (for RX_MODE_RSS) or .1p mapping handle (for * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or * 
MC_CMD_DOT1P_MAPPING_ALLOC. */ #define MC_CMD_FILTER_OP_IN_RX_CONTEXT_OFST 32 +#define MC_CMD_FILTER_OP_IN_RX_CONTEXT_LEN 4 /* transmit domain (reserved; set to 0) */ #define MC_CMD_FILTER_OP_IN_TX_DOMAIN_OFST 36 +#define MC_CMD_FILTER_OP_IN_TX_DOMAIN_LEN 4 /* transmit destination (either set the MAC and/or PM bits for explicit * control, or set this field to TX_DEST_DEFAULT for sensible default * behaviour) */ #define MC_CMD_FILTER_OP_IN_TX_DEST_OFST 40 +#define MC_CMD_FILTER_OP_IN_TX_DEST_LEN 4 /* enum: request default behaviour (based on filter type) */ -#define MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT 0xffffffff +#define MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT 0xffffffff #define MC_CMD_FILTER_OP_IN_TX_DEST_MAC_LBN 0 #define MC_CMD_FILTER_OP_IN_TX_DEST_MAC_WIDTH 1 #define MC_CMD_FILTER_OP_IN_TX_DEST_PM_LBN 1 @@ -9231,8 +8332,10 @@ #define MC_CMD_FILTER_OP_IN_IP_PROTO_LEN 2 /* Firmware defined register 0 to match (reserved; set to 0) */ #define MC_CMD_FILTER_OP_IN_FWDEF0_OFST 68 +#define MC_CMD_FILTER_OP_IN_FWDEF0_LEN 4 /* Firmware defined register 1 to match (reserved; set to 0) */ #define MC_CMD_FILTER_OP_IN_FWDEF1_OFST 72 +#define MC_CMD_FILTER_OP_IN_FWDEF1_LEN 4 /* source IP address to match (as bytes in network order; set last 12 bytes to * 0 for IPv4 address) */ @@ -9251,6 +8354,7 @@ #define MC_CMD_FILTER_OP_EXT_IN_LEN 172 /* identifies the type of operation requested */ #define MC_CMD_FILTER_OP_EXT_IN_OP_OFST 0 +#define MC_CMD_FILTER_OP_EXT_IN_OP_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_FILTER_OP_IN/OP */ /* filter handle (for remove / unsubscribe operations) */ @@ -9261,8 +8365,10 @@ /* The port ID associated with the v-adaptor which should contain this filter. */ #define MC_CMD_FILTER_OP_EXT_IN_PORT_ID_OFST 12 +#define MC_CMD_FILTER_OP_EXT_IN_PORT_ID_LEN 4 /* fields to include in match criteria */ #define MC_CMD_FILTER_OP_EXT_IN_MATCH_FIELDS_OFST 16 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FIELDS_LEN 4 #define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_LBN 0 #define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_WIDTH 1 #define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_LBN 1 @@ -9321,43 +8427,49 @@ #define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1 /* receive destination */ #define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_OFST 20 +#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_LEN 4 /* enum: drop packets */ -#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_DROP 0x0 +#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_DROP 0x0 /* enum: receive to host */ -#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_HOST 0x1 +#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_HOST 0x1 /* enum: receive to MC */ -#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_MC 0x2 +#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_MC 0x2 /* enum: loop back to TXDP 0 */ -#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX0 0x3 +#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX0 0x3 /* enum: loop back to TXDP 1 */ -#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX1 0x4 +#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX1 0x4 /* receive queue handle (for multiple queue modes, this is the base queue) */ #define MC_CMD_FILTER_OP_EXT_IN_RX_QUEUE_OFST 24 +#define MC_CMD_FILTER_OP_EXT_IN_RX_QUEUE_LEN 4 /* receive mode */ #define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_OFST 28 +#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_LEN 4 /* enum: receive to just the specified queue */ -#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_SIMPLE 0x0 +#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_SIMPLE 0x0 /* enum: receive to multiple queues using RSS context */ -#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_RSS 0x1 +#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_RSS 0x1 /* enum: 
receive to multiple queues using .1p mapping */ -#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_DOT1P_MAPPING 0x2 +#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_DOT1P_MAPPING 0x2 /* enum: install a filter entry that will never match; for test purposes only */ -#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000 +#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000 /* RSS context (for RX_MODE_RSS) or .1p mapping handle (for * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or * MC_CMD_DOT1P_MAPPING_ALLOC. */ #define MC_CMD_FILTER_OP_EXT_IN_RX_CONTEXT_OFST 32 +#define MC_CMD_FILTER_OP_EXT_IN_RX_CONTEXT_LEN 4 /* transmit domain (reserved; set to 0) */ #define MC_CMD_FILTER_OP_EXT_IN_TX_DOMAIN_OFST 36 +#define MC_CMD_FILTER_OP_EXT_IN_TX_DOMAIN_LEN 4 /* transmit destination (either set the MAC and/or PM bits for explicit * control, or set this field to TX_DEST_DEFAULT for sensible default * behaviour) */ #define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_OFST 40 +#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_LEN 4 /* enum: request default behaviour (based on filter type) */ -#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_DEFAULT 0xffffffff +#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_DEFAULT 0xffffffff #define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_LBN 0 #define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_WIDTH 1 #define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_PM_LBN 1 @@ -9388,27 +8500,29 @@ #define MC_CMD_FILTER_OP_EXT_IN_IP_PROTO_LEN 2 /* Firmware defined register 0 to match (reserved; set to 0) */ #define MC_CMD_FILTER_OP_EXT_IN_FWDEF0_OFST 68 +#define MC_CMD_FILTER_OP_EXT_IN_FWDEF0_LEN 4 /* VNI (for VXLAN/Geneve, when IP protocol is UDP) or VSID (for NVGRE, when IP * protocol is GRE) to match (as bytes in network order; set last byte to 0 for * VXLAN/NVGRE, or 1 for Geneve) */ #define MC_CMD_FILTER_OP_EXT_IN_VNI_OR_VSID_OFST 72 +#define MC_CMD_FILTER_OP_EXT_IN_VNI_OR_VSID_LEN 4 #define MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_LBN 0 #define MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_WIDTH 24 #define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_LBN 24 #define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_WIDTH 8 /* enum: Match VXLAN traffic with this VNI */ -#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN 0x0 +#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN 0x0 /* enum: Match Geneve traffic with this VNI */ -#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE 0x1 +#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE 0x1 /* enum: Reserved for experimental development use */ -#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_EXPERIMENTAL 0xfe +#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_EXPERIMENTAL 0xfe #define MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_LBN 0 #define MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_WIDTH 24 #define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_LBN 24 #define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_WIDTH 8 /* enum: Match NVGRE traffic with this VSID */ -#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_NVGRE 0x0 +#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_NVGRE 0x0 /* source IP address to match (as bytes in network order; set last 12 bytes to * 0 for IPv4 address) */ @@ -9458,10 +8572,12 @@ * to 0) */ #define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF0_OFST 132 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF0_LEN 4 /* VXLAN/NVGRE inner frame Firmware defined register 1 to match (reserved; set * to 0) */ #define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF1_OFST 136 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF1_LEN 4 /* VXLAN/NVGRE inner frame source IP address to match (as bytes in network * order; set last 12 bytes to 0 for IPv4 address) */ @@ -9473,82 +8589,357 @@ #define 
MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_IP_OFST 156 #define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_IP_LEN 16 -/* MC_CMD_FILTER_OP_OUT msgresponse */ -#define MC_CMD_FILTER_OP_OUT_LEN 12 -/* identifies the type of operation requested */ -#define MC_CMD_FILTER_OP_OUT_OP_OFST 0 -/* Enum values, see field(s): */ -/* MC_CMD_FILTER_OP_IN/OP */ -/* Returned filter handle (for insert / subscribe operations). Note that these - * handles should be considered opaque to the host, although a value of - * 0xFFFFFFFF_FFFFFFFF is guaranteed never to be a valid handle. +/* MC_CMD_FILTER_OP_V3_IN msgrequest: FILTER_OP extension to support additional + * filter actions for Intel's DPDK (Data Plane Development Kit, dpdk.org) via + * its rte_flow API. This extension is only useful with the sfc_efx driver + * included as part of DPDK, used in conjunction with the dpdk datapath + * firmware variant. */ -#define MC_CMD_FILTER_OP_OUT_HANDLE_OFST 4 -#define MC_CMD_FILTER_OP_OUT_HANDLE_LEN 8 -#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_OFST 4 -#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_OFST 8 -/* enum: guaranteed invalid filter handle (low 32 bits) */ -#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_INVALID 0xffffffff -/* enum: guaranteed invalid filter handle (high 32 bits) */ -#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_INVALID 0xffffffff - -/* MC_CMD_FILTER_OP_EXT_OUT msgresponse */ -#define MC_CMD_FILTER_OP_EXT_OUT_LEN 12 +#define MC_CMD_FILTER_OP_V3_IN_LEN 180 /* identifies the type of operation requested */ -#define MC_CMD_FILTER_OP_EXT_OUT_OP_OFST 0 -/* Enum values, see field(s): */ -/* MC_CMD_FILTER_OP_EXT_IN/OP */ -/* Returned filter handle (for insert / subscribe operations). Note that these - * handles should be considered opaque to the host, although a value of - * 0xFFFFFFFF_FFFFFFFF is guaranteed never to be a valid handle. - */ -#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_OFST 4 -#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LEN 8 -#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LO_OFST 4 -#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_HI_OFST 8 +#define MC_CMD_FILTER_OP_V3_IN_OP_OFST 0 +#define MC_CMD_FILTER_OP_V3_IN_OP_LEN 4 /* Enum values, see field(s): */ -/* MC_CMD_FILTER_OP_OUT/HANDLE */ - - -/***********************************/ -/* MC_CMD_GET_PARSER_DISP_INFO - * Get information related to the parser-dispatcher subsystem +/* MC_CMD_FILTER_OP_IN/OP */ +/* filter handle (for remove / unsubscribe operations) */ +#define MC_CMD_FILTER_OP_V3_IN_HANDLE_OFST 4 +#define MC_CMD_FILTER_OP_V3_IN_HANDLE_LEN 8 +#define MC_CMD_FILTER_OP_V3_IN_HANDLE_LO_OFST 4 +#define MC_CMD_FILTER_OP_V3_IN_HANDLE_HI_OFST 8 +/* The port ID associated with the v-adaptor which should contain this filter. 
*/ -#define MC_CMD_GET_PARSER_DISP_INFO 0xe4 -#undef MC_CMD_0xe4_PRIVILEGE_CTG - -#define MC_CMD_0xe4_PRIVILEGE_CTG SRIOV_CTG_GENERAL - -/* MC_CMD_GET_PARSER_DISP_INFO_IN msgrequest */ -#define MC_CMD_GET_PARSER_DISP_INFO_IN_LEN 4 -/* identifies the type of operation requested */ -#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_OFST 0 -/* enum: read the list of supported RX filter matches */ -#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES 0x1 -/* enum: read flags indicating restrictions on filter insertion for the calling - * client +#define MC_CMD_FILTER_OP_V3_IN_PORT_ID_OFST 12 +#define MC_CMD_FILTER_OP_V3_IN_PORT_ID_LEN 4 +/* fields to include in match criteria */ +#define MC_CMD_FILTER_OP_V3_IN_MATCH_FIELDS_OFST 16 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_FIELDS_LEN 4 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_IP_LBN 0 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_IP_LBN 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_MAC_LBN 2 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_PORT_LBN 3 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_MAC_LBN 4 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_PORT_LBN 5 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_ETHER_TYPE_LBN 6 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_ETHER_TYPE_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_INNER_VLAN_LBN 7 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_INNER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_OUTER_VLAN_LBN 8 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_OUTER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IP_PROTO_LBN 9 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IP_PROTO_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_FWDEF0_LBN 10 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_FWDEF0_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_VNI_OR_VSID_LBN 11 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_VNI_OR_VSID_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_IP_LBN 12 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_IP_LBN 13 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_MAC_LBN 14 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_PORT_LBN 15 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_MAC_LBN 16 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_PORT_LBN 17 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_ETHER_TYPE_LBN 18 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_ETHER_TYPE_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_INNER_VLAN_LBN 19 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_INNER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_OUTER_VLAN_LBN 20 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_OUTER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_IP_PROTO_LBN 21 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_IP_PROTO_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF0_LBN 22 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF0_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF1_LBN 23 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF1_WIDTH 1 +#define 
MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN 24 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN 25 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_UCAST_DST_LBN 31 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1 +/* receive destination */ +#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_OFST 20 +#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_LEN 4 +/* enum: drop packets */ +#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_DROP 0x0 +/* enum: receive to host */ +#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_HOST 0x1 +/* enum: receive to MC */ +#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_MC 0x2 +/* enum: loop back to TXDP 0 */ +#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_TX0 0x3 +/* enum: loop back to TXDP 1 */ +#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_TX1 0x4 +/* receive queue handle (for multiple queue modes, this is the base queue) */ +#define MC_CMD_FILTER_OP_V3_IN_RX_QUEUE_OFST 24 +#define MC_CMD_FILTER_OP_V3_IN_RX_QUEUE_LEN 4 +/* receive mode */ +#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_OFST 28 +#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_LEN 4 +/* enum: receive to just the specified queue */ +#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_SIMPLE 0x0 +/* enum: receive to multiple queues using RSS context */ +#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_RSS 0x1 +/* enum: receive to multiple queues using .1p mapping */ +#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_DOT1P_MAPPING 0x2 +/* enum: install a filter entry that will never match; for test purposes only */ -#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_RESTRICTIONS 0x2 -/* enum: read properties relating to security rules (Medford-only; for use by - * SolarSecure apps, not directly by drivers. See SF-114946-SW.) +#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000 +/* RSS context (for RX_MODE_RSS) or .1p mapping handle (for + * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or + * MC_CMD_DOT1P_MAPPING_ALLOC. 
*/ -#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SECURITY_RULE_INFO 0x3 -/* enum: read the list of supported RX filter matches for VXLAN/NVGRE - * encapsulated frames, which follow a different match sequence to normal - * frames (Medford only) +#define MC_CMD_FILTER_OP_V3_IN_RX_CONTEXT_OFST 32 +#define MC_CMD_FILTER_OP_V3_IN_RX_CONTEXT_LEN 4 +/* transmit domain (reserved; set to 0) */ +#define MC_CMD_FILTER_OP_V3_IN_TX_DOMAIN_OFST 36 +#define MC_CMD_FILTER_OP_V3_IN_TX_DOMAIN_LEN 4 +/* transmit destination (either set the MAC and/or PM bits for explicit + * control, or set this field to TX_DEST_DEFAULT for sensible default + * behaviour) */ -#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_ENCAP_RX_MATCHES 0x4 - -/* MC_CMD_GET_PARSER_DISP_INFO_OUT msgresponse */ -#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMIN 8 -#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX 252 -#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LEN(num) (8+4*(num)) -/* identifies the type of operation requested */ -#define MC_CMD_GET_PARSER_DISP_INFO_OUT_OP_OFST 0 +#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_OFST 40 +#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_LEN 4 +/* enum: request default behaviour (based on filter type) */ +#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_DEFAULT 0xffffffff +#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_MAC_LBN 0 +#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_PM_LBN 1 +#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_PM_WIDTH 1 +/* source MAC address to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_V3_IN_SRC_MAC_OFST 44 +#define MC_CMD_FILTER_OP_V3_IN_SRC_MAC_LEN 6 +/* source port to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_V3_IN_SRC_PORT_OFST 50 +#define MC_CMD_FILTER_OP_V3_IN_SRC_PORT_LEN 2 +/* destination MAC address to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_V3_IN_DST_MAC_OFST 52 +#define MC_CMD_FILTER_OP_V3_IN_DST_MAC_LEN 6 +/* destination port to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_V3_IN_DST_PORT_OFST 58 +#define MC_CMD_FILTER_OP_V3_IN_DST_PORT_LEN 2 +/* Ethernet type to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_V3_IN_ETHER_TYPE_OFST 60 +#define MC_CMD_FILTER_OP_V3_IN_ETHER_TYPE_LEN 2 +/* Inner VLAN tag to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_V3_IN_INNER_VLAN_OFST 62 +#define MC_CMD_FILTER_OP_V3_IN_INNER_VLAN_LEN 2 +/* Outer VLAN tag to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_V3_IN_OUTER_VLAN_OFST 64 +#define MC_CMD_FILTER_OP_V3_IN_OUTER_VLAN_LEN 2 +/* IP protocol to match (in low byte; set high byte to 0) */ +#define MC_CMD_FILTER_OP_V3_IN_IP_PROTO_OFST 66 +#define MC_CMD_FILTER_OP_V3_IN_IP_PROTO_LEN 2 +/* Firmware defined register 0 to match (reserved; set to 0) */ +#define MC_CMD_FILTER_OP_V3_IN_FWDEF0_OFST 68 +#define MC_CMD_FILTER_OP_V3_IN_FWDEF0_LEN 4 +/* VNI (for VXLAN/Geneve, when IP protocol is UDP) or VSID (for NVGRE, when IP + * protocol is GRE) to match (as bytes in network order; set last byte to 0 for + * VXLAN/NVGRE, or 1 for Geneve) + */ +#define MC_CMD_FILTER_OP_V3_IN_VNI_OR_VSID_OFST 72 +#define MC_CMD_FILTER_OP_V3_IN_VNI_OR_VSID_LEN 4 +#define MC_CMD_FILTER_OP_V3_IN_VNI_VALUE_LBN 0 +#define MC_CMD_FILTER_OP_V3_IN_VNI_VALUE_WIDTH 24 +#define MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_LBN 24 +#define MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_WIDTH 8 +/* enum: Match VXLAN traffic with this VNI */ +#define MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_VXLAN 0x0 +/* enum: Match Geneve traffic with this VNI */ +#define 
MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_GENEVE 0x1 +/* enum: Reserved for experimental development use */ +#define MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_EXPERIMENTAL 0xfe +#define MC_CMD_FILTER_OP_V3_IN_VSID_VALUE_LBN 0 +#define MC_CMD_FILTER_OP_V3_IN_VSID_VALUE_WIDTH 24 +#define MC_CMD_FILTER_OP_V3_IN_VSID_TYPE_LBN 24 +#define MC_CMD_FILTER_OP_V3_IN_VSID_TYPE_WIDTH 8 +/* enum: Match NVGRE traffic with this VSID */ +#define MC_CMD_FILTER_OP_V3_IN_VSID_TYPE_NVGRE 0x0 +/* source IP address to match (as bytes in network order; set last 12 bytes to + * 0 for IPv4 address) + */ +#define MC_CMD_FILTER_OP_V3_IN_SRC_IP_OFST 76 +#define MC_CMD_FILTER_OP_V3_IN_SRC_IP_LEN 16 +/* destination IP address to match (as bytes in network order; set last 12 + * bytes to 0 for IPv4 address) + */ +#define MC_CMD_FILTER_OP_V3_IN_DST_IP_OFST 92 +#define MC_CMD_FILTER_OP_V3_IN_DST_IP_LEN 16 +/* VXLAN/NVGRE inner frame source MAC address to match (as bytes in network + * order) + */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_MAC_OFST 108 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_MAC_LEN 6 +/* VXLAN/NVGRE inner frame source port to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_PORT_OFST 114 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_PORT_LEN 2 +/* VXLAN/NVGRE inner frame destination MAC address to match (as bytes in + * network order) + */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_MAC_OFST 116 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_MAC_LEN 6 +/* VXLAN/NVGRE inner frame destination port to match (as bytes in network + * order) + */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_PORT_OFST 122 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_PORT_LEN 2 +/* VXLAN/NVGRE inner frame Ethernet type to match (as bytes in network order) + */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_ETHER_TYPE_OFST 124 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_ETHER_TYPE_LEN 2 +/* VXLAN/NVGRE inner frame Inner VLAN tag to match (as bytes in network order) + */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_INNER_VLAN_OFST 126 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_INNER_VLAN_LEN 2 +/* VXLAN/NVGRE inner frame Outer VLAN tag to match (as bytes in network order) + */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_OUTER_VLAN_OFST 128 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_OUTER_VLAN_LEN 2 +/* VXLAN/NVGRE inner frame IP protocol to match (in low byte; set high byte to + * 0) + */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_IP_PROTO_OFST 130 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_IP_PROTO_LEN 2 +/* VXLAN/NVGRE inner frame Firmware defined register 0 to match (reserved; set + * to 0) + */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_FWDEF0_OFST 132 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_FWDEF0_LEN 4 +/* VXLAN/NVGRE inner frame Firmware defined register 1 to match (reserved; set + * to 0) + */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_FWDEF1_OFST 136 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_FWDEF1_LEN 4 +/* VXLAN/NVGRE inner frame source IP address to match (as bytes in network + * order; set last 12 bytes to 0 for IPv4 address) + */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_IP_OFST 140 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_IP_LEN 16 +/* VXLAN/NVGRE inner frame destination IP address to match (as bytes in network + * order; set last 12 bytes to 0 for IPv4 address) + */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_IP_OFST 156 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_IP_LEN 16 +/* Set an action for all packets matching this filter. The DPDK driver and dpdk + * f/w variant use their own specific delivery structures, which are documented + * in the DPDK Firmware Driver Interface (SF-119419-TC). 
Requesting anything + * other than MATCH_ACTION_NONE when the NIC is running another f/w variant + * will cause the filter insertion to fail with ENOTSUP. + */ +#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_OFST 172 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_LEN 4 +/* enum: do nothing extra */ +#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_NONE 0x0 +/* enum: Set the match flag in the packet prefix for packets matching the + * filter (only with dpdk firmware, otherwise fails with ENOTSUP). Used to + * support the DPDK rte_flow "FLAG" action. + */ +#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_FLAG 0x1 +/* enum: Insert MATCH_MARK_VALUE into the packet prefix for packets matching + * the filter (only with dpdk firmware, otherwise fails with ENOTSUP). Used to + * support the DPDK rte_flow "MARK" action. + */ +#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_MARK 0x2 +/* the mark value for MATCH_ACTION_MARK. Requesting a value larger than the + * maximum (obtained from MC_CMD_GET_CAPABILITIES_V5/FILTER_ACTION_MARK_MAX) + * will cause the filter insertion to fail with EINVAL. + */ +#define MC_CMD_FILTER_OP_V3_IN_MATCH_MARK_VALUE_OFST 176 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_MARK_VALUE_LEN 4 + +/* MC_CMD_FILTER_OP_OUT msgresponse */ +#define MC_CMD_FILTER_OP_OUT_LEN 12 +/* identifies the type of operation requested */ +#define MC_CMD_FILTER_OP_OUT_OP_OFST 0 +#define MC_CMD_FILTER_OP_OUT_OP_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_FILTER_OP_IN/OP */ +/* Returned filter handle (for insert / subscribe operations). Note that these + * handles should be considered opaque to the host, although a value of + * 0xFFFFFFFF_FFFFFFFF is guaranteed never to be a valid handle. + */ +#define MC_CMD_FILTER_OP_OUT_HANDLE_OFST 4 +#define MC_CMD_FILTER_OP_OUT_HANDLE_LEN 8 +#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_OFST 4 +#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_OFST 8 +/* enum: guaranteed invalid filter handle (low 32 bits) */ +#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_INVALID 0xffffffff +/* enum: guaranteed invalid filter handle (high 32 bits) */ +#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_INVALID 0xffffffff + +/* MC_CMD_FILTER_OP_EXT_OUT msgresponse */ +#define MC_CMD_FILTER_OP_EXT_OUT_LEN 12 +/* identifies the type of operation requested */ +#define MC_CMD_FILTER_OP_EXT_OUT_OP_OFST 0 +#define MC_CMD_FILTER_OP_EXT_OUT_OP_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_FILTER_OP_EXT_IN/OP */ +/* Returned filter handle (for insert / subscribe operations). Note that these + * handles should be considered opaque to the host, although a value of + * 0xFFFFFFFF_FFFFFFFF is guaranteed never to be a valid handle. 
+ */ +#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_OFST 4 +#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LEN 8 +#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LO_OFST 4 +#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_HI_OFST 8 +/* Enum values, see field(s): */ +/* MC_CMD_FILTER_OP_OUT/HANDLE */ + + +/***********************************/ +/* MC_CMD_GET_PARSER_DISP_INFO + * Get information related to the parser-dispatcher subsystem + */ +#define MC_CMD_GET_PARSER_DISP_INFO 0xe4 +#undef MC_CMD_0xe4_PRIVILEGE_CTG + +#define MC_CMD_0xe4_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_PARSER_DISP_INFO_IN msgrequest */ +#define MC_CMD_GET_PARSER_DISP_INFO_IN_LEN 4 +/* identifies the type of operation requested */ +#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_OFST 0 +#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_LEN 4 +/* enum: read the list of supported RX filter matches */ +#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES 0x1 +/* enum: read flags indicating restrictions on filter insertion for the calling + * client + */ +#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_RESTRICTIONS 0x2 +/* enum: read properties relating to security rules (Medford-only; for use by + * SolarSecure apps, not directly by drivers. See SF-114946-SW.) + */ +#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SECURITY_RULE_INFO 0x3 +/* enum: read the list of supported RX filter matches for VXLAN/NVGRE + * encapsulated frames, which follow a different match sequence to normal + * frames (Medford only) + */ +#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_ENCAP_RX_MATCHES 0x4 + +/* MC_CMD_GET_PARSER_DISP_INFO_OUT msgresponse */ +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMIN 8 +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX 252 +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LEN(num) (8+4*(num)) +/* identifies the type of operation requested */ +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_OP_OFST 0 +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_OP_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */ /* number of supported match types */ #define MC_CMD_GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES_OFST 4 +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES_LEN 4 /* array of supported match types (valid MATCH_FIELDS values for * MC_CMD_FILTER_OP) sorted in decreasing priority order */ @@ -9561,10 +8952,12 @@ #define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_LEN 8 /* identifies the type of operation requested */ #define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_OP_OFST 0 +#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_OP_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */ /* bitfield of filter insertion restrictions */ #define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_RESTRICTION_FLAGS_OFST 4 +#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_RESTRICTION_FLAGS_LEN 4 #define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_LBN 0 #define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_WIDTH 1 @@ -9578,28 +8971,37 @@ #define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_LEN 36 /* identifies the type of operation requested */ #define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_OP_OFST 0 +#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_OP_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */ /* a version number representing the set of rule lookups that are implemented * by the currently running firmware */ #define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_RULES_VERSION_OFST 4 +#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_RULES_VERSION_LEN 
4 /* enum: implements lookup sequences described in SF-114946-SW draft C */ -#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_RULES_VERSION_SF_114946_SW_C 0x0 +#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_RULES_VERSION_SF_114946_SW_C 0x0 /* the number of nodes in the subnet map */ #define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_MAP_NUM_NODES_OFST 8 +#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_MAP_NUM_NODES_LEN 4 /* the number of entries in one subnet map node */ #define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_MAP_NUM_ENTRIES_PER_NODE_OFST 12 +#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_MAP_NUM_ENTRIES_PER_NODE_LEN 4 /* minimum valid value for a subnet ID in a subnet map leaf */ #define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_ID_MIN_OFST 16 +#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_ID_MIN_LEN 4 /* maximum valid value for a subnet ID in a subnet map leaf */ #define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_ID_MAX_OFST 20 +#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_ID_MAX_LEN 4 /* the number of entries in the local and remote port range maps */ #define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_TREE_NUM_ENTRIES_OFST 24 +#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_TREE_NUM_ENTRIES_LEN 4 /* minimum valid value for a portrange ID in a port range map leaf */ #define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_ID_MIN_OFST 28 +#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_ID_MIN_LEN 4 /* maximum valid value for a portrange ID in a port range map leaf */ #define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_ID_MAX_OFST 32 +#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_ID_MAX_LEN 4 /***********************************/ @@ -9607,7 +9009,9 @@ * Direct read/write of parser-dispatcher state (DICPUs and LUE) for debugging. * Please note that this interface is only of use to debug tools which have * knowledge of firmware and hardware data structures; nothing here is intended - * for use by normal driver code. + * for use by normal driver code. Note that although this command is in the + * Admin privilege group, in tamperproof adapters, only read operations are + * permitted. */ #define MC_CMD_PARSER_DISP_RW 0xe5 #undef MC_CMD_0xe5_PRIVILEGE_CTG @@ -9618,42 +9022,58 @@ #define MC_CMD_PARSER_DISP_RW_IN_LEN 32 /* identifies the target of the operation */ #define MC_CMD_PARSER_DISP_RW_IN_TARGET_OFST 0 +#define MC_CMD_PARSER_DISP_RW_IN_TARGET_LEN 4 /* enum: RX dispatcher CPU */ -#define MC_CMD_PARSER_DISP_RW_IN_RX_DICPU 0x0 +#define MC_CMD_PARSER_DISP_RW_IN_RX_DICPU 0x0 /* enum: TX dispatcher CPU */ -#define MC_CMD_PARSER_DISP_RW_IN_TX_DICPU 0x1 -/* enum: Lookup engine (with original metadata format) */ -#define MC_CMD_PARSER_DISP_RW_IN_LUE 0x2 +#define MC_CMD_PARSER_DISP_RW_IN_TX_DICPU 0x1 +/* enum: Lookup engine (with original metadata format). Deprecated; used only + * by cmdclient as a fallback for very old Huntington firmware, and not + * supported in firmware beyond v6.4.0.1005. Use LUE_VERSIONED_METADATA + * instead. 
+ */ +#define MC_CMD_PARSER_DISP_RW_IN_LUE 0x2 /* enum: Lookup engine (with requested metadata format) */ -#define MC_CMD_PARSER_DISP_RW_IN_LUE_VERSIONED_METADATA 0x3 +#define MC_CMD_PARSER_DISP_RW_IN_LUE_VERSIONED_METADATA 0x3 /* enum: RX0 dispatcher CPU (alias for RX_DICPU; Medford has 2 RX DICPUs) */ -#define MC_CMD_PARSER_DISP_RW_IN_RX0_DICPU 0x0 +#define MC_CMD_PARSER_DISP_RW_IN_RX0_DICPU 0x0 /* enum: RX1 dispatcher CPU (only valid for Medford) */ -#define MC_CMD_PARSER_DISP_RW_IN_RX1_DICPU 0x4 +#define MC_CMD_PARSER_DISP_RW_IN_RX1_DICPU 0x4 /* enum: Miscellaneous other state (only valid for Medford) */ -#define MC_CMD_PARSER_DISP_RW_IN_MISC_STATE 0x5 +#define MC_CMD_PARSER_DISP_RW_IN_MISC_STATE 0x5 /* identifies the type of operation requested */ #define MC_CMD_PARSER_DISP_RW_IN_OP_OFST 4 -/* enum: read a word of DICPU DMEM or a LUE entry */ -#define MC_CMD_PARSER_DISP_RW_IN_READ 0x0 -/* enum: write a word of DICPU DMEM or a LUE entry */ -#define MC_CMD_PARSER_DISP_RW_IN_WRITE 0x1 -/* enum: read-modify-write a word of DICPU DMEM (not valid for LUE) */ -#define MC_CMD_PARSER_DISP_RW_IN_RMW 0x2 +#define MC_CMD_PARSER_DISP_RW_IN_OP_LEN 4 +/* enum: Read a word of DICPU DMEM or a LUE entry */ +#define MC_CMD_PARSER_DISP_RW_IN_READ 0x0 +/* enum: Write a word of DICPU DMEM or a LUE entry. Not permitted on + * tamperproof adapters. + */ +#define MC_CMD_PARSER_DISP_RW_IN_WRITE 0x1 +/* enum: Read-modify-write a word of DICPU DMEM (not valid for LUE). Not + * permitted on tamperproof adapters. + */ +#define MC_CMD_PARSER_DISP_RW_IN_RMW 0x2 /* data memory address (DICPU targets) or LUE index (LUE targets) */ #define MC_CMD_PARSER_DISP_RW_IN_ADDRESS_OFST 8 +#define MC_CMD_PARSER_DISP_RW_IN_ADDRESS_LEN 4 /* selector (for MISC_STATE target) */ #define MC_CMD_PARSER_DISP_RW_IN_SELECTOR_OFST 8 +#define MC_CMD_PARSER_DISP_RW_IN_SELECTOR_LEN 4 /* enum: Port to datapath mapping */ -#define MC_CMD_PARSER_DISP_RW_IN_PORT_DP_MAPPING 0x1 +#define MC_CMD_PARSER_DISP_RW_IN_PORT_DP_MAPPING 0x1 /* value to write (for DMEM writes) */ #define MC_CMD_PARSER_DISP_RW_IN_DMEM_WRITE_VALUE_OFST 12 +#define MC_CMD_PARSER_DISP_RW_IN_DMEM_WRITE_VALUE_LEN 4 /* XOR value (for DMEM read-modify-writes: new = (old & mask) ^ value) */ #define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_OFST 12 +#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_LEN 4 /* AND mask (for DMEM read-modify-writes: new = (old & mask) ^ value) */ #define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_OFST 16 +#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_LEN 4 /* metadata format (for LUE reads using LUE_VERSIONED_METADATA) */ #define MC_CMD_PARSER_DISP_RW_IN_LUE_READ_METADATA_VERSION_OFST 12 +#define MC_CMD_PARSER_DISP_RW_IN_LUE_READ_METADATA_VERSION_LEN 4 /* value to write (for LUE writes) */ #define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_OFST 12 #define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_LEN 20 @@ -9662,6 +9082,7 @@ #define MC_CMD_PARSER_DISP_RW_OUT_LEN 52 /* value read (for DMEM reads) */ #define MC_CMD_PARSER_DISP_RW_OUT_DMEM_READ_VALUE_OFST 0 +#define MC_CMD_PARSER_DISP_RW_OUT_DMEM_READ_VALUE_LEN 4 /* value read (for LUE reads) */ #define MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_OFST 0 #define MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_LEN 20 @@ -9674,8 +9095,8 @@ #define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_OFST 0 #define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_LEN 4 #define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_NUM 4 -#define MC_CMD_PARSER_DISP_RW_OUT_DP0 0x1 /* enum */ -#define MC_CMD_PARSER_DISP_RW_OUT_DP1 0x2 /* enum */ 
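
The RMW operation defined above computes new = (old & mask) ^ value over a
single 32-bit word of DICPU DMEM. A minimal sketch of building such a request
follows, assuming a hypothetical mcdi_request_t buffer and a mcdi_put_dword()
little-endian store helper in place of a real MCDI transport; only the
MC_CMD_PARSER_DISP_RW_* constants are taken from the definitions above.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical MCDI request holder; a real driver would use its own
 * transport structures and completion mechanism instead.
 */
typedef struct {
	unsigned int cmd;	/* MC_CMD_* command number */
	size_t inlen;		/* length of the request payload */
	uint8_t inbuf[MC_CMD_PARSER_DISP_RW_IN_LEN];
} mcdi_request_t;

/* Store a dword little-endian at the payload offset named by an _OFST
 * macro (each field written below is 4 bytes long).
 */
static void
mcdi_put_dword(mcdi_request_t *req, size_t ofst, uint32_t value)
{
	req->inbuf[ofst + 0] = (uint8_t)(value & 0xff);
	req->inbuf[ofst + 1] = (uint8_t)((value >> 8) & 0xff);
	req->inbuf[ofst + 2] = (uint8_t)((value >> 16) & 0xff);
	req->inbuf[ofst + 3] = (uint8_t)((value >> 24) & 0xff);
}

/* Toggle bit 0 of one RX DICPU DMEM word: with an all-ones AND mask and
 * an XOR value of 1, new = (old & mask) ^ value flips only bit 0 and
 * preserves every other bit. As noted above, RMW (like WRITE) is not
 * permitted on tamperproof adapters.
 */
static void
parser_disp_rmw_toggle_bit0(mcdi_request_t *req, uint32_t dmem_addr)
{
	memset(req, 0, sizeof(*req));
	req->cmd = MC_CMD_PARSER_DISP_RW;
	req->inlen = MC_CMD_PARSER_DISP_RW_IN_LEN;
	mcdi_put_dword(req, MC_CMD_PARSER_DISP_RW_IN_TARGET_OFST,
		       MC_CMD_PARSER_DISP_RW_IN_RX_DICPU);
	mcdi_put_dword(req, MC_CMD_PARSER_DISP_RW_IN_OP_OFST,
		       MC_CMD_PARSER_DISP_RW_IN_RMW);
	mcdi_put_dword(req, MC_CMD_PARSER_DISP_RW_IN_ADDRESS_OFST, dmem_addr);
	mcdi_put_dword(req, MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_OFST,
		       0xffffffffu);
	mcdi_put_dword(req, MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_OFST,
		       0x1u);
}

On success the firmware would return nothing of interest for an RMW; for a
READ of the same target, the result would be found at
MC_CMD_PARSER_DISP_RW_OUT_DMEM_READ_VALUE_OFST in the response buffer.
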
+#define MC_CMD_PARSER_DISP_RW_OUT_DP0 0x1 /* enum */ +#define MC_CMD_PARSER_DISP_RW_OUT_DP1 0x2 /* enum */ /***********************************/ @@ -9707,6 +9128,7 @@ #define MC_CMD_SET_PF_COUNT_IN_LEN 4 /* New number of PFs on the device. */ #define MC_CMD_SET_PF_COUNT_IN_PF_COUNT_OFST 0 +#define MC_CMD_SET_PF_COUNT_IN_PF_COUNT_LEN 4 /* MC_CMD_SET_PF_COUNT_OUT msgresponse */ #define MC_CMD_SET_PF_COUNT_OUT_LEN 0 @@ -9728,6 +9150,7 @@ #define MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN 4 /* Identifies the port assignment for this function. */ #define MC_CMD_GET_PORT_ASSIGNMENT_OUT_PORT_OFST 0 +#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_PORT_LEN 4 /***********************************/ @@ -9743,6 +9166,7 @@ #define MC_CMD_SET_PORT_ASSIGNMENT_IN_LEN 4 /* Identifies the port assignment for this function. */ #define MC_CMD_SET_PORT_ASSIGNMENT_IN_PORT_OFST 0 +#define MC_CMD_SET_PORT_ASSIGNMENT_IN_PORT_LEN 4 /* MC_CMD_SET_PORT_ASSIGNMENT_OUT msgresponse */ #define MC_CMD_SET_PORT_ASSIGNMENT_OUT_LEN 0 @@ -9761,8 +9185,10 @@ #define MC_CMD_ALLOC_VIS_IN_LEN 8 /* The minimum number of VIs that is acceptable */ #define MC_CMD_ALLOC_VIS_IN_MIN_VI_COUNT_OFST 0 +#define MC_CMD_ALLOC_VIS_IN_MIN_VI_COUNT_LEN 4 /* The maximum number of VIs that would be useful */ #define MC_CMD_ALLOC_VIS_IN_MAX_VI_COUNT_OFST 4 +#define MC_CMD_ALLOC_VIS_IN_MAX_VI_COUNT_LEN 4 /* MC_CMD_ALLOC_VIS_OUT msgresponse: Huntington-compatible VI_ALLOC request. * Use extended version in new code. @@ -9770,21 +9196,26 @@ #define MC_CMD_ALLOC_VIS_OUT_LEN 8 /* The number of VIs allocated on this function */ #define MC_CMD_ALLOC_VIS_OUT_VI_COUNT_OFST 0 +#define MC_CMD_ALLOC_VIS_OUT_VI_COUNT_LEN 4 /* The base absolute VI number allocated to this function. Required to * correctly interpret wakeup events. */ #define MC_CMD_ALLOC_VIS_OUT_VI_BASE_OFST 4 +#define MC_CMD_ALLOC_VIS_OUT_VI_BASE_LEN 4 /* MC_CMD_ALLOC_VIS_EXT_OUT msgresponse */ #define MC_CMD_ALLOC_VIS_EXT_OUT_LEN 12 /* The number of VIs allocated on this function */ #define MC_CMD_ALLOC_VIS_EXT_OUT_VI_COUNT_OFST 0 +#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_COUNT_LEN 4 /* The base absolute VI number allocated to this function. Required to * correctly interpret wakeup events. */ #define MC_CMD_ALLOC_VIS_EXT_OUT_VI_BASE_OFST 4 +#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_BASE_LEN 4 /* Function's port vi_shift value (always 0 on Huntington) */ #define MC_CMD_ALLOC_VIS_EXT_OUT_VI_SHIFT_OFST 8 +#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_SHIFT_LEN 4 /***********************************/ @@ -9820,15 +9251,20 @@ #define MC_CMD_GET_SRIOV_CFG_OUT_LEN 20 /* Number of VFs currently enabled. */ #define MC_CMD_GET_SRIOV_CFG_OUT_VF_CURRENT_OFST 0 +#define MC_CMD_GET_SRIOV_CFG_OUT_VF_CURRENT_LEN 4 /* Max number of VFs before sriov stride and offset may need to be changed. */ #define MC_CMD_GET_SRIOV_CFG_OUT_VF_MAX_OFST 4 +#define MC_CMD_GET_SRIOV_CFG_OUT_VF_MAX_LEN 4 #define MC_CMD_GET_SRIOV_CFG_OUT_FLAGS_OFST 8 +#define MC_CMD_GET_SRIOV_CFG_OUT_FLAGS_LEN 4 #define MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_LBN 0 #define MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_WIDTH 1 /* RID offset of first VF from PF. */ #define MC_CMD_GET_SRIOV_CFG_OUT_VF_OFFSET_OFST 12 +#define MC_CMD_GET_SRIOV_CFG_OUT_VF_OFFSET_LEN 4 /* RID offset of each subsequent VF from the previous. */ #define MC_CMD_GET_SRIOV_CFG_OUT_VF_STRIDE_OFST 16 +#define MC_CMD_GET_SRIOV_CFG_OUT_VF_STRIDE_LEN 4 /***********************************/ @@ -9844,19 +9280,24 @@ #define MC_CMD_SET_SRIOV_CFG_IN_LEN 20 /* Number of VFs currently enabled. 
*/ #define MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_OFST 0 +#define MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_LEN 4 /* Max number of VFs before sriov stride and offset may need to be changed. */ #define MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_OFST 4 +#define MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_LEN 4 #define MC_CMD_SET_SRIOV_CFG_IN_FLAGS_OFST 8 +#define MC_CMD_SET_SRIOV_CFG_IN_FLAGS_LEN 4 #define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_LBN 0 #define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_WIDTH 1 /* RID offset of first VF from PF, or 0 for no change, or * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate an offset. */ #define MC_CMD_SET_SRIOV_CFG_IN_VF_OFFSET_OFST 12 +#define MC_CMD_SET_SRIOV_CFG_IN_VF_OFFSET_LEN 4 /* RID offset of each subsequent VF from the previous, 0 for no change, or * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate a stride. */ #define MC_CMD_SET_SRIOV_CFG_IN_VF_STRIDE_OFST 16 +#define MC_CMD_SET_SRIOV_CFG_IN_VF_STRIDE_LEN 4 /* MC_CMD_SET_SRIOV_CFG_OUT msgresponse */ #define MC_CMD_SET_SRIOV_CFG_OUT_LEN 0 @@ -9879,12 +9320,15 @@ #define MC_CMD_GET_VI_ALLOC_INFO_OUT_LEN 12 /* The number of VIs allocated on this function */ #define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_OFST 0 +#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_LEN 4 /* The base absolute VI number allocated to this function. Required to * correctly interpret wakeup events. */ #define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_OFST 4 +#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_LEN 4 /* Function's port vi_shift value (always 0 on Huntington) */ #define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_SHIFT_OFST 8 +#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_SHIFT_LEN 4 /***********************************/ @@ -9900,6 +9344,7 @@ #define MC_CMD_DUMP_VI_STATE_IN_LEN 4 /* The VI number to query. */ #define MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_OFST 0 +#define MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_LEN 4 /* MC_CMD_DUMP_VI_STATE_OUT msgresponse */ #define MC_CMD_DUMP_VI_STATE_OUT_LEN 96 @@ -9933,6 +9378,7 @@ #define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_OFST 24 /* Combined metadata field. */ #define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_OFST 28 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_LEN 4 #define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_LBN 0 #define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_WIDTH 16 #define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_LBN 16 @@ -10015,6 +9461,7 @@ #define MC_CMD_ALLOC_PIOBUF_OUT_LEN 4 /* Handle for allocated push I/O buffer. */ #define MC_CMD_ALLOC_PIOBUF_OUT_PIOBUF_HANDLE_OFST 0 +#define MC_CMD_ALLOC_PIOBUF_OUT_PIOBUF_HANDLE_LEN 4 /***********************************/ @@ -10030,6 +9477,7 @@ #define MC_CMD_FREE_PIOBUF_IN_LEN 4 /* Handle for allocated push I/O buffer. */ #define MC_CMD_FREE_PIOBUF_IN_PIOBUF_HANDLE_OFST 0 +#define MC_CMD_FREE_PIOBUF_IN_PIOBUF_HANDLE_LEN 4 /* MC_CMD_FREE_PIOBUF_OUT msgresponse */ #define MC_CMD_FREE_PIOBUF_OUT_LEN 0 @@ -10048,6 +9496,7 @@ #define MC_CMD_GET_VI_TLP_PROCESSING_IN_LEN 4 /* VI number to get information for. 
*/ #define MC_CMD_GET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0 +#define MC_CMD_GET_VI_TLP_PROCESSING_IN_INSTANCE_LEN 4 /* MC_CMD_GET_VI_TLP_PROCESSING_OUT msgresponse */ #define MC_CMD_GET_VI_TLP_PROCESSING_OUT_LEN 4 @@ -10070,6 +9519,7 @@ #define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_LBN 19 #define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_WIDTH 1 #define MC_CMD_GET_VI_TLP_PROCESSING_OUT_DATA_OFST 0 +#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_DATA_LEN 4 /***********************************/ @@ -10085,6 +9535,7 @@ #define MC_CMD_SET_VI_TLP_PROCESSING_IN_LEN 8 /* VI number to set information for. */ #define MC_CMD_SET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0 +#define MC_CMD_SET_VI_TLP_PROCESSING_IN_INSTANCE_LEN 4 /* Transaction processing steering hint 1 for use with the Rx Queue. */ #define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_OFST 4 #define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_LEN 1 @@ -10104,6 +9555,7 @@ #define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_LBN 51 #define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_WIDTH 1 #define MC_CMD_SET_VI_TLP_PROCESSING_IN_DATA_OFST 4 +#define MC_CMD_SET_VI_TLP_PROCESSING_IN_DATA_LEN 4 /* MC_CMD_SET_VI_TLP_PROCESSING_OUT msgresponse */ #define MC_CMD_SET_VI_TLP_PROCESSING_OUT_LEN 0 @@ -10121,22 +9573,25 @@ /* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN msgrequest */ #define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_LEN 4 #define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_LEN 4 /* enum: MISC. */ -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_MISC 0x0 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_MISC 0x0 /* enum: IDO. */ -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_IDO 0x1 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_IDO 0x1 /* enum: RO. */ -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_RO 0x2 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_RO 0x2 /* enum: TPH Type. */ -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_TPH_TYPE 0x3 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_TPH_TYPE 0x3 /* MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT msgresponse */ #define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_LEN 8 #define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_GLOBAL_CATEGORY_OFST 0 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_GLOBAL_CATEGORY_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */ /* Amalgamated TLP info word. */ #define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_WORD_OFST 4 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_WORD_LEN 4 #define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_LBN 0 #define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_WIDTH 1 #define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_LBN 1 @@ -10185,10 +9640,12 @@ /* MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN msgrequest */ #define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_LEN 8 #define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_GET_TLP_PROCESSING_GLOBALS/MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */ /* Amalgamated TLP info word. 
*/ #define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_WORD_OFST 4 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_WORD_LEN 4 #define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_LBN 0 #define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_WIDTH 1 #define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_LBN 0 @@ -10229,7 +9686,7 @@ #define MC_CMD_SATELLITE_DOWNLOAD 0x91 #undef MC_CMD_0x91_PRIVILEGE_CTG -#define MC_CMD_0x91_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x91_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND /* MC_CMD_SATELLITE_DOWNLOAD_IN msgrequest: The reset requirements for the CPUs * are subtle, and so downloads must proceed in a number of phases. @@ -10256,57 +9713,61 @@ * in a command from the host.) */ #define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_OFST 0 -#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IDLE 0x0 /* enum */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_RESET 0x1 /* enum */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IMEMS 0x2 /* enum */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_VECTORS 0x3 /* enum */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_READY 0x4 /* enum */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_LEN 4 +#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IDLE 0x0 /* enum */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_RESET 0x1 /* enum */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IMEMS 0x2 /* enum */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_VECTORS 0x3 /* enum */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_READY 0x4 /* enum */ /* Target for download. (These match the blob numbers defined in * mc_flash_layout.h.) */ #define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_OFST 4 +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_LEN 4 /* enum: Valid in phase 2 (PHASE_IMEMS) only */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_TEXT 0x0 +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_TEXT 0x0 /* enum: Valid in phase 2 (PHASE_IMEMS) only */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_TEXT 0x1 +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_TEXT 0x1 /* enum: Valid in phase 2 (PHASE_IMEMS) only */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDP_TEXT 0x2 +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDP_TEXT 0x2 /* enum: Valid in phase 2 (PHASE_IMEMS) only */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDP_TEXT 0x3 +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDP_TEXT 0x3 /* enum: Valid in phase 2 (PHASE_IMEMS) only */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_LUT 0x4 +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_LUT 0x4 /* enum: Valid in phase 2 (PHASE_IMEMS) only */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_LUT_CFG 0x5 +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_LUT_CFG 0x5 /* enum: Valid in phase 2 (PHASE_IMEMS) only */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_LUT 0x6 +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_LUT 0x6 /* enum: Valid in phase 2 (PHASE_IMEMS) only */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_LUT_CFG 0x7 +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_LUT_CFG 0x7 /* enum: Valid in phase 2 (PHASE_IMEMS) only */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_PGM 0x8 +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_PGM 0x8 /* enum: Valid in phase 2 (PHASE_IMEMS) only */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_SL_PGM 0x9 +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_SL_PGM 0x9 /* enum: Valid in phase 2 (PHASE_IMEMS) only */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_PGM 0xa 
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_PGM 0xa /* enum: Valid in phase 2 (PHASE_IMEMS) only */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_SL_PGM 0xb +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_SL_PGM 0xb /* enum: Valid in phase 3 (PHASE_VECTORS) only */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_VTBL0 0xc +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_VTBL0 0xc /* enum: Valid in phase 3 (PHASE_VECTORS) only */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_VTBL0 0xd +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_VTBL0 0xd /* enum: Valid in phase 3 (PHASE_VECTORS) only */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_VTBL1 0xe +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_VTBL1 0xe /* enum: Valid in phase 3 (PHASE_VECTORS) only */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_VTBL1 0xf +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_VTBL1 0xf /* enum: Valid in phases 1 (PHASE_RESET) and 4 (PHASE_READY) only */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_ALL 0xffffffff +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_ALL 0xffffffff /* Chunk ID, or CHUNK_ID_LAST or CHUNK_ID_ABORT */ #define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_OFST 8 +#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LEN 4 /* enum: Last chunk, containing checksum rather than data */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LAST 0xffffffff +#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LAST 0xffffffff /* enum: Abort download of this item */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_ABORT 0xfffffffe +#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_ABORT 0xfffffffe /* Length of this chunk in bytes */ #define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_LEN_OFST 12 +#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_LEN_LEN 4 /* Data for this chunk */ #define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_OFST 16 #define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_LEN 4 @@ -10317,24 +9778,26 @@ #define MC_CMD_SATELLITE_DOWNLOAD_OUT_LEN 8 /* Same as MC_CMD_ERR field, but included as 0 in success cases */ #define MC_CMD_SATELLITE_DOWNLOAD_OUT_RESULT_OFST 0 +#define MC_CMD_SATELLITE_DOWNLOAD_OUT_RESULT_LEN 4 /* Extra status information */ #define MC_CMD_SATELLITE_DOWNLOAD_OUT_INFO_OFST 4 +#define MC_CMD_SATELLITE_DOWNLOAD_OUT_INFO_LEN 4 /* enum: Code download OK, completed. */ -#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_COMPLETE 0x0 +#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_COMPLETE 0x0 /* enum: Code download aborted as requested. */ -#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_ABORTED 0x1 +#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_ABORTED 0x1 /* enum: Code download OK so far, send next chunk. 
*/ -#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_NEXT_CHUNK 0x2 +#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_NEXT_CHUNK 0x2 /* enum: Download phases out of sequence */ -#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_PHASE 0x100 +#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_PHASE 0x100 /* enum: Bad target for this phase */ -#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_TARGET 0x101 +#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_TARGET 0x101 /* enum: Chunk ID out of sequence */ -#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHUNK_ID 0x200 +#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHUNK_ID 0x200 /* enum: Chunk length zero or too large */ -#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHUNK_LEN 0x201 +#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHUNK_LEN 0x201 /* enum: Checksum was incorrect */ -#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHECKSUM 0x300 +#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHECKSUM 0x300 /***********************************/ @@ -10356,6 +9819,7 @@ #define MC_CMD_GET_CAPABILITIES_OUT_LEN 20 /* First word of flags. */ #define MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_LEN 4 #define MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_LBN 3 #define MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_WIDTH 1 #define MC_CMD_GET_CAPABILITIES_OUT_TX_STRIPING_LBN 4 @@ -10418,48 +9882,58 @@ #define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_OFST 4 #define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_LEN 2 /* enum: Standard RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXDP 0x0 +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP 0x0 /* enum: Low latency RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_LOW_LATENCY 0x1 +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_LOW_LATENCY 0x1 /* enum: Packed stream RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_PACKED_STREAM 0x2 +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_PACKED_STREAM 0x2 +/* enum: Rules engine RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_RULES_ENGINE 0x5 +/* enum: DPDK RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_DPDK 0x6 /* enum: BIST RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_BIST 0x10a +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_BIST 0x10a /* enum: RXDP Test firmware image 1 */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 /* enum: RXDP Test firmware image 2 */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 /* enum: RXDP Test firmware image 3 */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 /* enum: RXDP Test firmware image 4 */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 /* enum: RXDP Test firmware image 5 */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_BACKPRESSURE 0x105 +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_BACKPRESSURE 0x105 /* enum: RXDP Test firmware image 6 */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 /* enum: RXDP Test firmware image 7 */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 /* enum: RXDP Test 
firmware image 8 */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 /* enum: RXDP Test firmware image 9 */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +/* enum: RXDP Test firmware image 10 */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_SLOW 0x10c /* TxDPCPU firmware id. */ #define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_OFST 6 #define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_LEN 2 /* enum: Standard TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_OUT_TXDP 0x0 +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP 0x0 /* enum: Low latency TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_LOW_LATENCY 0x1 +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_LOW_LATENCY 0x1 /* enum: High packet rate TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_HIGH_PACKET_RATE 0x3 +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_HIGH_PACKET_RATE 0x3 +/* enum: Rules engine TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_RULES_ENGINE 0x5 +/* enum: DPDK TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_DPDK 0x6 /* enum: BIST TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_BIST 0x12d +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_BIST 0x12d /* enum: TXDP Test firmware image 1 */ -#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 /* enum: TXDP Test firmware image 2 */ -#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 /* enum: TXDP CSR bus test firmware */ -#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_CSR 0x103 +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_CSR 0x103 #define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_OFST 8 #define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_LEN 2 #define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_LBN 0 @@ -10469,39 +9943,43 @@ /* enum: reserved value - do not use (may indicate alternative interpretation * of REV field in future) */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RESERVED 0x0 +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RESERVED 0x0 /* enum: Trivial RX PD firmware for early Huntington development (Huntington * development only) */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 /* enum: RX PD firmware with approximately Siena-compatible behaviour * (Huntington development only) */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 /* enum: Full featured RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 /* enum: (deprecated original name for the FULL_FEATURED variant) */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_VSWITCH 0x3 +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_VSWITCH 0x3 /* enum: siena_compat variant RX PD firmware using PM rather than MAC * (Huntington development only) */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 /* enum: Low latency RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 +#define 
MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum: Packed stream RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 /* enum: RX PD firmware handling layer 2 only for high packet rate performance * tests (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 /* enum: Rules engine RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_DPDK 0xa /* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe /* enum: RX PD firmware parsing but not filtering network overlay tunnel * encapsulations (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf #define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_OFST 10 #define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_LEN 2 #define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_LBN 0 @@ -10511,36 +9989,42 @@ /* enum: reserved value - do not use (may indicate alternative interpretation * of REV field in future) */ -#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RESERVED 0x0 +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RESERVED 0x0 /* enum: Trivial TX PD firmware for early Huntington development (Huntington * development only) */ -#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 /* enum: TX PD firmware with approximately Siena-compatible behaviour * (Huntington development only) */ -#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 /* enum: Full featured TX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 /* enum: (deprecated original name for the FULL_FEATURED variant) */ -#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_VSWITCH 0x3 +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_VSWITCH 0x3 /* enum: siena_compat variant TX PD firmware using PM rather than MAC * (Huntington development only) */ -#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 -#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ /* enum: TX PD firmware handling layer 2 only for high packet rate performance * tests (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 /* enum: Rules engine TX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: 
Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_DPDK 0xa /* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe /* Hardware capabilities of NIC */ #define MC_CMD_GET_CAPABILITIES_OUT_HW_CAPABILITIES_OFST 12 +#define MC_CMD_GET_CAPABILITIES_OUT_HW_CAPABILITIES_LEN 4 /* Licensed capabilities */ #define MC_CMD_GET_CAPABILITIES_OUT_LICENSE_CAPABILITIES_OFST 16 +#define MC_CMD_GET_CAPABILITIES_OUT_LICENSE_CAPABILITIES_LEN 4 /* MC_CMD_GET_CAPABILITIES_V2_IN msgrequest */ #define MC_CMD_GET_CAPABILITIES_V2_IN_LEN 0 @@ -10549,6 +10033,7 @@ #define MC_CMD_GET_CAPABILITIES_V2_OUT_LEN 72 /* First word of flags. */ #define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS1_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS1_LEN 4 #define MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_LBN 3 #define MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_WIDTH 1 #define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_STRIPING_LBN 4 @@ -10611,48 +10096,58 @@ #define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DPCPU_FW_ID_OFST 4 #define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DPCPU_FW_ID_LEN 2 /* enum: Standard RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP 0x0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP 0x0 /* enum: Low latency RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_LOW_LATENCY 0x1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_LOW_LATENCY 0x1 /* enum: Packed stream RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_PACKED_STREAM 0x2 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_PACKED_STREAM 0x2 +/* enum: Rules engine RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_RULES_ENGINE 0x5 +/* enum: DPDK RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_DPDK 0x6 /* enum: BIST RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_BIST 0x10a +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_BIST 0x10a /* enum: RXDP Test firmware image 1 */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 /* enum: RXDP Test firmware image 2 */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 /* enum: RXDP Test firmware image 3 */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 /* enum: RXDP Test firmware image 4 */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 /* enum: RXDP Test firmware image 5 */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_BACKPRESSURE 0x105 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_BACKPRESSURE 0x105 /* enum: RXDP Test firmware image 6 */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 /* enum: RXDP Test firmware image 7 */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 /* enum: RXDP Test firmware image 8 
*/ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 /* enum: RXDP Test firmware image 9 */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +/* enum: RXDP Test firmware image 10 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_SLOW 0x10c /* TxDPCPU firmware id. */ #define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DPCPU_FW_ID_OFST 6 #define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DPCPU_FW_ID_LEN 2 /* enum: Standard TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP 0x0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP 0x0 /* enum: Low latency TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_LOW_LATENCY 0x1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_LOW_LATENCY 0x1 /* enum: High packet rate TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_HIGH_PACKET_RATE 0x3 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_HIGH_PACKET_RATE 0x3 +/* enum: Rules engine TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_RULES_ENGINE 0x5 +/* enum: DPDK TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_DPDK 0x6 /* enum: BIST TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_BIST 0x12d +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_BIST 0x12d /* enum: TXDP Test firmware image 1 */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 /* enum: TXDP Test firmware image 2 */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 /* enum: TXDP CSR bus test firmware */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_CSR 0x103 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_CSR 0x103 #define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_OFST 8 #define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_LEN 2 #define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_REV_LBN 0 @@ -10662,39 +10157,43 @@ /* enum: reserved value - do not use (may indicate alternative interpretation * of REV field in future) */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RESERVED 0x0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RESERVED 0x0 /* enum: Trivial RX PD firmware for early Huntington development (Huntington * development only) */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 /* enum: RX PD firmware with approximately Siena-compatible behaviour * (Huntington development only) */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 /* enum: Full featured RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 /* enum: (deprecated original name for the FULL_FEATURED variant) */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_VSWITCH 0x3 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_VSWITCH 0x3 /* enum: siena_compat variant RX PD firmware using PM rather than MAC * (Huntington development only) */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 /* enum: Low latency RX PD production firmware */ -#define 
MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum: Packed stream RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 /* enum: RX PD firmware handling layer 2 only for high packet rate performance * tests (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 /* enum: Rules engine RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_DPDK 0xa /* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe /* enum: RX PD firmware parsing but not filtering network overlay tunnel * encapsulations (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf #define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_OFST 10 #define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_LEN 2 #define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_REV_LBN 0 @@ -10704,38 +10203,45 @@ /* enum: reserved value - do not use (may indicate alternative interpretation * of REV field in future) */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RESERVED 0x0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RESERVED 0x0 /* enum: Trivial TX PD firmware for early Huntington development (Huntington * development only) */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 /* enum: TX PD firmware with approximately Siena-compatible behaviour * (Huntington development only) */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 /* enum: Full featured TX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 /* enum: (deprecated original name for the FULL_FEATURED variant) */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_VSWITCH 0x3 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_VSWITCH 0x3 /* enum: siena_compat variant TX PD firmware using PM rather than MAC * (Huntington development only) */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ /* enum: TX PD firmware handling layer 2 only for high packet rate performance * tests (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 /* enum: Rules engine TX PD 
production firmware */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_DPDK 0xa /* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe /* Hardware capabilities of NIC */ #define MC_CMD_GET_CAPABILITIES_V2_OUT_HW_CAPABILITIES_OFST 12 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_HW_CAPABILITIES_LEN 4 /* Licensed capabilities */ #define MC_CMD_GET_CAPABILITIES_V2_OUT_LICENSE_CAPABILITIES_OFST 16 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_LICENSE_CAPABILITIES_LEN 4 /* Second word of flags. Not present on older firmware (check the length). */ #define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_LEN 4 #define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN 0 #define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_WIDTH 1 #define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_ENCAP_LBN 1 @@ -10766,6 +10272,30 @@ #define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_BACKGROUND_WIDTH 1 #define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_DB_RETURN_LBN 14 #define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_DB_RETURN_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_CTPIO_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_CTPIO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_SUPPORT_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_BOUND_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_BOUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_FLAG_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_FLAG_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_MARK_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_MARK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_L3XUDP_SUPPORT_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_L3XUDP_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VI_SPREADING_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VI_SPREADING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_HLB_IDLE_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_HLB_IDLE_WIDTH 1 /* Number of FATSOv2 contexts per datapath supported by this NIC. Not present * on older firmware (check the length). */ @@ -10779,18 +10309,18 @@ #define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1 #define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16 /* enum: The caller is not permitted to access information on this PF. 
*/ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED 0xff +#define MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED 0xff /* enum: PF does not exist. */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT 0xfe +#define MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT 0xfe /* enum: PF does exist but is not assigned to any external port. */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_ASSIGNED 0xfd +#define MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_ASSIGNED 0xfd /* enum: This value indicates that PF is assigned, but it cannot be expressed * in this field. It is intended for a possible future situation where a more * complex scheme of PFs to ports mapping is being used. The future driver * should look for a new field supporting the new scheme. The current/old * driver should treat this value as PF_NOT_ASSIGNED. */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc /* One byte per PF containing the number of its VFs, indexed by PF number. A * special value indicates that a PF is not present. */ @@ -10798,9 +10328,9 @@ #define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_LEN 1 #define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_NUM 16 /* enum: The caller is not permitted to access information on this PF. */ -/* MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED 0xff */ +/* MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED 0xff */ /* enum: PF does not exist. */ -/* MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT 0xfe */ +/* MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT 0xfe */ /* Number of VIs available for each external port */ #define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_OFST 58 #define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_LEN 2 @@ -10826,6 +10356,7 @@ #define MC_CMD_GET_CAPABILITIES_V3_OUT_LEN 76 /* First word of flags. 
*/ #define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS1_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS1_LEN 4 #define MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_LBN 3 #define MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_WIDTH 1 #define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_STRIPING_LBN 4 @@ -10888,48 +10419,58 @@ #define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DPCPU_FW_ID_OFST 4 #define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DPCPU_FW_ID_LEN 2 /* enum: Standard RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP 0x0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP 0x0 /* enum: Low latency RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_LOW_LATENCY 0x1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_LOW_LATENCY 0x1 /* enum: Packed stream RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_PACKED_STREAM 0x2 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_PACKED_STREAM 0x2 +/* enum: Rules engine RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_RULES_ENGINE 0x5 +/* enum: DPDK RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_DPDK 0x6 /* enum: BIST RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_BIST 0x10a +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_BIST 0x10a /* enum: RXDP Test firmware image 1 */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 /* enum: RXDP Test firmware image 2 */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 /* enum: RXDP Test firmware image 3 */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 /* enum: RXDP Test firmware image 4 */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 /* enum: RXDP Test firmware image 5 */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_BACKPRESSURE 0x105 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_BACKPRESSURE 0x105 /* enum: RXDP Test firmware image 6 */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 /* enum: RXDP Test firmware image 7 */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 /* enum: RXDP Test firmware image 8 */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 /* enum: RXDP Test firmware image 9 */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +/* enum: RXDP Test firmware image 10 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_SLOW 0x10c /* TxDPCPU firmware id. 
*/ #define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DPCPU_FW_ID_OFST 6 #define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DPCPU_FW_ID_LEN 2 /* enum: Standard TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP 0x0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP 0x0 /* enum: Low latency TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_LOW_LATENCY 0x1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_LOW_LATENCY 0x1 /* enum: High packet rate TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_HIGH_PACKET_RATE 0x3 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_HIGH_PACKET_RATE 0x3 +/* enum: Rules engine TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_RULES_ENGINE 0x5 +/* enum: DPDK TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_DPDK 0x6 /* enum: BIST TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_BIST 0x12d +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_BIST 0x12d /* enum: TXDP Test firmware image 1 */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 /* enum: TXDP Test firmware image 2 */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 /* enum: TXDP CSR bus test firmware */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_CSR 0x103 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_CSR 0x103 #define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_OFST 8 #define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_LEN 2 #define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_REV_LBN 0 @@ -10939,39 +10480,43 @@ /* enum: reserved value - do not use (may indicate alternative interpretation * of REV field in future) */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RESERVED 0x0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RESERVED 0x0 /* enum: Trivial RX PD firmware for early Huntington development (Huntington * development only) */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 /* enum: RX PD firmware with approximately Siena-compatible behaviour * (Huntington development only) */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 /* enum: Full featured RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 /* enum: (deprecated original name for the FULL_FEATURED variant) */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_VSWITCH 0x3 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_VSWITCH 0x3 /* enum: siena_compat variant RX PD firmware using PM rather than MAC * (Huntington development only) */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 /* enum: Low latency RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum: Packed stream RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 /* enum: RX PD firmware handling layer 2 only for high packet rate performance * tests (Medford development only) */ -#define 
MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 /* enum: Rules engine RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_DPDK 0xa /* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe /* enum: RX PD firmware parsing but not filtering network overlay tunnel * encapsulations (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf #define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_OFST 10 #define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_LEN 2 #define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_REV_LBN 0 @@ -10981,38 +10526,45 @@ /* enum: reserved value - do not use (may indicate alternative interpretation * of REV field in future) */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RESERVED 0x0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RESERVED 0x0 /* enum: Trivial TX PD firmware for early Huntington development (Huntington * development only) */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 /* enum: TX PD firmware with approximately Siena-compatible behaviour * (Huntington development only) */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 /* enum: Full featured TX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 /* enum: (deprecated original name for the FULL_FEATURED variant) */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_VSWITCH 0x3 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_VSWITCH 0x3 /* enum: siena_compat variant TX PD firmware using PM rather than MAC * (Huntington development only) */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ /* enum: TX PD firmware handling layer 2 only for high packet rate performance * tests (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 /* enum: Rules engine TX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_DPDK 0xa /* enum: RX PD firmware for GUE parsing prototype 
(Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe /* Hardware capabilities of NIC */ #define MC_CMD_GET_CAPABILITIES_V3_OUT_HW_CAPABILITIES_OFST 12 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_HW_CAPABILITIES_LEN 4 /* Licensed capabilities */ #define MC_CMD_GET_CAPABILITIES_V3_OUT_LICENSE_CAPABILITIES_OFST 16 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_LICENSE_CAPABILITIES_LEN 4 /* Second word of flags. Not present on older firmware (check the length). */ #define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS2_LEN 4 #define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_LBN 0 #define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_WIDTH 1 #define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_ENCAP_LBN 1 @@ -11043,6 +10595,30 @@ #define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_BACKGROUND_WIDTH 1 #define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_DB_RETURN_LBN 14 #define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_DB_RETURN_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_CTPIO_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_CTPIO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_SUPPORT_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_BOUND_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_BOUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_FLAG_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_FLAG_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_MARK_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_MARK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_L3XUDP_SUPPORT_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_L3XUDP_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_SPREADING_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_SPREADING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_HLB_IDLE_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_HLB_IDLE_WIDTH 1 /* Number of FATSOv2 contexts per datapath supported by this NIC. Not present * on older firmware (check the length). */ @@ -11056,18 +10632,18 @@ #define MC_CMD_GET_CAPABILITIES_V3_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1 #define MC_CMD_GET_CAPABILITIES_V3_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16 /* enum: The caller is not permitted to access information on this PF. */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_ACCESS_NOT_PERMITTED 0xff +#define MC_CMD_GET_CAPABILITIES_V3_OUT_ACCESS_NOT_PERMITTED 0xff /* enum: PF does not exist. */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_PRESENT 0xfe +#define MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_PRESENT 0xfe /* enum: PF does exist but is not assigned to any external port. 
*/ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_ASSIGNED 0xfd +#define MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_ASSIGNED 0xfd /* enum: This value indicates that PF is assigned, but it cannot be expressed * in this field. It is intended for a possible future situation where a more * complex scheme of PFs to ports mapping is being used. The future driver * should look for a new field supporting the new scheme. The current/old * driver should treat this value as PF_NOT_ASSIGNED. */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc +#define MC_CMD_GET_CAPABILITIES_V3_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc /* One byte per PF containing the number of its VFs, indexed by PF number. A * special value indicates that a PF is not present. */ @@ -11075,9 +10651,9 @@ #define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VFS_PER_PF_LEN 1 #define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VFS_PER_PF_NUM 16 /* enum: The caller is not permitted to access information on this PF. */ -/* MC_CMD_GET_CAPABILITIES_V3_OUT_ACCESS_NOT_PERMITTED 0xff */ +/* MC_CMD_GET_CAPABILITIES_V3_OUT_ACCESS_NOT_PERMITTED 0xff */ /* enum: PF does not exist. */ -/* MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_PRESENT 0xfe */ +/* MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_PRESENT 0xfe */ /* Number of VIs available for each external port */ #define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_OFST 58 #define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_LEN 2 @@ -11108,11 +10684,11 @@ /* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k. * CTPIO is not mapped. */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K 0x0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K 0x0 /* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K 0x1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K 0x1 /* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K 0x2 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K 0x2 /* Number of vFIFOs per adapter that can be used for VFIFO Stuffing * (SF-115995-SW) in the present configuration of firmware and port mode. */ @@ -11124,6 +10700,723 @@ #define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74 #define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2 +/* MC_CMD_GET_CAPABILITIES_V4_OUT msgresponse */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_LEN 78 +/* First word of flags. 
*/ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS1_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS1_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VPORT_RECONFIGURE_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VPORT_RECONFIGURE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_STRIPING_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_STRIPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_QUERY_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_QUERY_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_DRV_ATTACH_PREBOOT_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_DRV_ATTACH_PREBOOT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_FORCE_EVENT_MERGING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_SET_MAC_ENHANCED_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_SET_MAC_ENHANCED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_ADDITIONAL_RSS_MODES_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_ADDITIONAL_RSS_MODES_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_QBB_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_QBB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_RSS_LIMITED_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_RSS_LIMITED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_INCLUDE_FCS_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_INCLUDE_FCS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VLAN_INSERTION_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VLAN_INSERTION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_VLAN_STRIPPING_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_VLAN_STRIPPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_0_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_0_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_14_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_14_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_TIMESTAMP_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_BATCHING_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_BATCHING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCAST_FILTER_CHAINING_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCAST_FILTER_CHAINING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_PM_AND_RXDP_COUNTERS_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DISABLE_SCATTER_LBN 28 +#define 
MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VXLAN_NVGRE_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VXLAN_NVGRE_WIDTH 1 +/* RxDPCPU firmware id. */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DPCPU_FW_ID_OFST 4 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DPCPU_FW_ID_LEN 2 +/* enum: Standard RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP 0x0 +/* enum: Low latency RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_LOW_LATENCY 0x1 +/* enum: Packed stream RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_PACKED_STREAM 0x2 +/* enum: Rules engine RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_RULES_ENGINE 0x5 +/* enum: DPDK RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_DPDK 0x6 +/* enum: BIST RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_BIST 0x10a +/* enum: RXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 +/* enum: RXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 +/* enum: RXDP Test firmware image 3 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 +/* enum: RXDP Test firmware image 4 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 +/* enum: RXDP Test firmware image 5 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_BACKPRESSURE 0x105 +/* enum: RXDP Test firmware image 6 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 +/* enum: RXDP Test firmware image 7 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 +/* enum: RXDP Test firmware image 8 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 +/* enum: RXDP Test firmware image 9 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +/* enum: RXDP Test firmware image 10 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_SLOW 0x10c +/* TxDPCPU firmware id. 
*/ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DPCPU_FW_ID_OFST 6 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DPCPU_FW_ID_LEN 2 +/* enum: Standard TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP 0x0 +/* enum: Low latency TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_LOW_LATENCY 0x1 +/* enum: High packet rate TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_HIGH_PACKET_RATE 0x3 +/* enum: Rules engine TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_RULES_ENGINE 0x5 +/* enum: DPDK TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_DPDK 0x6 +/* enum: BIST TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_BIST 0x12d +/* enum: TXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 +/* enum: TXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 +/* enum: TXDP CSR bus test firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_CSR 0x103 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial RX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: RX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant RX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +/* enum: Low latency RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 +/* enum: Packed stream RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 +/* enum: RX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* enum: RX PD firmware parsing but not filtering network overlay tunnel + * encapsulations (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf 
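The RXPD_FW_VERSION word above packs two sub-fields into 16 bits: a 12-bit revision at REV_LBN 0 and a 4-bit type at TYPE_LBN 12 that selects one of the RXPD_FW_TYPE_* enum values. As a minimal sketch of how a consumer of this header would apply the LBN/WIDTH convention — the MCDI_FIELD helper and the sample value are illustrative assumptions, not part of this file:

#include <stdint.h>
#include <stdio.h>

/* Extract WIDTH bits starting at low bit number LBN, per the
 * <FIELD>_LBN / <FIELD>_WIDTH convention used throughout this header. */
#define MCDI_FIELD(value, lbn, width) \
	(((value) >> (lbn)) & ((1u << (width)) - 1u))

int main(void)
{
	uint16_t ver = 0x3042;	/* hypothetical RXPD_FW_VERSION word */
	unsigned int rev = MCDI_FIELD(ver, 0, 12);	/* REV: LBN 0, WIDTH 12 */
	unsigned int type = MCDI_FIELD(ver, 12, 4);	/* TYPE: LBN 12, WIDTH 4 */

	/* type 0x3 corresponds to RXPD_FW_TYPE_FULL_FEATURED above */
	printf("RXPD firmware type 0x%x, revision %u\n", type, rev);
	return 0;
}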
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial TX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: TX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant TX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ +/* enum: TX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* Hardware capabilities of NIC */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_HW_CAPABILITIES_OFST 12 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_HW_CAPABILITIES_LEN 4 +/* Licensed capabilities */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_LICENSE_CAPABILITIES_OFST 16 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_LICENSE_CAPABILITIES_LEN 4 +/* Second word of flags. Not present on older firmware (check the length). 
*/ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS2_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_ENCAP_LBN 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_ENCAP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVQ_TIMER_CTRL_LBN 2 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVQ_TIMER_CTRL_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVENT_CUT_THROUGH_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVENT_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_CUT_THROUGH_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VFIFO_ULL_MODE_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VFIFO_ULL_MODE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_V2_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_TIMESTAMPING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_TIMESTAMPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TIMESTAMP_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_SNIFF_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_SNIFF_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_BACKGROUND_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_BACKGROUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_DB_RETURN_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_DB_RETURN_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_SUPPORT_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_BOUND_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_BOUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_FLAG_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_FLAG_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_MARK_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_MARK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_L3XUDP_SUPPORT_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_L3XUDP_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_SPREADING_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_SPREADING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_HLB_IDLE_LBN 25 +#define 
MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_HLB_IDLE_WIDTH 1 +/* Number of FATSOv2 contexts per datapath supported by this NIC. Not present + * on older firmware (check the length). + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2 +/* One byte per PF containing the number of the external port assigned to this + * PF, indexed by PF number. Special values indicate that a PF is either not + * present or not assigned. + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16 +/* enum: The caller is not permitted to access information on this PF. */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_ACCESS_NOT_PERMITTED 0xff +/* enum: PF does not exist. */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_PRESENT 0xfe +/* enum: PF does exist but is not assigned to any external port. */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_ASSIGNED 0xfd +/* enum: This value indicates that PF is assigned, but it cannot be expressed + * in this field. It is intended for a possible future situation where a more + * complex scheme of PFs to ports mapping is being used. The future driver + * should look for a new field supporting the new scheme. The current/old + * driver should treat this value as PF_NOT_ASSIGNED. + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc +/* One byte per PF containing the number of its VFs, indexed by PF number. A + * special value indicates that a PF is not present. + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VFS_PER_PF_OFST 42 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VFS_PER_PF_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VFS_PER_PF_NUM 16 +/* enum: The caller is not permitted to access information on this PF. */ +/* MC_CMD_GET_CAPABILITIES_V4_OUT_ACCESS_NOT_PERMITTED 0xff */ +/* enum: PF does not exist. */ +/* MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_PRESENT 0xfe */ +/* Number of VIs available for each external port */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_OFST 58 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_NUM 4 +/* Size of RX descriptor cache expressed as binary logarithm The actual size + * equals (2 ^ RX_DESC_CACHE_SIZE) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DESC_CACHE_SIZE_OFST 66 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DESC_CACHE_SIZE_LEN 1 +/* Size of TX descriptor cache expressed as binary logarithm The actual size + * equals (2 ^ TX_DESC_CACHE_SIZE) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DESC_CACHE_SIZE_OFST 67 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DESC_CACHE_SIZE_LEN 1 +/* Total number of available PIO buffers */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_PIO_BUFFS_OFST 68 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_PIO_BUFFS_LEN 2 +/* Size of a single PIO buffer */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_SIZE_PIO_BUFF_OFST 70 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_SIZE_PIO_BUFF_LEN 2 +/* On chips later than Medford the amount of address space assigned to each VI + * is configurable. This is a global setting that the driver must query to + * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available + * with 8k VI windows. 
+ */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_OFST 72 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_LEN 1 +/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k. + * CTPIO is not mapped. + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_8K 0x0 +/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_16K 0x1 +/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_64K 0x2 +/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1 +/* Number of buffers per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2 +/* Entry count in the MAC stats array, including the final GENERATION_END + * entry. For MAC stats DMA, drivers should allocate a buffer large enough to + * hold at least this many 64-bit stats values, if they wish to receive all + * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the + * stats array returned will be truncated. + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS_OFST 76 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS_LEN 2 + +/* MC_CMD_GET_CAPABILITIES_V5_OUT msgresponse */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_LEN 84 +/* First word of flags. 
*/ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS1_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS1_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VPORT_RECONFIGURE_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VPORT_RECONFIGURE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_STRIPING_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_STRIPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_QUERY_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_QUERY_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_DRV_ATTACH_PREBOOT_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_DRV_ATTACH_PREBOOT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_FORCE_EVENT_MERGING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_SET_MAC_ENHANCED_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_SET_MAC_ENHANCED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_ADDITIONAL_RSS_MODES_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_ADDITIONAL_RSS_MODES_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_QBB_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_QBB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_RSS_LIMITED_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_RSS_LIMITED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_INCLUDE_FCS_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_INCLUDE_FCS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VLAN_INSERTION_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VLAN_INSERTION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_VLAN_STRIPPING_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_VLAN_STRIPPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_0_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_0_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_14_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_14_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_TIMESTAMP_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_BATCHING_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_BATCHING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCAST_FILTER_CHAINING_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCAST_FILTER_CHAINING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_PM_AND_RXDP_COUNTERS_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DISABLE_SCATTER_LBN 28 +#define 
MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VXLAN_NVGRE_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VXLAN_NVGRE_WIDTH 1 +/* RxDPCPU firmware id. */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DPCPU_FW_ID_OFST 4 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DPCPU_FW_ID_LEN 2 +/* enum: Standard RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP 0x0 +/* enum: Low latency RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_LOW_LATENCY 0x1 +/* enum: Packed stream RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_PACKED_STREAM 0x2 +/* enum: Rules engine RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_RULES_ENGINE 0x5 +/* enum: DPDK RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_DPDK 0x6 +/* enum: BIST RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_BIST 0x10a +/* enum: RXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 +/* enum: RXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 +/* enum: RXDP Test firmware image 3 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 +/* enum: RXDP Test firmware image 4 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 +/* enum: RXDP Test firmware image 5 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_BACKPRESSURE 0x105 +/* enum: RXDP Test firmware image 6 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 +/* enum: RXDP Test firmware image 7 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 +/* enum: RXDP Test firmware image 8 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 +/* enum: RXDP Test firmware image 9 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +/* enum: RXDP Test firmware image 10 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_SLOW 0x10c +/* TxDPCPU firmware id. 
*/ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_DPCPU_FW_ID_OFST 6 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_DPCPU_FW_ID_LEN 2 +/* enum: Standard TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP 0x0 +/* enum: Low latency TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_LOW_LATENCY 0x1 +/* enum: High packet rate TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_HIGH_PACKET_RATE 0x3 +/* enum: Rules engine TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_RULES_ENGINE 0x5 +/* enum: DPDK TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_DPDK 0x6 +/* enum: BIST TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_BIST 0x12d +/* enum: TXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 +/* enum: TXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 +/* enum: TXDP CSR bus test firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_TEST_FW_CSR 0x103 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial RX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: RX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant RX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +/* enum: Low latency RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 +/* enum: Packed stream RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 +/* enum: RX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* enum: RX PD firmware parsing but not filtering network overlay tunnel + * encapsulations (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf 
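Each of the FLAGS1/FLAGS2 capability words in these responses is a 32-bit field in which every advertised capability occupies one bit at its _LBN position (all flags here have _WIDTH 1). A driver reads the word out of the response buffer and tests individual bits against the LBNs defined above; a minimal sketch, where the helper and the choice of example flag are assumptions for illustration rather than part of this header:

#include <stdbool.h>
#include <stdint.h>

/* Test one single-bit capability flag, given its low bit number (LBN). */
static bool mcdi_flag_set(uint32_t flags, unsigned int lbn)
{
	return ((flags >> lbn) & 1u) != 0;
}

/* Example: RX_PACKED_STREAM support is advertised at LBN 17 of FLAGS1
 * (see MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_LBN above). */
bool rx_packed_stream_supported(uint32_t flags1)
{
	return mcdi_flag_set(flags1, 17);
}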
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial TX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: TX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant TX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ +/* enum: TX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* Hardware capabilities of NIC */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_HW_CAPABILITIES_OFST 12 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_HW_CAPABILITIES_LEN 4 +/* Licensed capabilities */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_LICENSE_CAPABILITIES_OFST 16 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_LICENSE_CAPABILITIES_LEN 4 +/* Second word of flags. Not present on older firmware (check the length). 
*/ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS2_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_ENCAP_LBN 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_ENCAP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVQ_TIMER_CTRL_LBN 2 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVQ_TIMER_CTRL_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVENT_CUT_THROUGH_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVENT_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_CUT_THROUGH_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VFIFO_ULL_MODE_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VFIFO_ULL_MODE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_V2_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_TIMESTAMPING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_TIMESTAMPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TIMESTAMP_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_SNIFF_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_SNIFF_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_BACKGROUND_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_BACKGROUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_DB_RETURN_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_DB_RETURN_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_CTPIO_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_CTPIO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_SUPPORT_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_BOUND_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_BOUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_FLAG_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_FLAG_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_L3XUDP_SUPPORT_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_L3XUDP_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_SPREADING_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_SPREADING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_HLB_IDLE_LBN 25 +#define 
MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_HLB_IDLE_WIDTH 1 +/* Number of FATSOv2 contexts per datapath supported by this NIC. Not present + * on older firmware (check the length). + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2 +/* One byte per PF containing the number of the external port assigned to this + * PF, indexed by PF number. Special values indicate that a PF is either not + * present or not assigned. + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16 +/* enum: The caller is not permitted to access information on this PF. */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_ACCESS_NOT_PERMITTED 0xff +/* enum: PF does not exist. */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_PF_NOT_PRESENT 0xfe +/* enum: PF does exist but is not assigned to any external port. */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_PF_NOT_ASSIGNED 0xfd +/* enum: This value indicates that PF is assigned, but it cannot be expressed + * in this field. It is intended for a possible future situation where a more + * complex scheme of PFs to ports mapping is being used. The future driver + * should look for a new field supporting the new scheme. The current/old + * driver should treat this value as PF_NOT_ASSIGNED. + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc +/* One byte per PF containing the number of its VFs, indexed by PF number. A + * special value indicates that a PF is not present. + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VFS_PER_PF_OFST 42 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VFS_PER_PF_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VFS_PER_PF_NUM 16 +/* enum: The caller is not permitted to access information on this PF. */ +/* MC_CMD_GET_CAPABILITIES_V5_OUT_ACCESS_NOT_PERMITTED 0xff */ +/* enum: PF does not exist. */ +/* MC_CMD_GET_CAPABILITIES_V5_OUT_PF_NOT_PRESENT 0xfe */ +/* Number of VIs available for each external port */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VIS_PER_PORT_OFST 58 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VIS_PER_PORT_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VIS_PER_PORT_NUM 4 +/* Size of RX descriptor cache expressed as binary logarithm The actual size + * equals (2 ^ RX_DESC_CACHE_SIZE) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DESC_CACHE_SIZE_OFST 66 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DESC_CACHE_SIZE_LEN 1 +/* Size of TX descriptor cache expressed as binary logarithm The actual size + * equals (2 ^ TX_DESC_CACHE_SIZE) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_DESC_CACHE_SIZE_OFST 67 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_DESC_CACHE_SIZE_LEN 1 +/* Total number of available PIO buffers */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_PIO_BUFFS_OFST 68 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_PIO_BUFFS_LEN 2 +/* Size of a single PIO buffer */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_SIZE_PIO_BUFF_OFST 70 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_SIZE_PIO_BUFF_LEN 2 +/* On chips later than Medford the amount of address space assigned to each VI + * is configurable. This is a global setting that the driver must query to + * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available + * with 8k VI windows. 
+ */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_OFST 72 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_LEN 1 +/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k. + * CTPIO is not mapped. + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_8K 0x0 +/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_16K 0x1 +/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_64K 0x2 +/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1 +/* Number of buffers per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2 +/* Entry count in the MAC stats array, including the final GENERATION_END + * entry. For MAC stats DMA, drivers should allocate a buffer large enough to + * hold at least this many 64-bit stats values, if they wish to receive all + * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the + * stats array returned will be truncated. + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_NUM_STATS_OFST 76 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_NUM_STATS_LEN 2 +/* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field + * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set. + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_MAX_OFST 80 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_MAX_LEN 4 + /***********************************/ /* MC_CMD_V2_EXTN @@ -11144,7 +11437,16 @@ #define MC_CMD_V2_EXTN_IN_ACTUAL_LEN_LBN 16 #define MC_CMD_V2_EXTN_IN_ACTUAL_LEN_WIDTH 10 #define MC_CMD_V2_EXTN_IN_UNUSED2_LBN 26 -#define MC_CMD_V2_EXTN_IN_UNUSED2_WIDTH 6 +#define MC_CMD_V2_EXTN_IN_UNUSED2_WIDTH 2 +/* Type of command/response */ +#define MC_CMD_V2_EXTN_IN_MESSAGE_TYPE_LBN 28 +#define MC_CMD_V2_EXTN_IN_MESSAGE_TYPE_WIDTH 4 +/* enum: MCDI command directed to or response originating from the MC. */ +#define MC_CMD_V2_EXTN_IN_MCDI_MESSAGE_TYPE_MC 0x0 +/* enum: MCDI command directed to a TSA controller. MCDI responses of this type + * are not defined. 
+ */ +#define MC_CMD_V2_EXTN_IN_MCDI_MESSAGE_TYPE_TSA 0x1 /***********************************/ @@ -11163,6 +11465,7 @@ #define MC_CMD_TCM_BUCKET_ALLOC_OUT_LEN 4 /* the bucket id */ #define MC_CMD_TCM_BUCKET_ALLOC_OUT_BUCKET_OFST 0 +#define MC_CMD_TCM_BUCKET_ALLOC_OUT_BUCKET_LEN 4 /***********************************/ @@ -11178,6 +11481,7 @@ #define MC_CMD_TCM_BUCKET_FREE_IN_LEN 4 /* the bucket id */ #define MC_CMD_TCM_BUCKET_FREE_IN_BUCKET_OFST 0 +#define MC_CMD_TCM_BUCKET_FREE_IN_BUCKET_LEN 4 /* MC_CMD_TCM_BUCKET_FREE_OUT msgresponse */ #define MC_CMD_TCM_BUCKET_FREE_OUT_LEN 0 @@ -11196,17 +11500,22 @@ #define MC_CMD_TCM_BUCKET_INIT_IN_LEN 8 /* the bucket id */ #define MC_CMD_TCM_BUCKET_INIT_IN_BUCKET_OFST 0 +#define MC_CMD_TCM_BUCKET_INIT_IN_BUCKET_LEN 4 /* the rate in mbps */ #define MC_CMD_TCM_BUCKET_INIT_IN_RATE_OFST 4 +#define MC_CMD_TCM_BUCKET_INIT_IN_RATE_LEN 4 /* MC_CMD_TCM_BUCKET_INIT_EXT_IN msgrequest */ #define MC_CMD_TCM_BUCKET_INIT_EXT_IN_LEN 12 /* the bucket id */ #define MC_CMD_TCM_BUCKET_INIT_EXT_IN_BUCKET_OFST 0 +#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_BUCKET_LEN 4 /* the rate in mbps */ #define MC_CMD_TCM_BUCKET_INIT_EXT_IN_RATE_OFST 4 +#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_RATE_LEN 4 /* the desired maximum fill level */ #define MC_CMD_TCM_BUCKET_INIT_EXT_IN_MAX_FILL_OFST 8 +#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_MAX_FILL_LEN 4 /* MC_CMD_TCM_BUCKET_INIT_OUT msgresponse */ #define MC_CMD_TCM_BUCKET_INIT_OUT_LEN 0 @@ -11225,10 +11534,13 @@ #define MC_CMD_TCM_TXQ_INIT_IN_LEN 28 /* the txq id */ #define MC_CMD_TCM_TXQ_INIT_IN_QID_OFST 0 +#define MC_CMD_TCM_TXQ_INIT_IN_QID_LEN 4 /* the static priority associated with the txq */ #define MC_CMD_TCM_TXQ_INIT_IN_LABEL_OFST 4 +#define MC_CMD_TCM_TXQ_INIT_IN_LABEL_LEN 4 /* bitmask of the priority queues this txq is inserted into when inserted. */ #define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_OFST 8 +#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_LEN 4 #define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_LBN 0 #define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_WIDTH 1 #define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_LBN 1 @@ -11237,25 +11549,32 @@ #define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_WIDTH 1 /* the reaction point (RP) bucket */ #define MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_OFST 12 +#define MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_LEN 4 /* an already reserved bucket (typically set to bucket associated with outer * vswitch) */ #define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT1_OFST 16 +#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT1_LEN 4 /* an already reserved bucket (typically set to bucket associated with inner * vswitch) */ #define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT2_OFST 20 +#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT2_LEN 4 /* the min bucket (typically for ETS/minimum bandwidth) */ #define MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_OFST 24 +#define MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_LEN 4 /* MC_CMD_TCM_TXQ_INIT_EXT_IN msgrequest */ #define MC_CMD_TCM_TXQ_INIT_EXT_IN_LEN 32 /* the txq id */ #define MC_CMD_TCM_TXQ_INIT_EXT_IN_QID_OFST 0 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_QID_LEN 4 /* the static priority associated with the txq */ #define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_NORMAL_OFST 4 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_NORMAL_LEN 4 /* bitmask of the priority queues this txq is inserted into when inserted. 
*/ #define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAGS_OFST 8 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAGS_LEN 4 #define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_LBN 0 #define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_WIDTH 1 #define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_LBN 1 @@ -11264,18 +11583,23 @@ #define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_WIDTH 1 /* the reaction point (RP) bucket */ #define MC_CMD_TCM_TXQ_INIT_EXT_IN_RP_BKT_OFST 12 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_RP_BKT_LEN 4 /* an already reserved bucket (typically set to bucket associated with outer * vswitch) */ #define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT1_OFST 16 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT1_LEN 4 /* an already reserved bucket (typically set to bucket associated with inner * vswitch) */ #define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT2_OFST 20 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT2_LEN 4 /* the min bucket (typically for ETS/minimum bandwidth) */ #define MC_CMD_TCM_TXQ_INIT_EXT_IN_MIN_BKT_OFST 24 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MIN_BKT_LEN 4 /* the static priority associated with the txq */ #define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_GUARANTEED_OFST 28 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_GUARANTEED_LEN 4 /* MC_CMD_TCM_TXQ_INIT_OUT msgresponse */ #define MC_CMD_TCM_TXQ_INIT_OUT_LEN 0 @@ -11294,8 +11618,10 @@ #define MC_CMD_LINK_PIOBUF_IN_LEN 8 /* Handle for allocated push I/O buffer. */ #define MC_CMD_LINK_PIOBUF_IN_PIOBUF_HANDLE_OFST 0 +#define MC_CMD_LINK_PIOBUF_IN_PIOBUF_HANDLE_LEN 4 /* Function Local Instance (VI) number. */ #define MC_CMD_LINK_PIOBUF_IN_TXQ_INSTANCE_OFST 4 +#define MC_CMD_LINK_PIOBUF_IN_TXQ_INSTANCE_LEN 4 /* MC_CMD_LINK_PIOBUF_OUT msgresponse */ #define MC_CMD_LINK_PIOBUF_OUT_LEN 0 @@ -11314,6 +11640,7 @@ #define MC_CMD_UNLINK_PIOBUF_IN_LEN 4 /* Function Local Instance (VI) number. */ #define MC_CMD_UNLINK_PIOBUF_IN_TXQ_INSTANCE_OFST 0 +#define MC_CMD_UNLINK_PIOBUF_IN_TXQ_INSTANCE_LEN 4 /* MC_CMD_UNLINK_PIOBUF_OUT msgresponse */ #define MC_CMD_UNLINK_PIOBUF_OUT_LEN 0 @@ -11332,20 +11659,23 @@ #define MC_CMD_VSWITCH_ALLOC_IN_LEN 16 /* The port to connect to the v-switch's upstream port. */ #define MC_CMD_VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0 +#define MC_CMD_VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4 /* The type of v-switch to create. */ #define MC_CMD_VSWITCH_ALLOC_IN_TYPE_OFST 4 +#define MC_CMD_VSWITCH_ALLOC_IN_TYPE_LEN 4 /* enum: VLAN */ -#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VLAN 0x1 +#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VLAN 0x1 /* enum: VEB */ -#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEB 0x2 +#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEB 0x2 /* enum: VEPA (obsolete) */ -#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEPA 0x3 +#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEPA 0x3 /* enum: MUX */ -#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_MUX 0x4 +#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_MUX 0x4 /* enum: Snapper specific; semantics TBD */ -#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_TEST 0x5 +#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_TEST 0x5 /* Flags controlling v-port creation */ #define MC_CMD_VSWITCH_ALLOC_IN_FLAGS_OFST 8 +#define MC_CMD_VSWITCH_ALLOC_IN_FLAGS_LEN 4 #define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_LBN 0 #define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1 /* The number of VLAN tags to allow for attached v-ports. For VLAN aggregators, @@ -11356,6 +11686,7 @@ * v-ports with this number of tags. 
*/ #define MC_CMD_VSWITCH_ALLOC_IN_NUM_VLAN_TAGS_OFST 12 +#define MC_CMD_VSWITCH_ALLOC_IN_NUM_VLAN_TAGS_LEN 4 /* MC_CMD_VSWITCH_ALLOC_OUT msgresponse */ #define MC_CMD_VSWITCH_ALLOC_OUT_LEN 0 @@ -11374,6 +11705,7 @@ #define MC_CMD_VSWITCH_FREE_IN_LEN 4 /* The port to which the v-switch is connected. */ #define MC_CMD_VSWITCH_FREE_IN_UPSTREAM_PORT_ID_OFST 0 +#define MC_CMD_VSWITCH_FREE_IN_UPSTREAM_PORT_ID_LEN 4 /* MC_CMD_VSWITCH_FREE_OUT msgresponse */ #define MC_CMD_VSWITCH_FREE_OUT_LEN 0 @@ -11394,6 +11726,7 @@ #define MC_CMD_VSWITCH_QUERY_IN_LEN 4 /* The port to which the v-switch is connected. */ #define MC_CMD_VSWITCH_QUERY_IN_UPSTREAM_PORT_ID_OFST 0 +#define MC_CMD_VSWITCH_QUERY_IN_UPSTREAM_PORT_ID_LEN 4 /* MC_CMD_VSWITCH_QUERY_OUT msgresponse */ #define MC_CMD_VSWITCH_QUERY_OUT_LEN 0 @@ -11412,28 +11745,31 @@ #define MC_CMD_VPORT_ALLOC_IN_LEN 20 /* The port to which the v-switch is connected. */ #define MC_CMD_VPORT_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0 +#define MC_CMD_VPORT_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4 /* The type of the new v-port. */ #define MC_CMD_VPORT_ALLOC_IN_TYPE_OFST 4 +#define MC_CMD_VPORT_ALLOC_IN_TYPE_LEN 4 /* enum: VLAN (obsolete) */ -#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VLAN 0x1 +#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VLAN 0x1 /* enum: VEB (obsolete) */ -#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VEB 0x2 +#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VEB 0x2 /* enum: VEPA (obsolete) */ -#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VEPA 0x3 +#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VEPA 0x3 /* enum: A normal v-port receives packets which match a specified MAC and/or * VLAN. */ -#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL 0x4 +#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL 0x4 /* enum: An expansion v-port receives traffic which doesn't match any other * v-port. */ -#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_EXPANSION 0x5 +#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_EXPANSION 0x5 /* enum: A test v-port receives packets which match any filters installed by * its downstream components. */ -#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_TEST 0x6 +#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_TEST 0x6 /* Flags controlling v-port creation */ #define MC_CMD_VPORT_ALLOC_IN_FLAGS_OFST 8 +#define MC_CMD_VPORT_ALLOC_IN_FLAGS_LEN 4 #define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_LBN 0 #define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1 #define MC_CMD_VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT_LBN 1 @@ -11443,8 +11779,10 @@ * v-switch. */ #define MC_CMD_VPORT_ALLOC_IN_NUM_VLAN_TAGS_OFST 12 +#define MC_CMD_VPORT_ALLOC_IN_NUM_VLAN_TAGS_LEN 4 /* The actual VLAN tags to insert/remove */ #define MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_OFST 16 +#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_LEN 4 #define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_LBN 0 #define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_WIDTH 16 #define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_LBN 16 @@ -11454,6 +11792,7 @@ #define MC_CMD_VPORT_ALLOC_OUT_LEN 4 /* The handle of the new v-port */ #define MC_CMD_VPORT_ALLOC_OUT_VPORT_ID_OFST 0 +#define MC_CMD_VPORT_ALLOC_OUT_VPORT_ID_LEN 4 /***********************************/ @@ -11469,6 +11808,7 @@ #define MC_CMD_VPORT_FREE_IN_LEN 4 /* The handle of the v-port */ #define MC_CMD_VPORT_FREE_IN_VPORT_ID_OFST 0 +#define MC_CMD_VPORT_FREE_IN_VPORT_ID_LEN 4 /* MC_CMD_VPORT_FREE_OUT msgresponse */ #define MC_CMD_VPORT_FREE_OUT_LEN 0 @@ -11487,18 +11827,23 @@ #define MC_CMD_VADAPTOR_ALLOC_IN_LEN 30 /* The port to connect to the v-adaptor's port.
*/ #define MC_CMD_VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0 +#define MC_CMD_VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4 /* Flags controlling v-adaptor creation */ #define MC_CMD_VADAPTOR_ALLOC_IN_FLAGS_OFST 8 +#define MC_CMD_VADAPTOR_ALLOC_IN_FLAGS_LEN 4 #define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_LBN 0 #define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_WIDTH 1 #define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 1 #define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 /* The number of VLAN tags to strip on receive */ #define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLANS_OFST 12 +#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLANS_LEN 4 /* The number of VLAN tags to transparently insert/remove. */ #define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLAN_TAGS_OFST 16 +#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLAN_TAGS_LEN 4 /* The actual VLAN tags to insert/remove */ #define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAGS_OFST 20 +#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAGS_LEN 4 #define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_LBN 0 #define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_WIDTH 16 #define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_1_LBN 16 @@ -11507,7 +11852,7 @@ #define MC_CMD_VADAPTOR_ALLOC_IN_MACADDR_OFST 24 #define MC_CMD_VADAPTOR_ALLOC_IN_MACADDR_LEN 6 /* enum: Derive the MAC address from the upstream port */ -#define MC_CMD_VADAPTOR_ALLOC_IN_AUTO_MAC 0x0 +#define MC_CMD_VADAPTOR_ALLOC_IN_AUTO_MAC 0x0 /* MC_CMD_VADAPTOR_ALLOC_OUT msgresponse */ #define MC_CMD_VADAPTOR_ALLOC_OUT_LEN 0 @@ -11526,6 +11871,7 @@ #define MC_CMD_VADAPTOR_FREE_IN_LEN 4 /* The port to which the v-adaptor is connected. */ #define MC_CMD_VADAPTOR_FREE_IN_UPSTREAM_PORT_ID_OFST 0 +#define MC_CMD_VADAPTOR_FREE_IN_UPSTREAM_PORT_ID_LEN 4 /* MC_CMD_VADAPTOR_FREE_OUT msgresponse */ #define MC_CMD_VADAPTOR_FREE_OUT_LEN 0 @@ -11544,6 +11890,7 @@ #define MC_CMD_VADAPTOR_SET_MAC_IN_LEN 10 /* The port to which the v-adaptor is connected. */ #define MC_CMD_VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID_OFST 0 +#define MC_CMD_VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID_LEN 4 /* The new MAC address to assign to this v-adaptor */ #define MC_CMD_VADAPTOR_SET_MAC_IN_MACADDR_OFST 4 #define MC_CMD_VADAPTOR_SET_MAC_IN_MACADDR_LEN 6 @@ -11565,6 +11912,7 @@ #define MC_CMD_VADAPTOR_GET_MAC_IN_LEN 4 /* The port to which the v-adaptor is connected. */ #define MC_CMD_VADAPTOR_GET_MAC_IN_UPSTREAM_PORT_ID_OFST 0 +#define MC_CMD_VADAPTOR_GET_MAC_IN_UPSTREAM_PORT_ID_LEN 4 /* MC_CMD_VADAPTOR_GET_MAC_OUT msgresponse */ #define MC_CMD_VADAPTOR_GET_MAC_OUT_LEN 6 @@ -11586,15 +11934,19 @@ #define MC_CMD_VADAPTOR_QUERY_IN_LEN 4 /* The port to which the v-adaptor is connected. */ #define MC_CMD_VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID_OFST 0 +#define MC_CMD_VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID_LEN 4 /* MC_CMD_VADAPTOR_QUERY_OUT msgresponse */ #define MC_CMD_VADAPTOR_QUERY_OUT_LEN 12 /* The EVB port flags as defined at MC_CMD_VPORT_ALLOC. */ #define MC_CMD_VADAPTOR_QUERY_OUT_PORT_FLAGS_OFST 0 +#define MC_CMD_VADAPTOR_QUERY_OUT_PORT_FLAGS_LEN 4 /* The v-adaptor flags as defined at MC_CMD_VADAPTOR_ALLOC. */ #define MC_CMD_VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS_OFST 4 +#define MC_CMD_VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS_LEN 4 /* The number of VLAN tags that may still be added */ #define MC_CMD_VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_OFST 8 +#define MC_CMD_VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_LEN 4 /***********************************/ @@ -11610,8 +11962,10 @@ #define MC_CMD_EVB_PORT_ASSIGN_IN_LEN 8 /* The port to assign. 
*/ #define MC_CMD_EVB_PORT_ASSIGN_IN_PORT_ID_OFST 0 +#define MC_CMD_EVB_PORT_ASSIGN_IN_PORT_ID_LEN 4 /* The target function to modify. */ #define MC_CMD_EVB_PORT_ASSIGN_IN_FUNCTION_OFST 4 +#define MC_CMD_EVB_PORT_ASSIGN_IN_FUNCTION_LEN 4 #define MC_CMD_EVB_PORT_ASSIGN_IN_PF_LBN 0 #define MC_CMD_EVB_PORT_ASSIGN_IN_PF_WIDTH 16 #define MC_CMD_EVB_PORT_ASSIGN_IN_VF_LBN 16 @@ -11633,9 +11987,13 @@ /* MC_CMD_RDWR_A64_REGIONS_IN msgrequest */ #define MC_CMD_RDWR_A64_REGIONS_IN_LEN 17 #define MC_CMD_RDWR_A64_REGIONS_IN_REGION0_OFST 0 +#define MC_CMD_RDWR_A64_REGIONS_IN_REGION0_LEN 4 #define MC_CMD_RDWR_A64_REGIONS_IN_REGION1_OFST 4 +#define MC_CMD_RDWR_A64_REGIONS_IN_REGION1_LEN 4 #define MC_CMD_RDWR_A64_REGIONS_IN_REGION2_OFST 8 +#define MC_CMD_RDWR_A64_REGIONS_IN_REGION2_LEN 4 #define MC_CMD_RDWR_A64_REGIONS_IN_REGION3_OFST 12 +#define MC_CMD_RDWR_A64_REGIONS_IN_REGION3_LEN 4 /* Write enable bits 0-3, set to write, clear to read. */ #define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_LBN 128 #define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_WIDTH 4 @@ -11647,9 +12005,13 @@ */ #define MC_CMD_RDWR_A64_REGIONS_OUT_LEN 16 #define MC_CMD_RDWR_A64_REGIONS_OUT_REGION0_OFST 0 +#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION0_LEN 4 #define MC_CMD_RDWR_A64_REGIONS_OUT_REGION1_OFST 4 +#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION1_LEN 4 #define MC_CMD_RDWR_A64_REGIONS_OUT_REGION2_OFST 8 +#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION2_LEN 4 #define MC_CMD_RDWR_A64_REGIONS_OUT_REGION3_OFST 12 +#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION3_LEN 4 /***********************************/ @@ -11665,11 +12027,13 @@ #define MC_CMD_ONLOAD_STACK_ALLOC_IN_LEN 4 /* The handle of the owning upstream port */ #define MC_CMD_ONLOAD_STACK_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0 +#define MC_CMD_ONLOAD_STACK_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4 /* MC_CMD_ONLOAD_STACK_ALLOC_OUT msgresponse */ #define MC_CMD_ONLOAD_STACK_ALLOC_OUT_LEN 4 /* The handle of the new Onload stack */ #define MC_CMD_ONLOAD_STACK_ALLOC_OUT_ONLOAD_STACK_ID_OFST 0 +#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_ONLOAD_STACK_ID_LEN 4 /***********************************/ @@ -11685,6 +12049,7 @@ #define MC_CMD_ONLOAD_STACK_FREE_IN_LEN 4 /* The handle of the Onload stack */ #define MC_CMD_ONLOAD_STACK_FREE_IN_ONLOAD_STACK_ID_OFST 0 +#define MC_CMD_ONLOAD_STACK_FREE_IN_ONLOAD_STACK_ID_LEN 4 /* MC_CMD_ONLOAD_STACK_FREE_OUT msgresponse */ #define MC_CMD_ONLOAD_STACK_FREE_OUT_LEN 0 @@ -11703,21 +12068,24 @@ #define MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN 12 /* The handle of the owning upstream port */ #define MC_CMD_RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0 +#define MC_CMD_RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4 /* The type of context to allocate */ #define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_OFST 4 +#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_LEN 4 /* enum: Allocate a context for exclusive use. The key and indirection table * must be explicitly configured. */ -#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE 0x0 +#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE 0x0 /* enum: Allocate a context for shared use; this will spread across a range of * queues, but the key and indirection table are pre-configured and may not be * changed. For this mode, NUM_QUEUES must be 2, 4, 8, 16, 32 or 64. */ -#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED 0x1 +#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED 0x1 /* Number of queues spanned by this context, in the range 1-64; valid offsets * in the indirection table will be in the range 0 to NUM_QUEUES-1.
*/ #define MC_CMD_RSS_CONTEXT_ALLOC_IN_NUM_QUEUES_OFST 8 +#define MC_CMD_RSS_CONTEXT_ALLOC_IN_NUM_QUEUES_LEN 4 /* MC_CMD_RSS_CONTEXT_ALLOC_OUT msgresponse */ #define MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN 4 @@ -11726,8 +12094,9 @@ * handle. */ #define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_OFST 0 +#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_LEN 4 /* enum: guaranteed invalid RSS context handle value */ -#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_INVALID 0xffffffff +#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_INVALID 0xffffffff /***********************************/ @@ -11743,6 +12112,7 @@ #define MC_CMD_RSS_CONTEXT_FREE_IN_LEN 4 /* The handle of the RSS context */ #define MC_CMD_RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID_OFST 0 +#define MC_CMD_RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID_LEN 4 /* MC_CMD_RSS_CONTEXT_FREE_OUT msgresponse */ #define MC_CMD_RSS_CONTEXT_FREE_OUT_LEN 0 @@ -11761,6 +12131,7 @@ #define MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN 44 /* The handle of the RSS context */ #define MC_CMD_RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID_OFST 0 +#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID_LEN 4 /* The 40-byte Toeplitz hash key (TBD endianness issues?) */ #define MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_OFST 4 #define MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN 40 @@ -11782,6 +12153,7 @@ #define MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN 4 /* The handle of the RSS context */ #define MC_CMD_RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID_OFST 0 +#define MC_CMD_RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID_LEN 4 /* MC_CMD_RSS_CONTEXT_GET_KEY_OUT msgresponse */ #define MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN 44 @@ -11803,6 +12175,7 @@ #define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN 132 /* The handle of the RSS context */ #define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID_OFST 0 +#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID_LEN 4 /* The 128-byte indirection table (1 byte per entry) */ #define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_OFST 4 #define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN 128 @@ -11824,6 +12197,7 @@ #define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN 4 /* The handle of the RSS context */ #define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID_OFST 0 +#define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID_LEN 4 /* MC_CMD_RSS_CONTEXT_GET_TABLE_OUT msgresponse */ #define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN 132 @@ -11845,6 +12219,7 @@ #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN 8 /* The handle of the RSS context */ #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_LEN 4 /* Hash control flags. The _EN bits are always supported, but new modes are * available when ADDITIONAL_RSS_MODES is reported by MC_CMD_GET_CAPABILITIES: * in this case, the MODE fields may be set to non-zero values, and will take @@ -11858,6 +12233,7 @@ * particular packet type.) 
*/ #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_OFST 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_LEN 4 #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_LBN 0 #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_WIDTH 1 #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_LBN 1 @@ -11898,6 +12274,7 @@ #define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_LEN 4 /* The handle of the RSS context */ #define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID_LEN 4 /* MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT msgresponse */ #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN 8 @@ -11915,6 +12292,7 @@ * always be used for a SET regardless of old/new driver vs. old/new firmware. */ #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_LEN 4 #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN 0 #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_WIDTH 1 #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_LBN 1 @@ -11952,11 +12330,13 @@ #define MC_CMD_DOT1P_MAPPING_ALLOC_IN_LEN 8 /* The handle of the owning upstream port */ #define MC_CMD_DOT1P_MAPPING_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0 +#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4 /* Number of queues spanned by this mapping, in the range 1-64; valid fixed * offsets in the mapping table will be in the range 0 to NUM_QUEUES-1, and * referenced RSS contexts must span no more than this number. */ #define MC_CMD_DOT1P_MAPPING_ALLOC_IN_NUM_QUEUES_OFST 4 +#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_NUM_QUEUES_LEN 4 /* MC_CMD_DOT1P_MAPPING_ALLOC_OUT msgresponse */ #define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_LEN 4 @@ -11965,8 +12345,9 @@ * handle. */ #define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_OFST 0 +#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_LEN 4 /* enum: guaranteed invalid .1p mapping handle value */ -#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_INVALID 0xffffffff +#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_INVALID 0xffffffff /***********************************/ @@ -11982,6 +12363,7 @@ #define MC_CMD_DOT1P_MAPPING_FREE_IN_LEN 4 /* The handle of the .1p mapping */ #define MC_CMD_DOT1P_MAPPING_FREE_IN_DOT1P_MAPPING_ID_OFST 0 +#define MC_CMD_DOT1P_MAPPING_FREE_IN_DOT1P_MAPPING_ID_LEN 4 /* MC_CMD_DOT1P_MAPPING_FREE_OUT msgresponse */ #define MC_CMD_DOT1P_MAPPING_FREE_OUT_LEN 0 @@ -12000,6 +12382,7 @@ #define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_LEN 36 /* The handle of the .1p mapping */ #define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0 +#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_DOT1P_MAPPING_ID_LEN 4 /* Per-priority mappings (1 32-bit word per entry - an offset or RSS context * handle) */ @@ -12023,6 +12406,7 @@ #define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_LEN 4 /* The handle of the .1p mapping */ #define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0 +#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_DOT1P_MAPPING_ID_LEN 4 /* MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT msgresponse */ #define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_LEN 36 @@ -12049,10 +12433,13 @@ #define MC_CMD_GET_VECTOR_CFG_OUT_LEN 12 /* Base absolute interrupt vector number. */ #define MC_CMD_GET_VECTOR_CFG_OUT_VEC_BASE_OFST 0 +#define MC_CMD_GET_VECTOR_CFG_OUT_VEC_BASE_LEN 4 /* Number of interrupt vectors allocated to this PF. */ #define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_PF_OFST 4 +#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_PF_LEN 4 /* Number of interrupt vectors to allocate per VF.
*/ #define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_VF_OFST 8 +#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_VF_LEN 4 /***********************************/ @@ -12070,10 +12457,13 @@ * let the system find a suitable base. */ #define MC_CMD_SET_VECTOR_CFG_IN_VEC_BASE_OFST 0 +#define MC_CMD_SET_VECTOR_CFG_IN_VEC_BASE_LEN 4 /* Number of interrupt vectors to allocate to this PF. */ #define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_PF_OFST 4 +#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_PF_LEN 4 /* Number of interrupt vectors to allocate per VF. */ #define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_VF_OFST 8 +#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_VF_LEN 4 /* MC_CMD_SET_VECTOR_CFG_OUT msgresponse */ #define MC_CMD_SET_VECTOR_CFG_OUT_LEN 0 @@ -12092,6 +12482,7 @@ #define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN 10 /* The handle of the v-port */ #define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID_OFST 0 +#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID_LEN 4 /* MAC address to add */ #define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_OFST 4 #define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_LEN 6 @@ -12113,6 +12504,7 @@ #define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN 10 /* The handle of the v-port */ #define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID_OFST 0 +#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID_LEN 4 /* MAC address to remove */ #define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_MACADDR_OFST 4 #define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_MACADDR_LEN 6 @@ -12134,6 +12526,7 @@ #define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN 4 /* The handle of the v-port */ #define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID_OFST 0 +#define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID_LEN 4 /* MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT msgresponse */ #define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN 4 @@ -12141,6 +12534,7 @@ #define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LEN(num) (4+6*(num)) /* The number of MAC addresses returned */ #define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT_OFST 0 +#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT_LEN 4 /* Array of MAC addresses */ #define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_OFST 4 #define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_LEN 6 @@ -12163,8 +12557,10 @@ #define MC_CMD_VPORT_RECONFIGURE_IN_LEN 44 /* The handle of the v-port */ #define MC_CMD_VPORT_RECONFIGURE_IN_VPORT_ID_OFST 0 +#define MC_CMD_VPORT_RECONFIGURE_IN_VPORT_ID_LEN 4 /* Flags requesting what should be changed. */ #define MC_CMD_VPORT_RECONFIGURE_IN_FLAGS_OFST 4 +#define MC_CMD_VPORT_RECONFIGURE_IN_FLAGS_LEN 4 #define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_LBN 0 #define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_WIDTH 1 #define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_MACADDRS_LBN 1 @@ -12174,14 +12570,17 @@ * v-switch.
*/ #define MC_CMD_VPORT_RECONFIGURE_IN_NUM_VLAN_TAGS_OFST 8 +#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_VLAN_TAGS_LEN 4 /* The actual VLAN tags to insert/remove */ #define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAGS_OFST 12 +#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAGS_LEN 4 #define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_LBN 0 #define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_WIDTH 16 #define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_LBN 16 #define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_WIDTH 16 /* The number of MAC addresses to add */ #define MC_CMD_VPORT_RECONFIGURE_IN_NUM_MACADDRS_OFST 16 +#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_MACADDRS_LEN 4 /* MAC addresses to add */ #define MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_OFST 20 #define MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_LEN 6 @@ -12190,6 +12589,7 @@ /* MC_CMD_VPORT_RECONFIGURE_OUT msgresponse */ #define MC_CMD_VPORT_RECONFIGURE_OUT_LEN 4 #define MC_CMD_VPORT_RECONFIGURE_OUT_FLAGS_OFST 0 +#define MC_CMD_VPORT_RECONFIGURE_OUT_FLAGS_LEN 4 #define MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_LBN 0 #define MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_WIDTH 1 @@ -12207,15 +12607,18 @@ #define MC_CMD_EVB_PORT_QUERY_IN_LEN 4 /* The handle of the v-port */ #define MC_CMD_EVB_PORT_QUERY_IN_PORT_ID_OFST 0 +#define MC_CMD_EVB_PORT_QUERY_IN_PORT_ID_LEN 4 /* MC_CMD_EVB_PORT_QUERY_OUT msgresponse */ #define MC_CMD_EVB_PORT_QUERY_OUT_LEN 8 /* The EVB port flags as defined at MC_CMD_VPORT_ALLOC. */ #define MC_CMD_EVB_PORT_QUERY_OUT_PORT_FLAGS_OFST 0 +#define MC_CMD_EVB_PORT_QUERY_OUT_PORT_FLAGS_LEN 4 /* The number of VLAN tags that may be used on a v-adaptor connected to this * EVB port. */ #define MC_CMD_EVB_PORT_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_OFST 4 +#define MC_CMD_EVB_PORT_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_LEN 4 /***********************************/ @@ -12228,14 +12631,16 @@ #define MC_CMD_DUMP_BUFTBL_ENTRIES 0xab #undef MC_CMD_0xab_PRIVILEGE_CTG -#define MC_CMD_0xab_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0xab_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_DUMP_BUFTBL_ENTRIES_IN msgrequest */ #define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_LEN 8 /* Index of the first buffer table entry. */ #define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_FIRSTID_OFST 0 +#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_FIRSTID_LEN 4 /* Number of buffer table entries to dump. 
*/ #define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 4 +#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_NUMENTRIES_LEN 4 /* MC_CMD_DUMP_BUFTBL_ENTRIES_OUT msgresponse */ #define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMIN 12 @@ -12260,16 +12665,17 @@ /* MC_CMD_SET_RXDP_CONFIG_IN msgrequest */ #define MC_CMD_SET_RXDP_CONFIG_IN_LEN 4 #define MC_CMD_SET_RXDP_CONFIG_IN_DATA_OFST 0 +#define MC_CMD_SET_RXDP_CONFIG_IN_DATA_LEN 4 #define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_LBN 0 #define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_WIDTH 1 #define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_LBN 1 #define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_WIDTH 2 /* enum: pad to 64 bytes */ -#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_64 0x0 +#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_64 0x0 /* enum: pad to 128 bytes (Medford only) */ -#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_128 0x1 +#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_128 0x1 /* enum: pad to 256 bytes (Medford only) */ -#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_256 0x2 +#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_256 0x2 /* MC_CMD_SET_RXDP_CONFIG_OUT msgresponse */ #define MC_CMD_SET_RXDP_CONFIG_OUT_LEN 0 @@ -12290,6 +12696,7 @@ /* MC_CMD_GET_RXDP_CONFIG_OUT msgresponse */ #define MC_CMD_GET_RXDP_CONFIG_OUT_LEN 4 #define MC_CMD_GET_RXDP_CONFIG_OUT_DATA_OFST 0 +#define MC_CMD_GET_RXDP_CONFIG_OUT_DATA_LEN 4 #define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_LBN 0 #define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_WIDTH 1 #define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_LBN 1 @@ -12314,8 +12721,10 @@ #define MC_CMD_GET_CLOCK_OUT_LEN 8 /* System frequency, MHz */ #define MC_CMD_GET_CLOCK_OUT_SYS_FREQ_OFST 0 +#define MC_CMD_GET_CLOCK_OUT_SYS_FREQ_LEN 4 /* DPCPU frequency, MHz */ #define MC_CMD_GET_CLOCK_OUT_DPCPU_FREQ_OFST 4 +#define MC_CMD_GET_CLOCK_OUT_DPCPU_FREQ_LEN 4 /***********************************/ @@ -12325,69 +12734,83 @@ #define MC_CMD_SET_CLOCK 0xad #undef MC_CMD_0xad_PRIVILEGE_CTG -#define MC_CMD_0xad_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0xad_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_SET_CLOCK_IN msgrequest */ #define MC_CMD_SET_CLOCK_IN_LEN 28 /* Requested frequency in MHz for system clock domain */ #define MC_CMD_SET_CLOCK_IN_SYS_FREQ_OFST 0 +#define MC_CMD_SET_CLOCK_IN_SYS_FREQ_LEN 4 /* enum: Leave the system clock domain frequency unchanged */ -#define MC_CMD_SET_CLOCK_IN_SYS_DOMAIN_DONT_CHANGE 0x0 +#define MC_CMD_SET_CLOCK_IN_SYS_DOMAIN_DONT_CHANGE 0x0 /* Requested frequency in MHz for inter-core clock domain */ #define MC_CMD_SET_CLOCK_IN_ICORE_FREQ_OFST 4 +#define MC_CMD_SET_CLOCK_IN_ICORE_FREQ_LEN 4 /* enum: Leave the inter-core clock domain frequency unchanged */ -#define MC_CMD_SET_CLOCK_IN_ICORE_DOMAIN_DONT_CHANGE 0x0 +#define MC_CMD_SET_CLOCK_IN_ICORE_DOMAIN_DONT_CHANGE 0x0 /* Requested frequency in MHz for DPCPU clock domain */ #define MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_OFST 8 +#define MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_LEN 4 /* enum: Leave the DPCPU clock domain frequency unchanged */ -#define MC_CMD_SET_CLOCK_IN_DPCPU_DOMAIN_DONT_CHANGE 0x0 +#define MC_CMD_SET_CLOCK_IN_DPCPU_DOMAIN_DONT_CHANGE 0x0 /* Requested frequency in MHz for PCS clock domain */ #define MC_CMD_SET_CLOCK_IN_PCS_FREQ_OFST 12 +#define MC_CMD_SET_CLOCK_IN_PCS_FREQ_LEN 4 /* enum: Leave the PCS clock domain frequency unchanged */ -#define MC_CMD_SET_CLOCK_IN_PCS_DOMAIN_DONT_CHANGE 0x0 +#define MC_CMD_SET_CLOCK_IN_PCS_DOMAIN_DONT_CHANGE 0x0 /* Requested frequency in MHz for MC clock domain */ #define MC_CMD_SET_CLOCK_IN_MC_FREQ_OFST 16 +#define MC_CMD_SET_CLOCK_IN_MC_FREQ_LEN 4 /* enum: 
Leave the MC clock domain frequency unchanged */ -#define MC_CMD_SET_CLOCK_IN_MC_DOMAIN_DONT_CHANGE 0x0 +#define MC_CMD_SET_CLOCK_IN_MC_DOMAIN_DONT_CHANGE 0x0 /* Requested frequency in MHz for rmon clock domain */ #define MC_CMD_SET_CLOCK_IN_RMON_FREQ_OFST 20 +#define MC_CMD_SET_CLOCK_IN_RMON_FREQ_LEN 4 /* enum: Leave the rmon clock domain frequency unchanged */ -#define MC_CMD_SET_CLOCK_IN_RMON_DOMAIN_DONT_CHANGE 0x0 +#define MC_CMD_SET_CLOCK_IN_RMON_DOMAIN_DONT_CHANGE 0x0 /* Requested frequency in MHz for vswitch clock domain */ #define MC_CMD_SET_CLOCK_IN_VSWITCH_FREQ_OFST 24 +#define MC_CMD_SET_CLOCK_IN_VSWITCH_FREQ_LEN 4 /* enum: Leave the vswitch clock domain frequency unchanged */ -#define MC_CMD_SET_CLOCK_IN_VSWITCH_DOMAIN_DONT_CHANGE 0x0 +#define MC_CMD_SET_CLOCK_IN_VSWITCH_DOMAIN_DONT_CHANGE 0x0 /* MC_CMD_SET_CLOCK_OUT msgresponse */ #define MC_CMD_SET_CLOCK_OUT_LEN 28 /* Resulting system frequency in MHz */ #define MC_CMD_SET_CLOCK_OUT_SYS_FREQ_OFST 0 +#define MC_CMD_SET_CLOCK_OUT_SYS_FREQ_LEN 4 /* enum: The system clock domain doesn't exist */ -#define MC_CMD_SET_CLOCK_OUT_SYS_DOMAIN_UNSUPPORTED 0x0 +#define MC_CMD_SET_CLOCK_OUT_SYS_DOMAIN_UNSUPPORTED 0x0 /* Resulting inter-core frequency in MHz */ #define MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_OFST 4 +#define MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_LEN 4 /* enum: The inter-core clock domain doesn't exist / isn't used */ -#define MC_CMD_SET_CLOCK_OUT_ICORE_DOMAIN_UNSUPPORTED 0x0 +#define MC_CMD_SET_CLOCK_OUT_ICORE_DOMAIN_UNSUPPORTED 0x0 /* Resulting DPCPU frequency in MHz */ #define MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_OFST 8 +#define MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_LEN 4 /* enum: The dpcpu clock domain doesn't exist */ -#define MC_CMD_SET_CLOCK_OUT_DPCPU_DOMAIN_UNSUPPORTED 0x0 +#define MC_CMD_SET_CLOCK_OUT_DPCPU_DOMAIN_UNSUPPORTED 0x0 /* Resulting PCS frequency in MHz */ #define MC_CMD_SET_CLOCK_OUT_PCS_FREQ_OFST 12 +#define MC_CMD_SET_CLOCK_OUT_PCS_FREQ_LEN 4 /* enum: The PCS clock domain doesn't exist / isn't controlled */ -#define MC_CMD_SET_CLOCK_OUT_PCS_DOMAIN_UNSUPPORTED 0x0 +#define MC_CMD_SET_CLOCK_OUT_PCS_DOMAIN_UNSUPPORTED 0x0 /* Resulting MC frequency in MHz */ #define MC_CMD_SET_CLOCK_OUT_MC_FREQ_OFST 16 +#define MC_CMD_SET_CLOCK_OUT_MC_FREQ_LEN 4 /* enum: The MC clock domain doesn't exist / isn't controlled */ -#define MC_CMD_SET_CLOCK_OUT_MC_DOMAIN_UNSUPPORTED 0x0 +#define MC_CMD_SET_CLOCK_OUT_MC_DOMAIN_UNSUPPORTED 0x0 /* Resulting rmon frequency in MHz */ #define MC_CMD_SET_CLOCK_OUT_RMON_FREQ_OFST 20 +#define MC_CMD_SET_CLOCK_OUT_RMON_FREQ_LEN 4 /* enum: The rmon clock domain doesn't exist / isn't controlled */ -#define MC_CMD_SET_CLOCK_OUT_RMON_DOMAIN_UNSUPPORTED 0x0 +#define MC_CMD_SET_CLOCK_OUT_RMON_DOMAIN_UNSUPPORTED 0x0 /* Resulting vswitch frequency in MHz */ #define MC_CMD_SET_CLOCK_OUT_VSWITCH_FREQ_OFST 24 +#define MC_CMD_SET_CLOCK_OUT_VSWITCH_FREQ_LEN 4 /* enum: The vswitch clock domain doesn't exist / isn't controlled */ -#define MC_CMD_SET_CLOCK_OUT_VSWITCH_DOMAIN_UNSUPPORTED 0x0 +#define MC_CMD_SET_CLOCK_OUT_VSWITCH_DOMAIN_UNSUPPORTED 0x0 /***********************************/ @@ -12397,27 +12820,28 @@ #define MC_CMD_DPCPU_RPC 0xae #undef MC_CMD_0xae_PRIVILEGE_CTG -#define MC_CMD_0xae_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0xae_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_DPCPU_RPC_IN msgrequest */ #define MC_CMD_DPCPU_RPC_IN_LEN 36 #define MC_CMD_DPCPU_RPC_IN_CPU_OFST 0 +#define MC_CMD_DPCPU_RPC_IN_CPU_LEN 4 /* enum: RxDPCPU0 */ -#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX0 0x0 +#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX0 0x0 
/* enum: TxDPCPU0 */ -#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX0 0x1 +#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX0 0x1 /* enum: TxDPCPU1 */ -#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX1 0x2 +#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX1 0x2 /* enum: RxDPCPU1 (Medford only) */ -#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX1 0x3 +#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX1 0x3 /* enum: RxDPCPU (will be for the calling function; for now, just an alias of * DPCPU_RX0) */ -#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX 0x80 +#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX 0x80 /* enum: TxDPCPU (will be for the calling function; for now, just an alias of * DPCPU_TX0) */ -#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX 0x81 +#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX 0x81 /* First 8 bits [39:32] of DATA are consumed by MC-DPCPU protocol and must be * initialised to zero */ @@ -12425,15 +12849,15 @@ #define MC_CMD_DPCPU_RPC_IN_DATA_LEN 32 #define MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_LBN 8 #define MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_WIDTH 8 -#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_READ 0x6 /* enum */ -#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_WRITE 0x7 /* enum */ -#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_SELF_TEST 0xc /* enum */ -#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_CSR_ACCESS 0xe /* enum */ -#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_READ 0x46 /* enum */ -#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_WRITE 0x47 /* enum */ -#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SELF_TEST 0x4a /* enum */ -#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_CSR_ACCESS 0x4c /* enum */ -#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SET_MC_REPLAY_CNTXT 0x4d /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_READ 0x6 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_WRITE 0x7 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_SELF_TEST 0xc /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_CSR_ACCESS 0xe /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_READ 0x46 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_WRITE 0x47 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SELF_TEST 0x4a /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_CSR_ACCESS 0x4c /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SET_MC_REPLAY_CNTXT 0x4d /* enum */ #define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_LBN 16 #define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_WIDTH 16 #define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_ADDR_LBN 16 @@ -12444,11 +12868,11 @@ #define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_INFO_WIDTH 240 #define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_LBN 16 #define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_WIDTH 16 -#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_STOP_RETURN_RESULT 0x0 /* enum */ -#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_READ 0x1 /* enum */ -#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE 0x2 /* enum */ -#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE_READ 0x3 /* enum */ -#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_PIPELINED_READ 0x4 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_STOP_RETURN_RESULT 0x0 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_READ 0x1 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE 0x2 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE_READ 0x3 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_PIPELINED_READ 0x4 /* enum */ #define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_LBN 48 #define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_WIDTH 16 #define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_RPT_COUNT_LBN 64 @@ -12457,21 +12881,24 @@ #define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_GAP_DELAY_WIDTH 16 
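
The _LBN/_WIDTH pairs in this block locate sub-fields by bit number inside the 32-byte DATA area rather than by byte offset. Below is a minimal sketch of packing such a sub-field by hand, assuming little-endian bit numbering relative to the start of DATA; mcdi_set_bits() and dpcpu_csr_pipelined_read() are hypothetical illustrations, not APIs of this driver, which has its own MCDI field accessors.

#include <stdint.h>

/*
 * Hypothetical helper (illustration only): write 'width' bits of
 * 'value' into 'buf', starting at little-endian bit number 'lbn'.
 */
static void
mcdi_set_bits(uint8_t *buf, unsigned int lbn, unsigned int width,
	      uint32_t value)
{
	unsigned int i;

	for (i = 0; i < width; i++) {
		unsigned int bit = lbn + i;

		if (value & (1u << i))
			buf[bit / 8] |= (uint8_t)(1u << (bit % 8));
		else
			buf[bit / 8] &= (uint8_t)~(1u << (bit % 8));
	}
}

/*
 * Example: describe a pipelined CSR read with a 10-cycle start delay.
 * 'data' points at the (zero-initialised) DATA area of an
 * MC_CMD_DPCPU_RPC_IN payload.
 */
static void
dpcpu_csr_pipelined_read(uint8_t *data)
{
	mcdi_set_bits(data, MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_LBN,
	    MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_WIDTH,
	    MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_PIPELINED_READ);
	mcdi_set_bits(data, MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_LBN,
	    MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_WIDTH, 10);
}
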
#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_LBN 16 #define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_WIDTH 16 -#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_CUT_THROUGH 0x1 /* enum */ -#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD 0x2 /* enum */ -#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD_FIRST 0x3 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_CUT_THROUGH 0x1 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD 0x2 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD_FIRST 0x3 /* enum */ #define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_LBN 64 #define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_WIDTH 16 #define MC_CMD_DPCPU_RPC_IN_WDATA_OFST 12 #define MC_CMD_DPCPU_RPC_IN_WDATA_LEN 24 /* Register data to write. Only valid in write/write-read. */ #define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_DATA_OFST 16 +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_DATA_LEN 4 /* Register address. */ #define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_ADDRESS_OFST 20 +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_ADDRESS_LEN 4 /* MC_CMD_DPCPU_RPC_OUT msgresponse */ #define MC_CMD_DPCPU_RPC_OUT_LEN 36 #define MC_CMD_DPCPU_RPC_OUT_RC_OFST 0 +#define MC_CMD_DPCPU_RPC_OUT_RC_LEN 4 /* DATA */ #define MC_CMD_DPCPU_RPC_OUT_DATA_OFST 4 #define MC_CMD_DPCPU_RPC_OUT_DATA_LEN 32 @@ -12482,9 +12909,13 @@ #define MC_CMD_DPCPU_RPC_OUT_RDATA_OFST 12 #define MC_CMD_DPCPU_RPC_OUT_RDATA_LEN 24 #define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_1_OFST 12 +#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_1_LEN 4 #define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_2_OFST 16 +#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_2_LEN 4 #define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_3_OFST 20 +#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_3_LEN 4 #define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_4_OFST 24 +#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_4_LEN 4 /***********************************/ @@ -12500,6 +12931,7 @@ #define MC_CMD_TRIGGER_INTERRUPT_IN_LEN 4 /* Interrupt level relative to base for function. */ #define MC_CMD_TRIGGER_INTERRUPT_IN_INTR_LEVEL_OFST 0 +#define MC_CMD_TRIGGER_INTERRUPT_IN_INTR_LEVEL_LEN 4 /* MC_CMD_TRIGGER_INTERRUPT_OUT msgresponse */ #define MC_CMD_TRIGGER_INTERRUPT_OUT_LEN 0 @@ -12512,14 +12944,15 @@ #define MC_CMD_SHMBOOT_OP 0xe6 #undef MC_CMD_0xe6_PRIVILEGE_CTG -#define MC_CMD_0xe6_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0xe6_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND /* MC_CMD_SHMBOOT_OP_IN msgrequest */ #define MC_CMD_SHMBOOT_OP_IN_LEN 4 /* Identifies the operation to perform */ #define MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_OFST 0 +#define MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_LEN 4 /* enum: Copy slave_data section to the slave core. 
(Greenport only) */ -#define MC_CMD_SHMBOOT_OP_IN_PUSH_SLAVE_DATA 0x0 +#define MC_CMD_SHMBOOT_OP_IN_PUSH_SLAVE_DATA 0x0 /* MC_CMD_SHMBOOT_OP_OUT msgresponse */ #define MC_CMD_SHMBOOT_OP_OUT_LEN 0 @@ -12532,13 +12965,16 @@ #define MC_CMD_CAP_BLK_READ 0xe7 #undef MC_CMD_0xe7_PRIVILEGE_CTG -#define MC_CMD_0xe7_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0xe7_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_CAP_BLK_READ_IN msgrequest */ #define MC_CMD_CAP_BLK_READ_IN_LEN 12 #define MC_CMD_CAP_BLK_READ_IN_CAP_REG_OFST 0 +#define MC_CMD_CAP_BLK_READ_IN_CAP_REG_LEN 4 #define MC_CMD_CAP_BLK_READ_IN_ADDR_OFST 4 +#define MC_CMD_CAP_BLK_READ_IN_ADDR_LEN 4 #define MC_CMD_CAP_BLK_READ_IN_COUNT_OFST 8 +#define MC_CMD_CAP_BLK_READ_IN_COUNT_LEN 4 /* MC_CMD_CAP_BLK_READ_OUT msgresponse */ #define MC_CMD_CAP_BLK_READ_OUT_LENMIN 8 @@ -12559,53 +12995,77 @@ #define MC_CMD_DUMP_DO 0xe8 #undef MC_CMD_0xe8_PRIVILEGE_CTG -#define MC_CMD_0xe8_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0xe8_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_DUMP_DO_IN msgrequest */ #define MC_CMD_DUMP_DO_IN_LEN 52 #define MC_CMD_DUMP_DO_IN_PADDING_OFST 0 +#define MC_CMD_DUMP_DO_IN_PADDING_LEN 4 #define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_OFST 4 -#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM 0x0 /* enum */ -#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_DEFAULT 0x1 /* enum */ +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_LEN 4 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM 0x0 /* enum */ +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_DEFAULT 0x1 /* enum */ #define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_TYPE_OFST 8 -#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_NVRAM 0x1 /* enum */ -#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY 0x2 /* enum */ -#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY_MLI 0x3 /* enum */ -#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_UART 0x4 /* enum */ +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_TYPE_LEN 4 +#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_NVRAM 0x1 /* enum */ +#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY 0x2 /* enum */ +#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY_MLI 0x3 /* enum */ +#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_UART 0x4 /* enum */ #define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 12 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_LEN 4 #define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_OFST 16 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_LEN 4 #define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 12 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_LEN 4 #define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 16 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_LEN 4 #define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 12 -#define MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_PAGE_SIZE 0x1000 /* enum */ +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_LEN 4 +#define MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_PAGE_SIZE 0x1000 /* enum */ #define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 16 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_LEN 4 #define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 20 -#define MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_MAX_DEPTH 0x2 /* enum */ +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_LEN 4 +#define MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_MAX_DEPTH 0x2 /* enum */ #define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_OFST 12 
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_LEN 4 /* enum: The uart port this command was received over (if using a uart * transport) */ -#define MC_CMD_DUMP_DO_IN_UART_PORT_SRC 0xff +#define MC_CMD_DUMP_DO_IN_UART_PORT_SRC 0xff #define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_SIZE_OFST 24 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_SIZE_LEN 4 #define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_OFST 28 -#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM 0x0 /* enum */ -#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_NVRAM_DUMP_PARTITION 0x1 /* enum */ +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_LEN 4 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM 0x0 /* enum */ +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_NVRAM_DUMP_PARTITION 0x1 /* enum */ #define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_TYPE_OFST 32 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_TYPE_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */ #define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 36 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_LEN 4 #define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_OFST 40 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_LEN 4 #define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 36 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_LEN 4 #define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 40 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_LEN 4 #define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 36 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_LEN 4 #define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 40 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_LEN 4 #define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 44 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_LEN 4 #define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_UART_PORT_OFST 36 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_UART_PORT_LEN 4 #define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_SIZE_OFST 48 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_SIZE_LEN 4 /* MC_CMD_DUMP_DO_OUT msgresponse */ #define MC_CMD_DUMP_DO_OUT_LEN 4 #define MC_CMD_DUMP_DO_OUT_DUMPFILE_SIZE_OFST 0 +#define MC_CMD_DUMP_DO_OUT_DUMPFILE_SIZE_LEN 4 /***********************************/ @@ -12615,41 +13075,64 @@ #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED 0xe9 #undef MC_CMD_0xe9_PRIVILEGE_CTG -#define MC_CMD_0xe9_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0xe9_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN msgrequest */ #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_LEN 52 #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_ENABLE_OFST 0 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_ENABLE_LEN 4 #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_OFST 4 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC */ #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_TYPE_OFST 8 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_TYPE_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */ #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 12 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_LEN 4 #define 
MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_OFST 16 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_LEN 4 #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 12 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_LEN 4 #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 16 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_LEN 4 #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 12 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_LEN 4 #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 16 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_LEN 4 #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 20 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_LEN 4 #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_OFST 12 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_LEN 4 #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_SIZE_OFST 24 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_SIZE_LEN 4 #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_OFST 28 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPFILE_DST */ #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_TYPE_OFST 32 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_TYPE_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */ #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 36 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_LEN 4 #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_OFST 40 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_LEN 4 #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 36 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_LEN 4 #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 40 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_LEN 4 #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 36 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_LEN 4 #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 40 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_LEN 4 #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 44 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_LEN 4 #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_UART_PORT_OFST 36 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_UART_PORT_LEN 4 #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_SIZE_OFST 48 +#define 
MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_SIZE_LEN 4 /***********************************/ @@ -12661,17 +13144,20 @@ #define MC_CMD_SET_PSU 0xea #undef MC_CMD_0xea_PRIVILEGE_CTG -#define MC_CMD_0xea_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0xea_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_SET_PSU_IN msgrequest */ #define MC_CMD_SET_PSU_IN_LEN 12 #define MC_CMD_SET_PSU_IN_PARAM_OFST 0 -#define MC_CMD_SET_PSU_IN_PARAM_SUPPLY_VOLTAGE 0x0 /* enum */ +#define MC_CMD_SET_PSU_IN_PARAM_LEN 4 +#define MC_CMD_SET_PSU_IN_PARAM_SUPPLY_VOLTAGE 0x0 /* enum */ #define MC_CMD_SET_PSU_IN_RAIL_OFST 4 -#define MC_CMD_SET_PSU_IN_RAIL_0V9 0x0 /* enum */ -#define MC_CMD_SET_PSU_IN_RAIL_1V2 0x1 /* enum */ +#define MC_CMD_SET_PSU_IN_RAIL_LEN 4 +#define MC_CMD_SET_PSU_IN_RAIL_0V9 0x0 /* enum */ +#define MC_CMD_SET_PSU_IN_RAIL_1V2 0x1 /* enum */ /* desired value, eg voltage in mV */ #define MC_CMD_SET_PSU_IN_VALUE_OFST 8 +#define MC_CMD_SET_PSU_IN_VALUE_LEN 4 /* MC_CMD_SET_PSU_OUT msgresponse */ #define MC_CMD_SET_PSU_OUT_LEN 0 @@ -12692,7 +13178,9 @@ /* MC_CMD_GET_FUNCTION_INFO_OUT msgresponse */ #define MC_CMD_GET_FUNCTION_INFO_OUT_LEN 8 #define MC_CMD_GET_FUNCTION_INFO_OUT_PF_OFST 0 +#define MC_CMD_GET_FUNCTION_INFO_OUT_PF_LEN 4 #define MC_CMD_GET_FUNCTION_INFO_OUT_VF_OFST 4 +#define MC_CMD_GET_FUNCTION_INFO_OUT_VF_LEN 4 /***********************************/ @@ -12704,7 +13192,7 @@ #define MC_CMD_ENABLE_OFFLINE_BIST 0xed #undef MC_CMD_0xed_PRIVILEGE_CTG -#define MC_CMD_0xed_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0xed_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND /* MC_CMD_ENABLE_OFFLINE_BIST_IN msgrequest */ #define MC_CMD_ENABLE_OFFLINE_BIST_IN_LEN 0 @@ -12730,12 +13218,16 @@ #define MC_CMD_UART_SEND_DATA_OUT_LEN(num) (16+1*(num)) /* CRC32 over OFFSET, LENGTH, RESERVED, DATA */ #define MC_CMD_UART_SEND_DATA_OUT_CHECKSUM_OFST 0 +#define MC_CMD_UART_SEND_DATA_OUT_CHECKSUM_LEN 4 /* Offset at which to write the data */ #define MC_CMD_UART_SEND_DATA_OUT_OFFSET_OFST 4 +#define MC_CMD_UART_SEND_DATA_OUT_OFFSET_LEN 4 /* Length of data */ #define MC_CMD_UART_SEND_DATA_OUT_LENGTH_OFST 8 +#define MC_CMD_UART_SEND_DATA_OUT_LENGTH_LEN 4 /* Reserved for future use */ #define MC_CMD_UART_SEND_DATA_OUT_RESERVED_OFST 12 +#define MC_CMD_UART_SEND_DATA_OUT_RESERVED_LEN 4 #define MC_CMD_UART_SEND_DATA_OUT_DATA_OFST 16 #define MC_CMD_UART_SEND_DATA_OUT_DATA_LEN 1 #define MC_CMD_UART_SEND_DATA_OUT_DATA_MINNUM 0 @@ -12759,12 +13251,16 @@ #define MC_CMD_UART_RECV_DATA_OUT_LEN 16 /* CRC32 over OFFSET, LENGTH, RESERVED */ #define MC_CMD_UART_RECV_DATA_OUT_CHECKSUM_OFST 0 +#define MC_CMD_UART_RECV_DATA_OUT_CHECKSUM_LEN 4 /* Offset from which to read the data */ #define MC_CMD_UART_RECV_DATA_OUT_OFFSET_OFST 4 +#define MC_CMD_UART_RECV_DATA_OUT_OFFSET_LEN 4 /* Length of data */ #define MC_CMD_UART_RECV_DATA_OUT_LENGTH_OFST 8 +#define MC_CMD_UART_RECV_DATA_OUT_LENGTH_LEN 4 /* Reserved for future use */ #define MC_CMD_UART_RECV_DATA_OUT_RESERVED_OFST 12 +#define MC_CMD_UART_RECV_DATA_OUT_RESERVED_LEN 4 /* MC_CMD_UART_RECV_DATA_IN msgresponse */ #define MC_CMD_UART_RECV_DATA_IN_LENMIN 16 @@ -12772,12 +13268,16 @@ #define MC_CMD_UART_RECV_DATA_IN_LEN(num) (16+1*(num)) /* CRC32 over RESERVED1, RESERVED2, RESERVED3, DATA */ #define MC_CMD_UART_RECV_DATA_IN_CHECKSUM_OFST 0 +#define MC_CMD_UART_RECV_DATA_IN_CHECKSUM_LEN 4 /* Offset at which to write the data */ #define MC_CMD_UART_RECV_DATA_IN_RESERVED1_OFST 4 +#define MC_CMD_UART_RECV_DATA_IN_RESERVED1_LEN 4 /* Length of data */ #define MC_CMD_UART_RECV_DATA_IN_RESERVED2_OFST 
8 +#define MC_CMD_UART_RECV_DATA_IN_RESERVED2_LEN 4 /* Reserved for future use */ #define MC_CMD_UART_RECV_DATA_IN_RESERVED3_OFST 12 +#define MC_CMD_UART_RECV_DATA_IN_RESERVED3_LEN 4 #define MC_CMD_UART_RECV_DATA_IN_DATA_OFST 16 #define MC_CMD_UART_RECV_DATA_IN_DATA_LEN 1 #define MC_CMD_UART_RECV_DATA_IN_DATA_MINNUM 0 @@ -12791,14 +13291,16 @@ #define MC_CMD_READ_FUSES 0xf0 #undef MC_CMD_0xf0_PRIVILEGE_CTG -#define MC_CMD_0xf0_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0xf0_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_READ_FUSES_IN msgrequest */ #define MC_CMD_READ_FUSES_IN_LEN 8 /* Offset in OTP to read */ #define MC_CMD_READ_FUSES_IN_OFFSET_OFST 0 +#define MC_CMD_READ_FUSES_IN_OFFSET_LEN 4 /* Length of data to read in bytes */ #define MC_CMD_READ_FUSES_IN_LENGTH_OFST 4 +#define MC_CMD_READ_FUSES_IN_LENGTH_LEN 4 /* MC_CMD_READ_FUSES_OUT msgresponse */ #define MC_CMD_READ_FUSES_OUT_LENMIN 4 @@ -12806,6 +13308,7 @@ #define MC_CMD_READ_FUSES_OUT_LEN(num) (4+1*(num)) /* Length of returned OTP data in bytes */ #define MC_CMD_READ_FUSES_OUT_LENGTH_OFST 0 +#define MC_CMD_READ_FUSES_OUT_LENGTH_LEN 4 /* Returned data */ #define MC_CMD_READ_FUSES_OUT_DATA_OFST 4 #define MC_CMD_READ_FUSES_OUT_DATA_LEN 1 @@ -12820,7 +13323,7 @@ #define MC_CMD_KR_TUNE 0xf1 #undef MC_CMD_0xf1_PRIVILEGE_CTG -#define MC_CMD_0xf1_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0xf1_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND /* MC_CMD_KR_TUNE_IN msgrequest */ #define MC_CMD_KR_TUNE_IN_LENMIN 4 @@ -12830,26 +13333,30 @@ #define MC_CMD_KR_TUNE_IN_KR_TUNE_OP_OFST 0 #define MC_CMD_KR_TUNE_IN_KR_TUNE_OP_LEN 1 /* enum: Get current RXEQ settings */ -#define MC_CMD_KR_TUNE_IN_RXEQ_GET 0x0 +#define MC_CMD_KR_TUNE_IN_RXEQ_GET 0x0 /* enum: Override RXEQ settings */ -#define MC_CMD_KR_TUNE_IN_RXEQ_SET 0x1 +#define MC_CMD_KR_TUNE_IN_RXEQ_SET 0x1 /* enum: Get current TX Driver settings */ -#define MC_CMD_KR_TUNE_IN_TXEQ_GET 0x2 +#define MC_CMD_KR_TUNE_IN_TXEQ_GET 0x2 /* enum: Override TX Driver settings */ -#define MC_CMD_KR_TUNE_IN_TXEQ_SET 0x3 +#define MC_CMD_KR_TUNE_IN_TXEQ_SET 0x3 /* enum: Force KR Serdes reset / recalibration */ -#define MC_CMD_KR_TUNE_IN_RECAL 0x4 +#define MC_CMD_KR_TUNE_IN_RECAL 0x4 /* enum: Start KR Serdes Eye diagram plot on a given lane. Lane must have valid * signal. */ -#define MC_CMD_KR_TUNE_IN_START_EYE_PLOT 0x5 +#define MC_CMD_KR_TUNE_IN_START_EYE_PLOT 0x5 /* enum: Poll KR Serdes Eye diagram plot. Returns one row of BER data. The * caller should call this command repeatedly after starting eye plot, until no * more data is returned. */ -#define MC_CMD_KR_TUNE_IN_POLL_EYE_PLOT 0x6 +#define MC_CMD_KR_TUNE_IN_POLL_EYE_PLOT 0x6 /* enum: Read Figure Of Merit (eye quality, higher is better). 
*/ -#define MC_CMD_KR_TUNE_IN_READ_FOM 0x7 +#define MC_CMD_KR_TUNE_IN_READ_FOM 0x7 +/* enum: Start/stop link training frames */ +#define MC_CMD_KR_TUNE_IN_LINK_TRAIN_RUN 0x8 +/* enum: Issue KR link training command (control training coefficients) */ +#define MC_CMD_KR_TUNE_IN_LINK_TRAIN_CMD 0x9 /* Align the arguments to 32 bits */ #define MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_OFST 1 #define MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_LEN 3 @@ -12883,44 +13390,98 @@ #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0 #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8 /* enum: Attenuation (0-15, Huntington) */ -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_ATT 0x0 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_ATT 0x0 /* enum: CTLE Boost (0-15, Huntington) */ -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_BOOST 0x1 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_BOOST 0x1 /* enum: Edge DFE Tap1 (Huntington - 0 - max negative, 64 - zero, 127 - max * positive, Medford - 0-31) */ -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP1 0x2 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP1 0x2 /* enum: Edge DFE Tap2 (Huntington - 0 - max negative, 32 - zero, 63 - max * positive, Medford - 0-31) */ -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP2 0x3 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP2 0x3 /* enum: Edge DFE Tap3 (Huntington - 0 - max negative, 32 - zero, 63 - max * positive, Medford - 0-16) */ -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP3 0x4 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP3 0x4 /* enum: Edge DFE Tap4 (Huntington - 0 - max negative, 32 - zero, 63 - max * positive, Medford - 0-16) */ -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP4 0x5 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP4 0x5 /* enum: Edge DFE Tap5 (Huntington - 0 - max negative, 32 - zero, 63 - max * positive, Medford - 0-16) */ -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP5 0x6 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP5 0x6 /* enum: Edge DFE DLEV (0-128 for Medford) */ -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_DLEV 0x7 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_DLEV 0x7 /* enum: Variable Gain Amplifier (0-15, Medford) */ -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_VGA 0x8 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_VGA 0x8 /* enum: CTLE EQ Capacitor (0-15, Medford) */ -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_EQC 0x9 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_EQC 0x9 /* enum: CTLE EQ Resistor (0-7, Medford) */ -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_EQRES 0xa +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_EQRES 0xa +/* enum: CTLE gain (0-31, Medford2) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_GAIN 0xb +/* enum: CTLE pole (0-31, Medford2) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_POLE 0xc +/* enum: CTLE peaking (0-31, Medford2) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_PEAK 0xd +/* enum: DFE Tap1 - even path (Medford2 - 6 bit signed (-29 - +29)) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP1_EVEN 0xe +/* enum: DFE Tap1 - odd path (Medford2 - 6 bit signed (-29 - +29)) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP1_ODD 0xf +/* enum: DFE Tap2 (Medford2 - 6 bit signed (-20 - +20)) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP2 0x10 +/* enum: DFE Tap3 (Medford2 - 6 bit signed (-20 - +20)) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP3 0x11 +/* enum: DFE Tap4 (Medford2 - 6 bit signed (-20 - +20)) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP4 0x12 +/* enum: DFE Tap5 (Medford2 - 6 bit signed (-24 - +24)) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP5 0x13 +/* enum: DFE Tap6 (Medford2 - 6 bit signed (-24 - +24)) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP6 0x14 +/* enum: DFE Tap7 
(Medford2 - 6 bit signed (-24 - +24)) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP7 0x15 +/* enum: DFE Tap8 (Medford2 - 6 bit signed (-24 - +24)) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP8 0x16 +/* enum: DFE Tap9 (Medford2 - 6 bit signed (-24 - +24)) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP9 0x17 +/* enum: DFE Tap10 (Medford2 - 6 bit signed (-24 - +24)) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP10 0x18 +/* enum: DFE Tap11 (Medford2 - 6 bit signed (-24 - +24)) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP11 0x19 +/* enum: DFE Tap12 (Medford2 - 6 bit signed (-24 - +24)) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP12 0x1a +/* enum: I/Q clk offset (Medford2 - 4 bit signed (-5 - +5))) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_IQ_OFF 0x1b +/* enum: Negative h1 polarity data sampler offset calibration code, even path + * (Medford2 - 6 bit signed (-29 - +29))) + */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1N_OFF_EVEN 0x1c +/* enum: Negative h1 polarity data sampler offset calibration code, odd path + * (Medford2 - 6 bit signed (-29 - +29))) + */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1N_OFF_ODD 0x1d +/* enum: Positive h1 polarity data sampler offset calibration code, even path + * (Medford2 - 6 bit signed (-29 - +29))) + */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1P_OFF_EVEN 0x1e +/* enum: Positive h1 polarity data sampler offset calibration code, odd path + * (Medford2 - 6 bit signed (-29 - +29))) + */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1P_OFF_ODD 0x1f +/* enum: CDR calibration loop code (Medford2) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CDR_PVT 0x20 +/* enum: CDR integral loop code (Medford2) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CDR_INTEG 0x21 #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8 #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 3 -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */ -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_1 0x1 /* enum */ -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */ -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_3 0x3 /* enum */ -#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_ALL 0x4 /* enum */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_1 0x1 /* enum */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_3 0x3 /* enum */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_ALL 0x4 /* enum */ #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_LBN 11 #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_WIDTH 1 #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_RESERVED_LBN 12 @@ -12985,39 +13546,39 @@ #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM 63 #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0 #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8 -/* enum: TX Amplitude (Huntington, Medford) */ -#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV 0x0 +/* enum: TX Amplitude (Huntington, Medford, Medford2) */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV 0x0 /* enum: De-Emphasis Tap1 Magnitude (0-7) (Huntington) */ -#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_MODE 0x1 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_MODE 0x1 /* enum: De-Emphasis Tap1 Fine */ -#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_DTLEV 0x2 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_DTLEV 0x2 /* enum: De-Emphasis Tap2 Magnitude (0-6) (Huntington) */ -#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_D2 0x3 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_D2 0x3 /* enum: De-Emphasis Tap2 Fine (Huntington) */ -#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_D2TLEV 0x4 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_D2TLEV 0x4 /* enum: 
Pre-Emphasis Magnitude (Huntington) */ -#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_E 0x5 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_E 0x5 /* enum: Pre-Emphasis Fine (Huntington) */ -#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_ETLEV 0x6 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_ETLEV 0x6 /* enum: TX Slew Rate Coarse control (Huntington) */ -#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_PREDRV_DLY 0x7 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_PREDRV_DLY 0x7 /* enum: TX Slew Rate Fine control (Huntington) */ -#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_SR_SET 0x8 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_SR_SET 0x8 /* enum: TX Termination Impedance control (Huntington) */ -#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_RT_SET 0x9 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_RT_SET 0x9 /* enum: TX Amplitude Fine control (Medford) */ -#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV_FINE 0xa -/* enum: Pre-shoot Tap (Medford) */ -#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_ADV 0xb -/* enum: De-emphasis Tap (Medford) */ -#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_DLY 0xc +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV_FINE 0xa +/* enum: Pre-shoot Tap (Medford, Medford2) */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_ADV 0xb +/* enum: De-emphasis Tap (Medford, Medford2) */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_DLY 0xc #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8 #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 3 -#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_0 0x0 /* enum */ -#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_1 0x1 /* enum */ -#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_2 0x2 /* enum */ -#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_3 0x3 /* enum */ -#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_ALL 0x4 /* enum */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_0 0x0 /* enum */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_1 0x1 /* enum */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_2 0x2 /* enum */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_3 0x3 /* enum */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_ALL 0x4 /* enum */ #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED_LBN 11 #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED_WIDTH 5 #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_INITIAL_LBN 16 @@ -13078,7 +13639,27 @@ /* Align the arguments to 32 bits */ #define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_RSVD_OFST 1 #define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_RSVD_LEN 3 +/* Port-relative lane to scan eye on */ #define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_LANE_OFST 4 +#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_LANE_LEN 4 + +/* MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN msgrequest */ +#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LEN 12 +/* Requested operation */ +#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_OP_OFST 0 +#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_OP_LEN 1 +/* Align the arguments to 32 bits */ +#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_RSVD_OFST 1 +#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_RSVD_LEN 3 +#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_OFST 4 +#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_LEN 4 +#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_NUM_LBN 0 +#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_NUM_WIDTH 8 +#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_ABS_REL_LBN 31 +#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_ABS_REL_WIDTH 1 +/* Scan duration / cycle count */ +#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_BER_OFST 8 +#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_BER_LEN 4 /* MC_CMD_KR_TUNE_START_EYE_PLOT_OUT msgresponse */ #define MC_CMD_KR_TUNE_START_EYE_PLOT_OUT_LEN 0 @@ -13110,10 +13691,91 @@ #define 
MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_RSVD_OFST 1 #define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_RSVD_LEN 3 #define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_OFST 4 +#define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_LEN 4 +#define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_NUM_LBN 0 +#define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_NUM_WIDTH 8 +#define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_ABS_REL_LBN 31 +#define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_ABS_REL_WIDTH 1 /* MC_CMD_KR_TUNE_READ_FOM_OUT msgresponse */ #define MC_CMD_KR_TUNE_READ_FOM_OUT_LEN 4 #define MC_CMD_KR_TUNE_READ_FOM_OUT_FOM_OFST 0 +#define MC_CMD_KR_TUNE_READ_FOM_OUT_FOM_LEN 4 + +/* MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN msgrequest */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_LEN 8 +/* Requested operation */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_KR_TUNE_OP_OFST 0 +#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_KR_TUNE_OP_LEN 1 +/* Align the arguments to 32 bits */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_KR_TUNE_RSVD_OFST 1 +#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_KR_TUNE_RSVD_LEN 3 +#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_RUN_OFST 4 +#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_RUN_LEN 4 +#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_STOP 0x0 /* enum */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_START 0x1 /* enum */ + +/* MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN msgrequest */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_LEN 28 +/* Requested operation */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_KR_TUNE_OP_OFST 0 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_KR_TUNE_OP_LEN 1 +/* Align the arguments to 32 bits */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_KR_TUNE_RSVD_OFST 1 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_KR_TUNE_RSVD_LEN 3 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_LANE_OFST 4 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_LANE_LEN 4 +/* Set INITIALIZE state */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_INITIALIZE_OFST 8 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_INITIALIZE_LEN 4 +/* Set PRESET state */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_PRESET_OFST 12 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_PRESET_LEN 4 +/* C(-1) request */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_CM1_OFST 16 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_CM1_LEN 4 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_REQ_HOLD 0x0 /* enum */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_REQ_INCREMENT 0x1 /* enum */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_REQ_DECREMENT 0x2 /* enum */ +/* C(0) request */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_C0_OFST 20 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_C0_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN/CM1 */ +/* C(+1) request */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_CP1_OFST 24 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_CP1_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN/CM1 */ + +/* MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT msgresponse */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_LEN 24 +/* C(-1) status */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CM1_STATUS_OFST 0 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CM1_STATUS_LEN 4 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_STATUS_NOT_UPDATED 0x0 /* enum */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_STATUS_UPDATED 0x1 /* enum */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_STATUS_MINIMUM 0x2 /* enum */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_STATUS_MAXIMUM 0x3 /* enum */ +/* C(0) status */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_C0_STATUS_OFST 4 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_C0_STATUS_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN/CM1 */ +/* C(+1) status */ 
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CP1_STATUS_OFST 8 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CP1_STATUS_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN/CM1 */ +/* C(-1) value */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CM1_VALUE_OFST 12 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CM1_VALUE_LEN 4 +/* C(0) value */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_C0_VALUE_OFST 16 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_C0_VALUE_LEN 4 +/* C(+1) status */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CP1_VALUE_OFST 20 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CP1_VALUE_LEN 4 /***********************************/ @@ -13123,7 +13785,7 @@ #define MC_CMD_PCIE_TUNE 0xf2 #undef MC_CMD_0xf2_PRIVILEGE_CTG -#define MC_CMD_0xf2_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0xf2_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND /* MC_CMD_PCIE_TUNE_IN msgrequest */ #define MC_CMD_PCIE_TUNE_IN_LENMIN 4 @@ -13133,22 +13795,22 @@ #define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_OP_OFST 0 #define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_OP_LEN 1 /* enum: Get current RXEQ settings */ -#define MC_CMD_PCIE_TUNE_IN_RXEQ_GET 0x0 +#define MC_CMD_PCIE_TUNE_IN_RXEQ_GET 0x0 /* enum: Override RXEQ settings */ -#define MC_CMD_PCIE_TUNE_IN_RXEQ_SET 0x1 +#define MC_CMD_PCIE_TUNE_IN_RXEQ_SET 0x1 /* enum: Get current TX Driver settings */ -#define MC_CMD_PCIE_TUNE_IN_TXEQ_GET 0x2 +#define MC_CMD_PCIE_TUNE_IN_TXEQ_GET 0x2 /* enum: Override TX Driver settings */ -#define MC_CMD_PCIE_TUNE_IN_TXEQ_SET 0x3 +#define MC_CMD_PCIE_TUNE_IN_TXEQ_SET 0x3 /* enum: Start PCIe Serdes Eye diagram plot on a given lane. */ -#define MC_CMD_PCIE_TUNE_IN_START_EYE_PLOT 0x5 +#define MC_CMD_PCIE_TUNE_IN_START_EYE_PLOT 0x5 /* enum: Poll PCIe Serdes Eye diagram plot. Returns one row of BER data. The * caller should call this command repeatedly after starting eye plot, until no * more data is returned. 
*/ -#define MC_CMD_PCIE_TUNE_IN_POLL_EYE_PLOT 0x6 +#define MC_CMD_PCIE_TUNE_IN_POLL_EYE_PLOT 0x6 /* enum: Enable the SERDES BIST and set it to generate a 200MHz square wave */ -#define MC_CMD_PCIE_TUNE_IN_BIST_SQUARE_WAVE 0x7 +#define MC_CMD_PCIE_TUNE_IN_BIST_SQUARE_WAVE 0x7 /* Align the arguments to 32 bits */ #define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_OFST 1 #define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_LEN 3 @@ -13182,46 +13844,46 @@ #define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0 #define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8 /* enum: Attenuation (0-15) */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_ATT 0x0 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_ATT 0x0 /* enum: CTLE Boost (0-15) */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_BOOST 0x1 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_BOOST 0x1 /* enum: DFE Tap1 (0 - max negative, 64 - zero, 127 - max positive) */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP1 0x2 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP1 0x2 /* enum: DFE Tap2 (0 - max negative, 32 - zero, 63 - max positive) */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP2 0x3 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP2 0x3 /* enum: DFE Tap3 (0 - max negative, 32 - zero, 63 - max positive) */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP3 0x4 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP3 0x4 /* enum: DFE Tap4 (0 - max negative, 32 - zero, 63 - max positive) */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP4 0x5 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP4 0x5 /* enum: DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive) */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP5 0x6 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP5 0x6 /* enum: DFE DLev */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_DLEV 0x7 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_DLEV 0x7 /* enum: Figure of Merit */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_FOM 0x8 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_FOM 0x8 /* enum: CTLE EQ Capacitor (HF Gain) */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_CTLE_EQC 0x9 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_CTLE_EQC 0x9 /* enum: CTLE EQ Resistor (DC Gain) */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_CTLE_EQRES 0xa +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_CTLE_EQRES 0xa #define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8 #define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 5 -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_1 0x1 /* enum */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_3 0x3 /* enum */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_4 0x4 /* enum */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_5 0x5 /* enum */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_6 0x6 /* enum */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_7 0x7 /* enum */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_8 0x8 /* enum */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_9 0x9 /* enum */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_10 0xa /* enum */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_11 0xb /* enum */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_12 0xc /* enum */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_13 0xd /* enum */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_14 0xe /* enum */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_15 0xf /* enum */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_ALL 0x10 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_1 0x1 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */ 
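/*
 * A minimal sketch of unpacking one RXEQ_GET_OUT parameter dword using the
 * _LBN/_WIDTH pairs above (PARAM_ID in bits 0..7, PARAM_LANE in bits
 * 8..12, with lane values per the LANE_* enums that follow).
 * pcie_tune_param_decode() is an illustrative helper, not an API from
 * this header.
 */
#include <stdint.h>

static void pcie_tune_param_decode(uint32_t param, unsigned int *id,
				   unsigned int *lane)
{
	*id = (param >> MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN) &
	      ((1u << MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH) - 1u);
	*lane = (param >> MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN) &
		((1u << MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH) - 1u);
}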
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_3 0x3 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_4 0x4 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_5 0x5 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_6 0x6 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_7 0x7 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_8 0x8 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_9 0x9 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_10 0xa /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_11 0xb /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_12 0xc /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_13 0xd /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_14 0xe /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_15 0xf /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_ALL 0x10 /* enum */ #define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_LBN 13 #define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_WIDTH 1 #define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_LBN 14 @@ -13285,15 +13947,15 @@ #define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0 #define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8 /* enum: TxMargin (PIPE) */ -#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_TXMARGIN 0x0 +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_TXMARGIN 0x0 /* enum: TxSwing (PIPE) */ -#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_TXSWING 0x1 +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_TXSWING 0x1 /* enum: De-emphasis coefficient C(-1) (PIPE) */ -#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CM1 0x2 +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CM1 0x2 /* enum: De-emphasis coefficient C(0) (PIPE) */ -#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_C0 0x3 +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_C0 0x3 /* enum: De-emphasis coefficient C(+1) (PIPE) */ -#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CP1 0x4 +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CP1 0x4 #define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8 #define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 4 /* Enum values, see field(s): */ @@ -13312,6 +13974,7 @@ #define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_OFST 1 #define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_LEN 3 #define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LANE_OFST 4 +#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LANE_LEN 4 /* MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT msgresponse */ #define MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT_LEN 0 @@ -13355,38 +14018,46 @@ #define MC_CMD_LICENSING_IN_LEN 4 /* identifies the type of operation requested */ #define MC_CMD_LICENSING_IN_OP_OFST 0 +#define MC_CMD_LICENSING_IN_OP_LEN 4 /* enum: re-read and apply licenses after a license key partition update; note * that this operation returns a zero-length response */ -#define MC_CMD_LICENSING_IN_OP_UPDATE_LICENSE 0x0 +#define MC_CMD_LICENSING_IN_OP_UPDATE_LICENSE 0x0 /* enum: report counts of installed licenses */ -#define MC_CMD_LICENSING_IN_OP_GET_KEY_STATS 0x1 +#define MC_CMD_LICENSING_IN_OP_GET_KEY_STATS 0x1 /* MC_CMD_LICENSING_OUT msgresponse */ #define MC_CMD_LICENSING_OUT_LEN 28 /* count of application keys which are valid */ #define MC_CMD_LICENSING_OUT_VALID_APP_KEYS_OFST 0 +#define MC_CMD_LICENSING_OUT_VALID_APP_KEYS_LEN 4 /* sum of UNVERIFIABLE_APP_KEYS + WRONG_NODE_APP_KEYS (for compatibility with * MC_CMD_FC_OP_LICENSE) */ #define MC_CMD_LICENSING_OUT_INVALID_APP_KEYS_OFST 4 +#define MC_CMD_LICENSING_OUT_INVALID_APP_KEYS_LEN 4 /* count of application keys which are invalid due to being blacklisted */ #define MC_CMD_LICENSING_OUT_BLACKLISTED_APP_KEYS_OFST 8 +#define 
MC_CMD_LICENSING_OUT_BLACKLISTED_APP_KEYS_LEN 4 /* count of application keys which are invalid due to being unverifiable */ #define MC_CMD_LICENSING_OUT_UNVERIFIABLE_APP_KEYS_OFST 12 +#define MC_CMD_LICENSING_OUT_UNVERIFIABLE_APP_KEYS_LEN 4 /* count of application keys which are invalid due to being for the wrong node */ #define MC_CMD_LICENSING_OUT_WRONG_NODE_APP_KEYS_OFST 16 +#define MC_CMD_LICENSING_OUT_WRONG_NODE_APP_KEYS_LEN 4 /* licensing state (for diagnostics; the exact meaning of the bits in this * field are private to the firmware) */ #define MC_CMD_LICENSING_OUT_LICENSING_STATE_OFST 20 +#define MC_CMD_LICENSING_OUT_LICENSING_STATE_LEN 4 /* licensing subsystem self-test report (for manftest) */ #define MC_CMD_LICENSING_OUT_LICENSING_SELF_TEST_OFST 24 +#define MC_CMD_LICENSING_OUT_LICENSING_SELF_TEST_LEN 4 /* enum: licensing subsystem self-test failed */ -#define MC_CMD_LICENSING_OUT_SELF_TEST_FAIL 0x0 +#define MC_CMD_LICENSING_OUT_SELF_TEST_FAIL 0x0 /* enum: licensing subsystem self-test passed */ -#define MC_CMD_LICENSING_OUT_SELF_TEST_PASS 0x1 +#define MC_CMD_LICENSING_OUT_SELF_TEST_PASS 0x1 /***********************************/ @@ -13403,37 +14074,44 @@ #define MC_CMD_LICENSING_V3_IN_LEN 4 /* identifies the type of operation requested */ #define MC_CMD_LICENSING_V3_IN_OP_OFST 0 +#define MC_CMD_LICENSING_V3_IN_OP_LEN 4 /* enum: re-read and apply licenses after a license key partition update; note * that this operation returns a zero-length response */ -#define MC_CMD_LICENSING_V3_IN_OP_UPDATE_LICENSE 0x0 +#define MC_CMD_LICENSING_V3_IN_OP_UPDATE_LICENSE 0x0 /* enum: report counts of installed licenses Returns EAGAIN if license * processing (updating) has been started but not yet completed. */ -#define MC_CMD_LICENSING_V3_IN_OP_REPORT_LICENSE 0x1 +#define MC_CMD_LICENSING_V3_IN_OP_REPORT_LICENSE 0x1 /* MC_CMD_LICENSING_V3_OUT msgresponse */ #define MC_CMD_LICENSING_V3_OUT_LEN 88 /* count of keys which are valid */ #define MC_CMD_LICENSING_V3_OUT_VALID_KEYS_OFST 0 +#define MC_CMD_LICENSING_V3_OUT_VALID_KEYS_LEN 4 /* sum of UNVERIFIABLE_KEYS + WRONG_NODE_KEYS (for compatibility with * MC_CMD_FC_OP_LICENSE) */ #define MC_CMD_LICENSING_V3_OUT_INVALID_KEYS_OFST 4 +#define MC_CMD_LICENSING_V3_OUT_INVALID_KEYS_LEN 4 /* count of keys which are invalid due to being unverifiable */ #define MC_CMD_LICENSING_V3_OUT_UNVERIFIABLE_KEYS_OFST 8 +#define MC_CMD_LICENSING_V3_OUT_UNVERIFIABLE_KEYS_LEN 4 /* count of keys which are invalid due to being for the wrong node */ #define MC_CMD_LICENSING_V3_OUT_WRONG_NODE_KEYS_OFST 12 +#define MC_CMD_LICENSING_V3_OUT_WRONG_NODE_KEYS_LEN 4 /* licensing state (for diagnostics; the exact meaning of the bits in this * field are private to the firmware) */ #define MC_CMD_LICENSING_V3_OUT_LICENSING_STATE_OFST 16 +#define MC_CMD_LICENSING_V3_OUT_LICENSING_STATE_LEN 4 /* licensing subsystem self-test report (for manftest) */ #define MC_CMD_LICENSING_V3_OUT_LICENSING_SELF_TEST_OFST 20 +#define MC_CMD_LICENSING_V3_OUT_LICENSING_SELF_TEST_LEN 4 /* enum: licensing subsystem self-test failed */ -#define MC_CMD_LICENSING_V3_OUT_SELF_TEST_FAIL 0x0 +#define MC_CMD_LICENSING_V3_OUT_SELF_TEST_FAIL 0x0 /* enum: licensing subsystem self-test passed */ -#define MC_CMD_LICENSING_V3_OUT_SELF_TEST_PASS 0x1 +#define MC_CMD_LICENSING_V3_OUT_SELF_TEST_PASS 0x1 /* bitmask of licensed applications */ #define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_OFST 24 #define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_LEN 8 @@ -13471,8 +14149,10 @@ #define MC_CMD_LICENSING_GET_ID_V3_OUT_LEN(num) 
(8+1*(num)) /* type of license (eg 3) */ #define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_TYPE_OFST 0 +#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_TYPE_LEN 4 /* length of the license ID (in bytes) */ #define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH_OFST 4 +#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH_LEN 4 /* the unique license ID of the adapter */ #define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_OFST 8 #define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LEN 1 @@ -13512,15 +14192,17 @@ #define MC_CMD_GET_LICENSED_APP_STATE_IN_LEN 4 /* application ID to query (LICENSED_APP_ID_xxx) */ #define MC_CMD_GET_LICENSED_APP_STATE_IN_APP_ID_OFST 0 +#define MC_CMD_GET_LICENSED_APP_STATE_IN_APP_ID_LEN 4 /* MC_CMD_GET_LICENSED_APP_STATE_OUT msgresponse */ #define MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN 4 /* state of this application */ #define MC_CMD_GET_LICENSED_APP_STATE_OUT_STATE_OFST 0 +#define MC_CMD_GET_LICENSED_APP_STATE_OUT_STATE_LEN 4 /* enum: no (or invalid) license is present for the application */ -#define MC_CMD_GET_LICENSED_APP_STATE_OUT_NOT_LICENSED 0x0 +#define MC_CMD_GET_LICENSED_APP_STATE_OUT_NOT_LICENSED 0x0 /* enum: a valid license is present for the application */ -#define MC_CMD_GET_LICENSED_APP_STATE_OUT_LICENSED 0x1 +#define MC_CMD_GET_LICENSED_APP_STATE_OUT_LICENSED 0x1 /***********************************/ @@ -13548,10 +14230,11 @@ #define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN 4 /* state of this application */ #define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_STATE_OFST 0 +#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_STATE_LEN 4 /* enum: no (or invalid) license is present for the application */ -#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_NOT_LICENSED 0x0 +#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_NOT_LICENSED 0x0 /* enum: a valid license is present for the application */ -#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LICENSED 0x1 +#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LICENSED 0x1 /***********************************/ @@ -13600,12 +14283,14 @@ #define MC_CMD_LICENSED_APP_OP_IN_LEN(num) (8+4*(num)) /* application ID */ #define MC_CMD_LICENSED_APP_OP_IN_APP_ID_OFST 0 +#define MC_CMD_LICENSED_APP_OP_IN_APP_ID_LEN 4 /* the type of operation requested */ #define MC_CMD_LICENSED_APP_OP_IN_OP_OFST 4 +#define MC_CMD_LICENSED_APP_OP_IN_OP_LEN 4 /* enum: validate application */ -#define MC_CMD_LICENSED_APP_OP_IN_OP_VALIDATE 0x0 +#define MC_CMD_LICENSED_APP_OP_IN_OP_VALIDATE 0x0 /* enum: mask application */ -#define MC_CMD_LICENSED_APP_OP_IN_OP_MASK 0x1 +#define MC_CMD_LICENSED_APP_OP_IN_OP_MASK 0x1 /* arguments specific to this particular operation */ #define MC_CMD_LICENSED_APP_OP_IN_ARGS_OFST 8 #define MC_CMD_LICENSED_APP_OP_IN_ARGS_LEN 4 @@ -13626,8 +14311,10 @@ #define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_LEN 72 /* application ID */ #define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_APP_ID_OFST 0 +#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_APP_ID_LEN 4 /* the type of operation requested */ #define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_OP_OFST 4 +#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_OP_LEN 4 /* validation challenge */ #define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_OFST 8 #define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_LEN 64 @@ -13636,6 +14323,7 @@ #define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_LEN 68 /* feature expiry (time_t) */ #define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_EXPIRY_OFST 0 +#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_EXPIRY_LEN 4 /* validation response */ #define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_OFST 4 #define 
MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_LEN 64 @@ -13644,10 +14332,13 @@ #define MC_CMD_LICENSED_APP_OP_MASK_IN_LEN 12 /* application ID */ #define MC_CMD_LICENSED_APP_OP_MASK_IN_APP_ID_OFST 0 +#define MC_CMD_LICENSED_APP_OP_MASK_IN_APP_ID_LEN 4 /* the type of operation requested */ #define MC_CMD_LICENSED_APP_OP_MASK_IN_OP_OFST 4 +#define MC_CMD_LICENSED_APP_OP_MASK_IN_OP_LEN 4 /* flag */ #define MC_CMD_LICENSED_APP_OP_MASK_IN_FLAG_OFST 8 +#define MC_CMD_LICENSED_APP_OP_MASK_IN_FLAG_LEN 4 /* MC_CMD_LICENSED_APP_OP_MASK_OUT msgresponse */ #define MC_CMD_LICENSED_APP_OP_MASK_OUT_LEN 0 @@ -13686,12 +14377,14 @@ #define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_LEN 96 /* application expiry time */ #define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_TIME_OFST 96 +#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_TIME_LEN 4 /* application expiry units */ #define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNITS_OFST 100 +#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNITS_LEN 4 /* enum: expiry units are accounting units */ -#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_ACC 0x0 +#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_ACC 0x0 /* enum: expiry units are calendar days */ -#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_DAYS 0x1 +#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_DAYS 0x1 /* base MAC address of the NIC stored in NVRAM (note that this is a constant * value for a given NIC regardless which function is calling, effectively this * is PF0 base MAC address) @@ -13712,7 +14405,7 @@ #define MC_CMD_LICENSED_V3_MASK_FEATURES 0xd5 #undef MC_CMD_0xd5_PRIVILEGE_CTG -#define MC_CMD_0xd5_PRIVILEGE_CTG SRIOV_CTG_GENERAL +#define MC_CMD_0xd5_PRIVILEGE_CTG SRIOV_CTG_ADMIN /* MC_CMD_LICENSED_V3_MASK_FEATURES_IN msgrequest */ #define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_LEN 12 @@ -13723,10 +14416,11 @@ #define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_OFST 4 /* whether to turn on or turn off the masked features */ #define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_OFST 8 +#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_LEN 4 /* enum: turn the features off */ -#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_OFF 0x0 +#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_OFF 0x0 /* enum: turn the features back on */ -#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_ON 0x1 +#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_ON 0x1 /* MC_CMD_LICENSED_V3_MASK_FEATURES_OUT msgresponse */ #define MC_CMD_LICENSED_V3_MASK_FEATURES_OUT_LEN 0 @@ -13743,29 +14437,31 @@ #define MC_CMD_LICENSING_V3_TEMPORARY 0xd6 #undef MC_CMD_0xd6_PRIVILEGE_CTG -#define MC_CMD_0xd6_PRIVILEGE_CTG SRIOV_CTG_GENERAL +#define MC_CMD_0xd6_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND /* MC_CMD_LICENSING_V3_TEMPORARY_IN msgrequest */ #define MC_CMD_LICENSING_V3_TEMPORARY_IN_LEN 4 /* operation code */ #define MC_CMD_LICENSING_V3_TEMPORARY_IN_OP_OFST 0 +#define MC_CMD_LICENSING_V3_TEMPORARY_IN_OP_LEN 4 /* enum: install a new license, overwriting any existing temporary license. 
* This is an asynchronous operation owing to the time taken to validate an * ECDSA license */ -#define MC_CMD_LICENSING_V3_TEMPORARY_SET 0x0 +#define MC_CMD_LICENSING_V3_TEMPORARY_SET 0x0 /* enum: clear the license immediately rather than waiting for the next power * cycle */ -#define MC_CMD_LICENSING_V3_TEMPORARY_CLEAR 0x1 +#define MC_CMD_LICENSING_V3_TEMPORARY_CLEAR 0x1 /* enum: get the status of the asynchronous MC_CMD_LICENSING_V3_TEMPORARY_SET * operation */ -#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS 0x2 +#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS 0x2 /* MC_CMD_LICENSING_V3_TEMPORARY_IN_SET msgrequest */ #define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LEN 164 #define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_OP_OFST 0 +#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_OP_LEN 4 /* ECDSA license and signature */ #define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_OFST 4 #define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_LEN 160 @@ -13773,23 +14469,26 @@ /* MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR msgrequest */ #define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_LEN 4 #define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_OP_OFST 0 +#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_OP_LEN 4 /* MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS msgrequest */ #define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_LEN 4 #define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_OP_OFST 0 +#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_OP_LEN 4 /* MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS msgresponse */ #define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LEN 12 /* status code */ #define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_STATUS_OFST 0 +#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_STATUS_LEN 4 /* enum: finished validating and installing license */ -#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_OK 0x0 +#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_OK 0x0 /* enum: license validation and installation in progress */ -#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_IN_PROGRESS 0x1 +#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_IN_PROGRESS 0x1 /* enum: licensing error. 
More specific error messages are not provided to * avoid exposing details of the licensing system to the client */ -#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_ERROR 0x2 +#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_ERROR 0x2 /* bitmask of licensed features */ #define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_OFST 4 #define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LEN 8 @@ -13814,23 +14513,27 @@ #define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_LEN 16 /* configuration flags */ #define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0 +#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_LEN 4 #define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0 #define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1 #define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_LBN 1 #define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_WIDTH 1 /* receive queue handle (for RSS mode, this is the base queue) */ #define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4 +#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE_LEN 4 /* receive mode */ #define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8 +#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_LEN 4 /* enum: receive to just the specified queue */ -#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0 +#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0 /* enum: receive to multiple queues using RSS context */ -#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS 0x1 +#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS 0x1 /* RSS context (for RX_MODE_RSS) as returned by MC_CMD_RSS_CONTEXT_ALLOC. Note * that these handles should be considered opaque to the host, although a value * of 0xFFFFFFFF is guaranteed never to be a valid handle. */ #define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12 +#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_LEN 4 /* MC_CMD_SET_PORT_SNIFF_CONFIG_OUT msgresponse */ #define MC_CMD_SET_PORT_SNIFF_CONFIG_OUT_LEN 0 @@ -13845,7 +14548,7 @@ #define MC_CMD_GET_PORT_SNIFF_CONFIG 0xf8 #undef MC_CMD_0xf8_PRIVILEGE_CTG -#define MC_CMD_0xf8_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0xf8_PRIVILEGE_CTG SRIOV_CTG_GENERAL /* MC_CMD_GET_PORT_SNIFF_CONFIG_IN msgrequest */ #define MC_CMD_GET_PORT_SNIFF_CONFIG_IN_LEN 0 @@ -13854,20 +14557,24 @@ #define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_LEN 16 /* configuration flags */ #define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0 +#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_FLAGS_LEN 4 #define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0 #define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1 #define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_LBN 1 #define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_WIDTH 1 /* receiving queue handle (for RSS mode, this is the base queue) */ #define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4 +#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_LEN 4 /* receive mode */ #define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8 +#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_LEN 4 /* enum: receiving to just the specified queue */ -#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0 +#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0 /* enum: receiving to multiple queues using RSS context */ -#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1 +#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1 /* RSS context (for RX_MODE_RSS) */ #define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12 +#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_LEN 4 /***********************************/ @@ -13885,19 +14592,21 @@ #define 
MC_CMD_SET_PARSER_DISP_CONFIG_IN_LEN(num) (8+4*(num)) /* the type of configuration setting to change */ #define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0 +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TYPE_LEN 4 /* enum: Per-TXQ enable for multicast UDP destination lookup for possible * internal loopback. (ENTITY is a queue handle, VALUE is a single boolean.) */ -#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TXQ_MCAST_UDP_DST_LOOKUP_EN 0x0 +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TXQ_MCAST_UDP_DST_LOOKUP_EN 0x0 /* enum: Per-v-adaptor enable for suppression of self-transmissions on the * internal loopback path. (ENTITY is an EVB_PORT_ID, VALUE is a single * boolean.) */ -#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VADAPTOR_SUPPRESS_SELF_TX 0x1 +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VADAPTOR_SUPPRESS_SELF_TX 0x1 /* handle for the entity to update: queue handle, EVB port ID, etc. depending * on the type of configuration setting being changed */ #define MC_CMD_SET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4 +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_ENTITY_LEN 4 /* new value: the details depend on the type of configuration setting being * changed */ @@ -13923,12 +14632,14 @@ #define MC_CMD_GET_PARSER_DISP_CONFIG_IN_LEN 8 /* the type of configuration setting to read */ #define MC_CMD_GET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0 +#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_TYPE_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_SET_PARSER_DISP_CONFIG/MC_CMD_SET_PARSER_DISP_CONFIG_IN/TYPE */ /* handle for the entity to query: queue handle, EVB port ID, etc. depending on * the type of configuration setting being read */ #define MC_CMD_GET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4 +#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_ENTITY_LEN 4 /* MC_CMD_GET_PARSER_DISP_CONFIG_OUT msgresponse */ #define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMIN 4 @@ -13962,21 +14673,25 @@ #define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_LEN 16 /* configuration flags */ #define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0 +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS_LEN 4 #define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0 #define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1 /* receive queue handle (for RSS mode, this is the base queue) */ #define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4 +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_QUEUE_LEN 4 /* receive mode */ #define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8 +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_LEN 4 /* enum: receive to just the specified queue */ -#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0 +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0 /* enum: receive to multiple queues using RSS context */ -#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS 0x1 +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS 0x1 /* RSS context (for RX_MODE_RSS) as returned by MC_CMD_RSS_CONTEXT_ALLOC. Note * that these handles should be considered opaque to the host, although a value * of 0xFFFFFFFF is guaranteed never to be a valid handle. 
*/ #define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12 +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_LEN 4 /* MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT msgresponse */ #define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT_LEN 0 @@ -13991,7 +14706,7 @@ #define MC_CMD_GET_TX_PORT_SNIFF_CONFIG 0xfc #undef MC_CMD_0xfc_PRIVILEGE_CTG -#define MC_CMD_0xfc_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0xfc_PRIVILEGE_CTG SRIOV_CTG_GENERAL /* MC_CMD_GET_TX_PORT_SNIFF_CONFIG_IN msgrequest */ #define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_IN_LEN 0 @@ -14000,18 +14715,22 @@ #define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_LEN 16 /* configuration flags */ #define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0 +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_FLAGS_LEN 4 #define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0 #define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1 /* receiving queue handle (for RSS mode, this is the base queue) */ #define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4 +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_LEN 4 /* receive mode */ #define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8 +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_LEN 4 /* enum: receiving to just the specified queue */ -#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0 +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0 /* enum: receiving to multiple queues using RSS context */ -#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1 +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1 /* RSS context (for RX_MODE_RSS) */ #define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12 +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_LEN 4 /***********************************/ @@ -14027,16 +14746,22 @@ #define MC_CMD_RMON_STATS_RX_ERRORS_IN_LEN 8 /* The rx queue to get stats for. 
*/ #define MC_CMD_RMON_STATS_RX_ERRORS_IN_RX_QUEUE_OFST 0 +#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RX_QUEUE_LEN 4 #define MC_CMD_RMON_STATS_RX_ERRORS_IN_FLAGS_OFST 4 +#define MC_CMD_RMON_STATS_RX_ERRORS_IN_FLAGS_LEN 4 #define MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_LBN 0 #define MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_WIDTH 1 /* MC_CMD_RMON_STATS_RX_ERRORS_OUT msgresponse */ #define MC_CMD_RMON_STATS_RX_ERRORS_OUT_LEN 16 #define MC_CMD_RMON_STATS_RX_ERRORS_OUT_CRC_ERRORS_OFST 0 +#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_CRC_ERRORS_LEN 4 #define MC_CMD_RMON_STATS_RX_ERRORS_OUT_TRUNC_ERRORS_OFST 4 +#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_TRUNC_ERRORS_LEN 4 #define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_NO_DESC_DROPS_OFST 8 +#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_NO_DESC_DROPS_LEN 4 #define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_ABORT_OFST 12 +#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_ABORT_LEN 4 /***********************************/ @@ -14044,6 +14769,9 @@ * Find out about available PCIE resources */ #define MC_CMD_GET_PCIE_RESOURCE_INFO 0xfd +#undef MC_CMD_0xfd_PRIVILEGE_CTG + +#define MC_CMD_0xfd_PRIVILEGE_CTG SRIOV_CTG_GENERAL /* MC_CMD_GET_PCIE_RESOURCE_INFO_IN msgrequest */ #define MC_CMD_GET_PCIE_RESOURCE_INFO_IN_LEN 0 @@ -14052,20 +14780,27 @@ #define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_LEN 28 /* The maximum number of PFs the device can expose */ #define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PFS_OFST 0 +#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PFS_LEN 4 /* The maximum number of VFs the device can expose in total */ #define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VFS_OFST 4 +#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VFS_LEN 4 /* The maximum number of MSI-X vectors the device can provide in total */ #define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VECTORS_OFST 8 +#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VECTORS_LEN 4 /* the number of MSI-X vectors the device will allocate by default to each PF */ #define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_PF_VECTORS_OFST 12 +#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_PF_VECTORS_LEN 4 /* the number of MSI-X vectors the device will allocate by default to each VF */ #define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_VF_VECTORS_OFST 16 +#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_VF_VECTORS_LEN 4 /* the maximum number of MSI-X vectors the device can allocate to any one PF */ #define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PF_VECTORS_OFST 20 +#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PF_VECTORS_LEN 4 /* the maximum number of MSI-X vectors the device can allocate to any one VF */ #define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VF_VECTORS_OFST 24 +#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VF_VECTORS_LEN 4 /***********************************/ @@ -14084,10 +14819,13 @@ #define MC_CMD_GET_PORT_MODES_OUT_LEN 12 /* Bitmask of port modes available on the board (indexed by TLV_PORT_MODE_*) */ #define MC_CMD_GET_PORT_MODES_OUT_MODES_OFST 0 +#define MC_CMD_GET_PORT_MODES_OUT_MODES_LEN 4 /* Default (canonical) board mode */ #define MC_CMD_GET_PORT_MODES_OUT_DEFAULT_MODE_OFST 4 +#define MC_CMD_GET_PORT_MODES_OUT_DEFAULT_MODE_LEN 4 /* Current board mode */ #define MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST 8 +#define MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_LEN 4 /***********************************/ @@ -14097,21 +14835,26 @@ #define MC_CMD_READ_ATB 0x100 #undef MC_CMD_0x100_PRIVILEGE_CTG -#define MC_CMD_0x100_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x100_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_READ_ATB_IN msgrequest */ #define 
MC_CMD_READ_ATB_IN_LEN 16 #define MC_CMD_READ_ATB_IN_SIGNAL_BUS_OFST 0 -#define MC_CMD_READ_ATB_IN_BUS_CCOM 0x0 /* enum */ -#define MC_CMD_READ_ATB_IN_BUS_CKR 0x1 /* enum */ -#define MC_CMD_READ_ATB_IN_BUS_CPCIE 0x8 /* enum */ +#define MC_CMD_READ_ATB_IN_SIGNAL_BUS_LEN 4 +#define MC_CMD_READ_ATB_IN_BUS_CCOM 0x0 /* enum */ +#define MC_CMD_READ_ATB_IN_BUS_CKR 0x1 /* enum */ +#define MC_CMD_READ_ATB_IN_BUS_CPCIE 0x8 /* enum */ #define MC_CMD_READ_ATB_IN_SIGNAL_EN_BITNO_OFST 4 +#define MC_CMD_READ_ATB_IN_SIGNAL_EN_BITNO_LEN 4 #define MC_CMD_READ_ATB_IN_SIGNAL_SEL_OFST 8 +#define MC_CMD_READ_ATB_IN_SIGNAL_SEL_LEN 4 #define MC_CMD_READ_ATB_IN_SETTLING_TIME_US_OFST 12 +#define MC_CMD_READ_ATB_IN_SETTLING_TIME_US_LEN 4 /* MC_CMD_READ_ATB_OUT msgresponse */ #define MC_CMD_READ_ATB_OUT_LEN 4 #define MC_CMD_READ_ATB_OUT_SAMPLE_MV_OFST 0 +#define MC_CMD_READ_ATB_OUT_SAMPLE_MV_LEN 4 /***********************************/ @@ -14129,7 +14872,9 @@ /* Each workaround is represented by a single bit according to the enums below. */ #define MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_OFST 0 +#define MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_LEN 4 #define MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_OFST 4 +#define MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_LEN 4 /* enum: Bug 17230 work around. */ #define MC_CMD_GET_WORKAROUNDS_OUT_BUG17230 0x2 /* enum: Bug 35388 work around (unsafe EVQ writes). */ @@ -14165,50 +14910,63 @@ * 1,3 = 0x00030001 */ #define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_OFST 0 +#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_LEN 4 #define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_LBN 0 #define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_WIDTH 16 #define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_LBN 16 #define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_WIDTH 16 -#define MC_CMD_PRIVILEGE_MASK_IN_VF_NULL 0xffff /* enum */ +#define MC_CMD_PRIVILEGE_MASK_IN_VF_NULL 0xffff /* enum */ /* New privilege mask to be set. The mask will only be changed if the MSB is * set to 1. */ #define MC_CMD_PRIVILEGE_MASK_IN_NEW_MASK_OFST 4 -#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN 0x1 /* enum */ -#define MC_CMD_PRIVILEGE_MASK_IN_GRP_LINK 0x2 /* enum */ -#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD 0x4 /* enum */ -#define MC_CMD_PRIVILEGE_MASK_IN_GRP_PTP 0x8 /* enum */ -#define MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE_FILTERS 0x10 /* enum */ +#define MC_CMD_PRIVILEGE_MASK_IN_NEW_MASK_LEN 4 +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN 0x1 /* enum */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_LINK 0x2 /* enum */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD 0x4 /* enum */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_PTP 0x8 /* enum */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE_FILTERS 0x10 /* enum */ /* enum: Deprecated. Equivalent to MAC_SPOOFING_TX combined with CHANGE_MAC. 
*/ -#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING 0x20 -#define MC_CMD_PRIVILEGE_MASK_IN_GRP_UNICAST 0x40 /* enum */ -#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MULTICAST 0x80 /* enum */ -#define MC_CMD_PRIVILEGE_MASK_IN_GRP_BROADCAST 0x100 /* enum */ -#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ALL_MULTICAST 0x200 /* enum */ -#define MC_CMD_PRIVILEGE_MASK_IN_GRP_PROMISCUOUS 0x400 /* enum */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING 0x20 +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_UNICAST 0x40 /* enum */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MULTICAST 0x80 /* enum */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_BROADCAST 0x100 /* enum */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ALL_MULTICAST 0x200 /* enum */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_PROMISCUOUS 0x400 /* enum */ /* enum: Allows to set the TX packets' source MAC address to any arbitrary MAC * adress. */ -#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING_TX 0x800 +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING_TX 0x800 /* enum: Privilege that allows a Function to change the MAC address configured * in its associated vAdapter/vPort. */ -#define MC_CMD_PRIVILEGE_MASK_IN_GRP_CHANGE_MAC 0x1000 +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_CHANGE_MAC 0x1000 /* enum: Privilege that allows a Function to install filters that specify VLANs * that are not in the permit list for the associated vPort. This privilege is * primarily to support ESX where vPorts are created that restrict traffic to * only a set of permitted VLANs. See the vPort flag FLAG_VLAN_RESTRICT. */ -#define MC_CMD_PRIVILEGE_MASK_IN_GRP_UNRESTRICTED_VLAN 0x2000 +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_UNRESTRICTED_VLAN 0x2000 +/* enum: Privilege for insecure commands. Commands that belong to this group + * are not permitted on secure adapters regardless of the privilege mask. + */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE 0x4000 +/* enum: Trusted Server Adapter (TSA) / ServerLock. Privilege for + * administrator-level operations that are not allowed from the local host once + * an adapter has Bound to a remote ServerLock Controller (see doxbox + * SF-117064-DG for background). + */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN_TSA_UNBOUND 0x8000 /* enum: Set this bit to indicate that a new privilege mask is to be set, * otherwise the command will only read the existing mask. */ -#define MC_CMD_PRIVILEGE_MASK_IN_DO_CHANGE 0x80000000 +#define MC_CMD_PRIVILEGE_MASK_IN_DO_CHANGE 0x80000000 /* MC_CMD_PRIVILEGE_MASK_OUT msgresponse */ #define MC_CMD_PRIVILEGE_MASK_OUT_LEN 4 /* For an admin function, always all the privileges are reported. */ #define MC_CMD_PRIVILEGE_MASK_OUT_OLD_MASK_OFST 0 +#define MC_CMD_PRIVILEGE_MASK_OUT_OLD_MASK_LEN 4 /***********************************/ @@ -14226,27 +14984,30 @@ * e.g. 
VF 1,3 = 0x00030001 */ #define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_OFST 0 +#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_LEN 4 #define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_LBN 0 #define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_WIDTH 16 #define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_LBN 16 #define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_WIDTH 16 /* New link state mode to be set */ #define MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_OFST 4 -#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_AUTO 0x0 /* enum */ -#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_UP 0x1 /* enum */ -#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_DOWN 0x2 /* enum */ +#define MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_LEN 4 +#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_AUTO 0x0 /* enum */ +#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_UP 0x1 /* enum */ +#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_DOWN 0x2 /* enum */ /* enum: Use this value to just read the existing setting without modifying it. */ -#define MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE 0xffffffff +#define MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE 0xffffffff /* MC_CMD_LINK_STATE_MODE_OUT msgresponse */ #define MC_CMD_LINK_STATE_MODE_OUT_LEN 4 #define MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_OFST 0 +#define MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_LEN 4 /***********************************/ /* MC_CMD_GET_SNAPSHOT_LENGTH - * Obtain the curent range of allowable values for the SNAPSHOT_LENGTH + * Obtain the current range of allowable values for the SNAPSHOT_LENGTH * parameter to MC_CMD_INIT_RXQ. */ #define MC_CMD_GET_SNAPSHOT_LENGTH 0x101 @@ -14261,8 +15022,10 @@ #define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_LEN 8 /* Minimum acceptable snapshot length. */ #define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MIN_OFST 0 +#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MIN_LEN 4 /* Maximum acceptable snapshot length. 
*/ #define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MAX_OFST 4 +#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MAX_LEN 4 /***********************************/ @@ -14272,7 +15035,7 @@ #define MC_CMD_FUSE_DIAGS 0x102 #undef MC_CMD_0x102_PRIVILEGE_CTG -#define MC_CMD_0x102_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x102_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_FUSE_DIAGS_IN msgrequest */ #define MC_CMD_FUSE_DIAGS_IN_LEN 0 @@ -14281,28 +15044,40 @@ #define MC_CMD_FUSE_DIAGS_OUT_LEN 48 /* Total number of mismatched bits between pairs in area 0 */ #define MC_CMD_FUSE_DIAGS_OUT_AREA0_MISMATCH_BITS_OFST 0 +#define MC_CMD_FUSE_DIAGS_OUT_AREA0_MISMATCH_BITS_LEN 4 /* Total number of unexpectedly clear (set in B but not A) bits in area 0 */ #define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_A_BAD_BITS_OFST 4 +#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_A_BAD_BITS_LEN 4 /* Total number of unexpectedly clear (set in A but not B) bits in area 0 */ #define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_B_BAD_BITS_OFST 8 +#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_B_BAD_BITS_LEN 4 /* Checksum of data after logical OR of pairs in area 0 */ #define MC_CMD_FUSE_DIAGS_OUT_AREA0_CHECKSUM_OFST 12 +#define MC_CMD_FUSE_DIAGS_OUT_AREA0_CHECKSUM_LEN 4 /* Total number of mismatched bits between pairs in area 1 */ #define MC_CMD_FUSE_DIAGS_OUT_AREA1_MISMATCH_BITS_OFST 16 +#define MC_CMD_FUSE_DIAGS_OUT_AREA1_MISMATCH_BITS_LEN 4 /* Total number of unexpectedly clear (set in B but not A) bits in area 1 */ #define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_A_BAD_BITS_OFST 20 +#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_A_BAD_BITS_LEN 4 /* Total number of unexpectedly clear (set in A but not B) bits in area 1 */ #define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_B_BAD_BITS_OFST 24 +#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_B_BAD_BITS_LEN 4 /* Checksum of data after logical OR of pairs in area 1 */ #define MC_CMD_FUSE_DIAGS_OUT_AREA1_CHECKSUM_OFST 28 +#define MC_CMD_FUSE_DIAGS_OUT_AREA1_CHECKSUM_LEN 4 /* Total number of mismatched bits between pairs in area 2 */ #define MC_CMD_FUSE_DIAGS_OUT_AREA2_MISMATCH_BITS_OFST 32 +#define MC_CMD_FUSE_DIAGS_OUT_AREA2_MISMATCH_BITS_LEN 4 /* Total number of unexpectedly clear (set in B but not A) bits in area 2 */ #define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_A_BAD_BITS_OFST 36 +#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_A_BAD_BITS_LEN 4 /* Total number of unexpectedly clear (set in A but not B) bits in area 2 */ #define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_B_BAD_BITS_OFST 40 +#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_B_BAD_BITS_LEN 4 /* Checksum of data after logical OR of pairs in area 2 */ #define MC_CMD_FUSE_DIAGS_OUT_AREA2_CHECKSUM_OFST 44 +#define MC_CMD_FUSE_DIAGS_OUT_AREA2_CHECKSUM_LEN 4 /***********************************/ @@ -14320,14 +15095,16 @@ #define MC_CMD_PRIVILEGE_MODIFY_IN_LEN 16 /* The groups of functions to have their privilege masks modified. 
*/ #define MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_OFST 0 -#define MC_CMD_PRIVILEGE_MODIFY_IN_NONE 0x0 /* enum */ -#define MC_CMD_PRIVILEGE_MODIFY_IN_ALL 0x1 /* enum */ -#define MC_CMD_PRIVILEGE_MODIFY_IN_PFS_ONLY 0x2 /* enum */ -#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_ONLY 0x3 /* enum */ -#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_OF_PF 0x4 /* enum */ -#define MC_CMD_PRIVILEGE_MODIFY_IN_ONE 0x5 /* enum */ +#define MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_LEN 4 +#define MC_CMD_PRIVILEGE_MODIFY_IN_NONE 0x0 /* enum */ +#define MC_CMD_PRIVILEGE_MODIFY_IN_ALL 0x1 /* enum */ +#define MC_CMD_PRIVILEGE_MODIFY_IN_PFS_ONLY 0x2 /* enum */ +#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_ONLY 0x3 /* enum */ +#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_OF_PF 0x4 /* enum */ +#define MC_CMD_PRIVILEGE_MODIFY_IN_ONE 0x5 /* enum */ /* For VFS_OF_PF specify the PF, for ONE specify the target function */ #define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_OFST 4 +#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_LEN 4 #define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_LBN 0 #define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_WIDTH 16 #define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_LBN 16 @@ -14336,10 +15113,12 @@ * refer to the command MC_CMD_PRIVILEGE_MASK */ #define MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_OFST 8 +#define MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_LEN 4 /* Privileges to be removed from the target functions. For privilege * definitions refer to the command MC_CMD_PRIVILEGE_MASK */ #define MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_OFST 12 +#define MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_LEN 4 /* MC_CMD_PRIVILEGE_MODIFY_OUT msgresponse */ #define MC_CMD_PRIVILEGE_MODIFY_OUT_LEN 0 @@ -14358,8 +15137,10 @@ #define MC_CMD_XPM_READ_BYTES_IN_LEN 8 /* Start address (byte) */ #define MC_CMD_XPM_READ_BYTES_IN_ADDR_OFST 0 +#define MC_CMD_XPM_READ_BYTES_IN_ADDR_LEN 4 /* Count (bytes) */ #define MC_CMD_XPM_READ_BYTES_IN_COUNT_OFST 4 +#define MC_CMD_XPM_READ_BYTES_IN_COUNT_LEN 4 /* MC_CMD_XPM_READ_BYTES_OUT msgresponse */ #define MC_CMD_XPM_READ_BYTES_OUT_LENMIN 0 @@ -14379,7 +15160,7 @@ #define MC_CMD_XPM_WRITE_BYTES 0x104 #undef MC_CMD_0x104_PRIVILEGE_CTG -#define MC_CMD_0x104_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x104_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_XPM_WRITE_BYTES_IN msgrequest */ #define MC_CMD_XPM_WRITE_BYTES_IN_LENMIN 8 @@ -14387,8 +15168,10 @@ #define MC_CMD_XPM_WRITE_BYTES_IN_LEN(num) (8+1*(num)) /* Start address (byte) */ #define MC_CMD_XPM_WRITE_BYTES_IN_ADDR_OFST 0 +#define MC_CMD_XPM_WRITE_BYTES_IN_ADDR_LEN 4 /* Count (bytes) */ #define MC_CMD_XPM_WRITE_BYTES_IN_COUNT_OFST 4 +#define MC_CMD_XPM_WRITE_BYTES_IN_COUNT_LEN 4 /* Data */ #define MC_CMD_XPM_WRITE_BYTES_IN_DATA_OFST 8 #define MC_CMD_XPM_WRITE_BYTES_IN_DATA_LEN 1 @@ -14406,14 +15189,16 @@ #define MC_CMD_XPM_READ_SECTOR 0x105 #undef MC_CMD_0x105_PRIVILEGE_CTG -#define MC_CMD_0x105_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x105_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_XPM_READ_SECTOR_IN msgrequest */ #define MC_CMD_XPM_READ_SECTOR_IN_LEN 8 /* Sector index */ #define MC_CMD_XPM_READ_SECTOR_IN_INDEX_OFST 0 +#define MC_CMD_XPM_READ_SECTOR_IN_INDEX_LEN 4 /* Sector size */ #define MC_CMD_XPM_READ_SECTOR_IN_SIZE_OFST 4 +#define MC_CMD_XPM_READ_SECTOR_IN_SIZE_LEN 4 /* MC_CMD_XPM_READ_SECTOR_OUT msgresponse */ #define MC_CMD_XPM_READ_SECTOR_OUT_LENMIN 4 @@ -14421,10 +15206,12 @@ #define MC_CMD_XPM_READ_SECTOR_OUT_LEN(num) (4+1*(num)) /* Sector type */ #define MC_CMD_XPM_READ_SECTOR_OUT_TYPE_OFST 0 -#define MC_CMD_XPM_READ_SECTOR_OUT_BLANK 0x0 /* enum */ -#define 
MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_128 0x1 /* enum */ -#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_256 0x2 /* enum */ -#define MC_CMD_XPM_READ_SECTOR_OUT_INVALID 0xff /* enum */ +#define MC_CMD_XPM_READ_SECTOR_OUT_TYPE_LEN 4 +#define MC_CMD_XPM_READ_SECTOR_OUT_BLANK 0x0 /* enum */ +#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_128 0x1 /* enum */ +#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_256 0x2 /* enum */ +#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_DATA 0x3 /* enum */ +#define MC_CMD_XPM_READ_SECTOR_OUT_INVALID 0xff /* enum */ /* Sector data */ #define MC_CMD_XPM_READ_SECTOR_OUT_DATA_OFST 4 #define MC_CMD_XPM_READ_SECTOR_OUT_DATA_LEN 1 @@ -14439,7 +15226,7 @@ #define MC_CMD_XPM_WRITE_SECTOR 0x106 #undef MC_CMD_0x106_PRIVILEGE_CTG -#define MC_CMD_0x106_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x106_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_XPM_WRITE_SECTOR_IN msgrequest */ #define MC_CMD_XPM_WRITE_SECTOR_IN_LENMIN 12 @@ -14456,10 +15243,12 @@ #define MC_CMD_XPM_WRITE_SECTOR_IN_RESERVED_LEN 3 /* Sector type */ #define MC_CMD_XPM_WRITE_SECTOR_IN_TYPE_OFST 4 +#define MC_CMD_XPM_WRITE_SECTOR_IN_TYPE_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_XPM_READ_SECTOR/MC_CMD_XPM_READ_SECTOR_OUT/TYPE */ /* Sector size */ #define MC_CMD_XPM_WRITE_SECTOR_IN_SIZE_OFST 8 +#define MC_CMD_XPM_WRITE_SECTOR_IN_SIZE_LEN 4 /* Sector data */ #define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_OFST 12 #define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_LEN 1 @@ -14470,6 +15259,7 @@ #define MC_CMD_XPM_WRITE_SECTOR_OUT_LEN 4 /* New sector index */ #define MC_CMD_XPM_WRITE_SECTOR_OUT_INDEX_OFST 0 +#define MC_CMD_XPM_WRITE_SECTOR_OUT_INDEX_LEN 4 /***********************************/ @@ -14479,12 +15269,13 @@ #define MC_CMD_XPM_INVALIDATE_SECTOR 0x107 #undef MC_CMD_0x107_PRIVILEGE_CTG -#define MC_CMD_0x107_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x107_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_XPM_INVALIDATE_SECTOR_IN msgrequest */ #define MC_CMD_XPM_INVALIDATE_SECTOR_IN_LEN 4 /* Sector index */ #define MC_CMD_XPM_INVALIDATE_SECTOR_IN_INDEX_OFST 0 +#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_INDEX_LEN 4 /* MC_CMD_XPM_INVALIDATE_SECTOR_OUT msgresponse */ #define MC_CMD_XPM_INVALIDATE_SECTOR_OUT_LEN 0 @@ -14497,14 +15288,16 @@ #define MC_CMD_XPM_BLANK_CHECK 0x108 #undef MC_CMD_0x108_PRIVILEGE_CTG -#define MC_CMD_0x108_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x108_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_XPM_BLANK_CHECK_IN msgrequest */ #define MC_CMD_XPM_BLANK_CHECK_IN_LEN 8 /* Start address (byte) */ #define MC_CMD_XPM_BLANK_CHECK_IN_ADDR_OFST 0 +#define MC_CMD_XPM_BLANK_CHECK_IN_ADDR_LEN 4 /* Count (bytes) */ #define MC_CMD_XPM_BLANK_CHECK_IN_COUNT_OFST 4 +#define MC_CMD_XPM_BLANK_CHECK_IN_COUNT_LEN 4 /* MC_CMD_XPM_BLANK_CHECK_OUT msgresponse */ #define MC_CMD_XPM_BLANK_CHECK_OUT_LENMIN 4 @@ -14512,6 +15305,7 @@ #define MC_CMD_XPM_BLANK_CHECK_OUT_LEN(num) (4+2*(num)) /* Total number of bad (non-blank) locations */ #define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_OFST 0 +#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_LEN 4 /* Addresses of bad locations (may be less than BAD_COUNT, if all cannot fit * into MCDI response) */ @@ -14528,14 +15322,16 @@ #define MC_CMD_XPM_REPAIR 0x109 #undef MC_CMD_0x109_PRIVILEGE_CTG -#define MC_CMD_0x109_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x109_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_XPM_REPAIR_IN msgrequest */ #define MC_CMD_XPM_REPAIR_IN_LEN 8 /* Start address (byte) */ #define MC_CMD_XPM_REPAIR_IN_ADDR_OFST 0 +#define MC_CMD_XPM_REPAIR_IN_ADDR_LEN 4 /* Count (bytes) */ 
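/*
 * A minimal sketch of driving MC_CMD_XPM_BLANK_CHECK with the definitions
 * above, assuming a little-endian host and a hypothetical mcdi_rpc()
 * transport (xpm_blank_check() and mcdi_rpc() are illustrative names, not
 * APIs from this header). The response is variable-length: OUT_LEN(num)
 * is (4 + 2*(num)), so the number of 16-bit bad-address entries actually
 * returned is (outlen - 4) / 2, which may be smaller than BAD_COUNT when
 * the full list does not fit into one MCDI response, as noted above.
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical MCDI transport, provided elsewhere by the driver. */
extern int mcdi_rpc(unsigned int cmd, const uint8_t *in, size_t inlen,
		    uint8_t *out, size_t outlen_max, size_t *outlen);

static int xpm_blank_check(uint32_t addr, uint32_t count,
			   uint32_t *bad_count, size_t *entries_returned)
{
	uint8_t req[MC_CMD_XPM_BLANK_CHECK_IN_LEN];
	uint8_t out[252];	/* transport-dependent maximum */
	size_t outlen;
	int rc;

	memcpy(req + MC_CMD_XPM_BLANK_CHECK_IN_ADDR_OFST, &addr,
	       MC_CMD_XPM_BLANK_CHECK_IN_ADDR_LEN);	/* LE host assumed */
	memcpy(req + MC_CMD_XPM_BLANK_CHECK_IN_COUNT_OFST, &count,
	       MC_CMD_XPM_BLANK_CHECK_IN_COUNT_LEN);

	rc = mcdi_rpc(MC_CMD_XPM_BLANK_CHECK, req, sizeof(req),
		      out, sizeof(out), &outlen);
	if (rc != 0)
		return rc;

	/* Total number of bad (non-blank) locations reported by the MC. */
	memcpy(bad_count, out + MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_OFST,
	       MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_LEN);
	/* 16-bit addresses follow the count; report how many were returned. */
	*entries_returned = (outlen - 4) / 2;
	return 0;
}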
#define MC_CMD_XPM_REPAIR_IN_COUNT_OFST 4 +#define MC_CMD_XPM_REPAIR_IN_COUNT_LEN 4 /* MC_CMD_XPM_REPAIR_OUT msgresponse */ #define MC_CMD_XPM_REPAIR_OUT_LEN 0 @@ -14549,7 +15345,7 @@ #define MC_CMD_XPM_DECODER_TEST 0x10a #undef MC_CMD_0x10a_PRIVILEGE_CTG -#define MC_CMD_0x10a_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x10a_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_XPM_DECODER_TEST_IN msgrequest */ #define MC_CMD_XPM_DECODER_TEST_IN_LEN 0 @@ -14569,7 +15365,7 @@ #define MC_CMD_XPM_WRITE_TEST 0x10b #undef MC_CMD_0x10b_PRIVILEGE_CTG -#define MC_CMD_0x10b_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x10b_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_XPM_WRITE_TEST_IN msgrequest */ #define MC_CMD_XPM_WRITE_TEST_IN_LEN 0 @@ -14590,16 +15386,19 @@ #define MC_CMD_EXEC_SIGNED 0x10c #undef MC_CMD_0x10c_PRIVILEGE_CTG -#define MC_CMD_0x10c_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x10c_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND /* MC_CMD_EXEC_SIGNED_IN msgrequest */ #define MC_CMD_EXEC_SIGNED_IN_LEN 28 /* the length of code to include in the CMAC */ #define MC_CMD_EXEC_SIGNED_IN_CODELEN_OFST 0 +#define MC_CMD_EXEC_SIGNED_IN_CODELEN_LEN 4 /* the length of date to include in the CMAC */ #define MC_CMD_EXEC_SIGNED_IN_DATALEN_OFST 4 +#define MC_CMD_EXEC_SIGNED_IN_DATALEN_LEN 4 /* the XPM sector containing the key to use */ #define MC_CMD_EXEC_SIGNED_IN_KEYSECTOR_OFST 8 +#define MC_CMD_EXEC_SIGNED_IN_KEYSECTOR_LEN 4 /* the expected CMAC value */ #define MC_CMD_EXEC_SIGNED_IN_CMAC_OFST 12 #define MC_CMD_EXEC_SIGNED_IN_CMAC_LEN 16 @@ -14617,12 +15416,13 @@ #define MC_CMD_PREPARE_SIGNED 0x10d #undef MC_CMD_0x10d_PRIVILEGE_CTG -#define MC_CMD_0x10d_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x10d_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND /* MC_CMD_PREPARE_SIGNED_IN msgrequest */ #define MC_CMD_PREPARE_SIGNED_IN_LEN 4 /* the length of data area to clear */ #define MC_CMD_PREPARE_SIGNED_IN_DATALEN_OFST 0 +#define MC_CMD_PREPARE_SIGNED_IN_DATALEN_LEN 4 /* MC_CMD_PREPARE_SIGNED_OUT msgresponse */ #define MC_CMD_PREPARE_SIGNED_OUT_LEN 0 @@ -14639,12 +15439,13 @@ #define MC_CMD_SET_SECURITY_RULE 0x10f #undef MC_CMD_0x10f_PRIVILEGE_CTG -#define MC_CMD_0x10f_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x10f_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND /* MC_CMD_SET_SECURITY_RULE_IN msgrequest */ #define MC_CMD_SET_SECURITY_RULE_IN_LEN 92 /* fields to include in match criteria */ #define MC_CMD_SET_SECURITY_RULE_IN_MATCH_FIELDS_OFST 0 +#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_FIELDS_LEN 4 #define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_IP_LBN 0 #define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_IP_WIDTH 1 #define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_IP_LBN 1 @@ -14701,8 +15502,10 @@ #define MC_CMD_SET_SECURITY_RULE_IN_IP_PROTO_LEN 2 /* Physical port to match (as little-endian 32-bit value) */ #define MC_CMD_SET_SECURITY_RULE_IN_PHYSICAL_PORT_OFST 28 +#define MC_CMD_SET_SECURITY_RULE_IN_PHYSICAL_PORT_LEN 4 /* Reserved; set to 0 */ #define MC_CMD_SET_SECURITY_RULE_IN_RESERVED_OFST 32 +#define MC_CMD_SET_SECURITY_RULE_IN_RESERVED_LEN 4 /* remote IP address to match (as bytes in network order; set last 12 bytes to * 0 for IPv4 address) */ @@ -14719,58 +15522,85 @@ * MC_CMD_SUBNET_MAP_SET_NODE appropriately */ #define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_SUBNET_ID_OFST 68 +#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_SUBNET_ID_LEN 4 /* remote portrange ID to match (as little-endian 32-bit value); note that * remote port ranges are matched by mapping the remote port to a "portrange * ID" via a data structure which 
must already have been configured using * MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE */ #define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_PORTRANGE_ID_OFST 72 +#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_PORTRANGE_ID_LEN 4 /* local portrange ID to match (as little-endian 32-bit value); note that local * port ranges are matched by mapping the local port to a "portrange ID" via a * data structure which must already have been configured using * MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE */ #define MC_CMD_SET_SECURITY_RULE_IN_LOCAL_PORTRANGE_ID_OFST 76 +#define MC_CMD_SET_SECURITY_RULE_IN_LOCAL_PORTRANGE_ID_LEN 4 /* set the action for transmitted packets matching this rule */ #define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_OFST 80 +#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_LEN 4 /* enum: make no decision */ -#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_NONE 0x0 +#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_NONE 0x0 /* enum: decide to accept the packet */ -#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_WHITELIST 0x1 +#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_WHITELIST 0x1 /* enum: decide to drop the packet */ -#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_BLACKLIST 0x2 +#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_BLACKLIST 0x2 +/* enum: inform the TSA controller about some sample of packets matching this + * rule (via MC_CMD_TSA_INFO_IN_PKT_SAMPLE messages); may be bitwise-ORed with + * either the WHITELIST or BLACKLIST action + */ +#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_SAMPLE 0x4 /* enum: do not change the current TX action */ -#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_UNCHANGED 0xffffffff +#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_UNCHANGED 0xffffffff /* set the action for received packets matching this rule */ #define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_OFST 84 +#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_LEN 4 /* enum: make no decision */ -#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_NONE 0x0 +#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_NONE 0x0 /* enum: decide to accept the packet */ -#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_WHITELIST 0x1 +#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_WHITELIST 0x1 /* enum: decide to drop the packet */ -#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_BLACKLIST 0x2 +#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_BLACKLIST 0x2 +/* enum: inform the TSA controller about some sample of packets matching this + * rule (via MC_CMD_TSA_INFO_IN_PKT_SAMPLE messages); may be bitwise-ORed with + * either the WHITELIST or BLACKLIST action + */ +#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_SAMPLE 0x4 /* enum: do not change the current RX action */ -#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_UNCHANGED 0xffffffff +#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_UNCHANGED 0xffffffff /* counter ID to associate with this rule; IDs are allocated using * MC_CMD_SECURITY_RULE_COUNTER_ALLOC */ #define MC_CMD_SET_SECURITY_RULE_IN_COUNTER_ID_OFST 88 +#define MC_CMD_SET_SECURITY_RULE_IN_COUNTER_ID_LEN 4 /* enum: special value for the null counter ID */ -#define MC_CMD_SET_SECURITY_RULE_IN_COUNTER_ID_NONE 0x0 +#define MC_CMD_SET_SECURITY_RULE_IN_COUNTER_ID_NONE 0x0 +/* enum: special value to tell the MC to allocate an available counter */ +#define MC_CMD_SET_SECURITY_RULE_IN_COUNTER_ID_SW_AUTO 0xeeeeeeee +/* enum: special value to request use of hardware counter (Medford2 only) */ +#define MC_CMD_SET_SECURITY_RULE_IN_COUNTER_ID_HW 0xffffffff /* MC_CMD_SET_SECURITY_RULE_OUT msgresponse */ -#define MC_CMD_SET_SECURITY_RULE_OUT_LEN 28 +#define 
MC_CMD_SET_SECURITY_RULE_OUT_LEN 32 /* new reference count for uses of counter ID */ #define MC_CMD_SET_SECURITY_RULE_OUT_COUNTER_REFCNT_OFST 0 +#define MC_CMD_SET_SECURITY_RULE_OUT_COUNTER_REFCNT_LEN 4 /* constructed match bits for this rule (as a tracing aid only) */ #define MC_CMD_SET_SECURITY_RULE_OUT_LUE_MATCH_BITS_OFST 4 #define MC_CMD_SET_SECURITY_RULE_OUT_LUE_MATCH_BITS_LEN 12 /* constructed discriminator bits for this rule (as a tracing aid only) */ #define MC_CMD_SET_SECURITY_RULE_OUT_LUE_DISCRIMINATOR_OFST 16 +#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_DISCRIMINATOR_LEN 4 /* base location for probes for this rule (as a tracing aid only) */ #define MC_CMD_SET_SECURITY_RULE_OUT_LUE_PROBE_BASE_OFST 20 +#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_PROBE_BASE_LEN 4 /* step for probes for this rule (as a tracing aid only) */ #define MC_CMD_SET_SECURITY_RULE_OUT_LUE_PROBE_STEP_OFST 24 +#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_PROBE_STEP_LEN 4 +/* ID for reading back the counter */ +#define MC_CMD_SET_SECURITY_RULE_OUT_COUNTER_ID_OFST 28 +#define MC_CMD_SET_SECURITY_RULE_OUT_COUNTER_ID_LEN 4 /***********************************/ @@ -14784,14 +15614,15 @@ #define MC_CMD_RESET_SECURITY_RULES 0x110 #undef MC_CMD_0x110_PRIVILEGE_CTG -#define MC_CMD_0x110_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x110_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND /* MC_CMD_RESET_SECURITY_RULES_IN msgrequest */ #define MC_CMD_RESET_SECURITY_RULES_IN_LEN 4 /* index of physical port to reset (or ALL_PHYSICAL_PORTS to reset all) */ #define MC_CMD_RESET_SECURITY_RULES_IN_PHYSICAL_PORT_OFST 0 +#define MC_CMD_RESET_SECURITY_RULES_IN_PHYSICAL_PORT_LEN 4 /* enum: special value to reset all physical ports */ -#define MC_CMD_RESET_SECURITY_RULES_IN_ALL_PHYSICAL_PORTS 0xffffffff +#define MC_CMD_RESET_SECURITY_RULES_IN_ALL_PHYSICAL_PORTS 0xffffffff /* MC_CMD_RESET_SECURITY_RULES_OUT msgresponse */ #define MC_CMD_RESET_SECURITY_RULES_OUT_LEN 0 @@ -14836,12 +15667,13 @@ #define MC_CMD_SECURITY_RULE_COUNTER_ALLOC 0x112 #undef MC_CMD_0x112_PRIVILEGE_CTG -#define MC_CMD_0x112_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x112_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND /* MC_CMD_SECURITY_RULE_COUNTER_ALLOC_IN msgrequest */ #define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_IN_LEN 4 /* the number of new counter IDs to request */ #define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_IN_NUM_COUNTERS_OFST 0 +#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_IN_NUM_COUNTERS_LEN 4 /* MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT msgresponse */ #define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_LENMIN 4 @@ -14851,6 +15683,7 @@ * requested if resources are unavailable) */ #define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_NUM_COUNTERS_OFST 0 +#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_NUM_COUNTERS_LEN 4 /* new counter ID(s) */ #define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_COUNTER_ID_OFST 4 #define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_COUNTER_ID_LEN 4 @@ -14869,7 +15702,7 @@ #define MC_CMD_SECURITY_RULE_COUNTER_FREE 0x113 #undef MC_CMD_0x113_PRIVILEGE_CTG -#define MC_CMD_0x113_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x113_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND /* MC_CMD_SECURITY_RULE_COUNTER_FREE_IN msgrequest */ #define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_LENMIN 4 @@ -14877,6 +15710,7 @@ #define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_LEN(num) (4+4*(num)) /* the number of counter IDs to free */ #define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_NUM_COUNTERS_OFST 0 +#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_NUM_COUNTERS_LEN 4 /* the counter ID(s) to free */ #define 
MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_COUNTER_ID_OFST 4 #define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_COUNTER_ID_LEN 4 @@ -14900,7 +15734,7 @@ #define MC_CMD_SUBNET_MAP_SET_NODE 0x114 #undef MC_CMD_0x114_PRIVILEGE_CTG -#define MC_CMD_0x114_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x114_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND /* MC_CMD_SUBNET_MAP_SET_NODE_IN msgrequest */ #define MC_CMD_SUBNET_MAP_SET_NODE_IN_LENMIN 6 @@ -14908,6 +15742,7 @@ #define MC_CMD_SUBNET_MAP_SET_NODE_IN_LEN(num) (4+2*(num)) /* node to update in the range 0 .. SUBNET_MAP_NUM_NODES-1 */ #define MC_CMD_SUBNET_MAP_SET_NODE_IN_NODE_ID_OFST 0 +#define MC_CMD_SUBNET_MAP_SET_NODE_IN_NODE_ID_LEN 4 /* SUBNET_MAP_NUM_ENTRIES_PER_NODE new entries; each entry is either a pointer * to the next node, expressed as an offset in the trie memory (i.e. node ID * multiplied by SUBNET_MAP_NUM_ENTRIES_PER_NODE), or a leaf value in the range @@ -14928,7 +15763,7 @@ */ #define PORTRANGE_TREE_ENTRY_BRANCH_KEY_OFST 0 #define PORTRANGE_TREE_ENTRY_BRANCH_KEY_LEN 2 -#define PORTRANGE_TREE_ENTRY_LEAF_NODE_KEY 0xffff /* enum */ +#define PORTRANGE_TREE_ENTRY_LEAF_NODE_KEY 0xffff /* enum */ #define PORTRANGE_TREE_ENTRY_BRANCH_KEY_LBN 0 #define PORTRANGE_TREE_ENTRY_BRANCH_KEY_WIDTH 16 /* final portrange ID for leaf nodes (don't care for branch nodes) */ @@ -14951,7 +15786,7 @@ #define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE 0x115 #undef MC_CMD_0x115_PRIVILEGE_CTG -#define MC_CMD_0x115_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x115_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND /* MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN msgrequest */ #define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN_LENMIN 4 @@ -14982,7 +15817,7 @@ #define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE 0x116 #undef MC_CMD_0x116_PRIVILEGE_CTG -#define MC_CMD_0x116_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x116_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND /* MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN msgrequest */ #define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN_LENMIN 4 @@ -15005,18 +15840,18 @@ #define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_OFST 0 #define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LEN 2 /* enum: the IANA allocated UDP port for VXLAN */ -#define TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_VXLAN_UDP_PORT 0x12b5 +#define TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_VXLAN_UDP_PORT 0x12b5 /* enum: the IANA allocated UDP port for Geneve */ -#define TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_GENEVE_UDP_PORT 0x17c1 +#define TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_GENEVE_UDP_PORT 0x17c1 #define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LBN 0 #define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_WIDTH 16 /* tunnel encapsulation protocol (only those named below are supported) */ #define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_OFST 2 #define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LEN 2 /* enum: This port will be used for VXLAN on both IPv4 and IPv6 */ -#define TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN 0x0 +#define TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN 0x0 /* enum: This port will be used for Geneve on both IPv4 and IPv6 */ -#define TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE 0x1 +#define TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE 0x1 #define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LBN 16 #define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_WIDTH 16 @@ -15073,18 +15908,22 @@ #define MC_CMD_RX_BALANCING 0x118 #undef MC_CMD_0x118_PRIVILEGE_CTG -#define MC_CMD_0x118_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x118_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND /* MC_CMD_RX_BALANCING_IN msgrequest */ #define MC_CMD_RX_BALANCING_IN_LEN 16 /* The RX port whose upconverter table will be modified */ #define 
MC_CMD_RX_BALANCING_IN_PORT_OFST 0 +#define MC_CMD_RX_BALANCING_IN_PORT_LEN 4 /* The VLAN priority associated with the table index and vFIFO */ #define MC_CMD_RX_BALANCING_IN_PRIORITY_OFST 4 +#define MC_CMD_RX_BALANCING_IN_PRIORITY_LEN 4 /* The resulting bit of SRC^DST for indexing the table */ #define MC_CMD_RX_BALANCING_IN_SRC_DST_OFST 8 +#define MC_CMD_RX_BALANCING_IN_SRC_DST_LEN 4 /* The RX engine to which the vFIFO in the table entry will point */ #define MC_CMD_RX_BALANCING_IN_ENG_OFST 12 +#define MC_CMD_RX_BALANCING_IN_ENG_LEN 4 /* MC_CMD_RX_BALANCING_OUT msgresponse */ #define MC_CMD_RX_BALANCING_OUT_LEN 0 @@ -15093,12 +15932,7 @@ /***********************************/ /* MC_CMD_TSA_BIND * TSAN - TSAC binding communication protocol. Refer to SF-115479-TC for more - * info in respect to the binding protocol. This MCDI command is only available - * over a TLS secure connection between the TSAN and TSAC, and is not available - * to host software. Note- The messages definitions that do comprise this MCDI - * command deemed as provisional. This MCDI command has not yet been used in - * any released code and may change during development. This note will be - * removed once it is regarded as stable. + * info with respect to the binding protocol. */ #define MC_CMD_TSA_BIND 0x119 #undef MC_CMD_0x119_PRIVILEGE_CTG @@ -15108,15 +15942,13 @@ /* MC_CMD_TSA_BIND_IN msgrequest: Protocol operation code */ #define MC_CMD_TSA_BIND_IN_LEN 4 #define MC_CMD_TSA_BIND_IN_OP_OFST 0 -/* enum: Retrieve the TSAN ID from a TSAN. TSAN ID is a unique identifier for - * the network adapter. More specifically, TSAN ID equals the MAC address of - * the network adapter. TSAN ID is used as part of the TSAN authentication - * protocol. Refer to SF-114946-SW for more information. - */ +#define MC_CMD_TSA_BIND_IN_OP_LEN 4 +/* enum: Obsolete. Use MC_CMD_SECURE_NIC_INFO_IN_STATUS. */ #define MC_CMD_TSA_BIND_OP_GET_ID 0x1 /* enum: Get a binding ticket from the TSAN. The binding ticket is used as part * of the binding procedure to authorize the binding of an adapter to a TSAID. - * Refer to SF-114946-SW for more information. + * Refer to SF-114946-SW for more information. This sub-command is only + * available over a TLS secure connection between the TSAN and TSAC. */ #define MC_CMD_TSA_BIND_OP_GET_TICKET 0x2 /* enum: Opcode associated with the propagation of a private key that TSAN uses @@ -15124,18 +15956,47 @@ * uses this key for a signing operation. TSAC uses the counterpart public key * to verify the signature. Note - The post-binding authentication occurs when * the TSAN-TSAC connection terminates and TSAN tries to reconnect. Refer to - * SF-114946-SW for more information. + * SF-114946-SW for more information. This sub-command is only available over a + * TLS secure connection between the TSAN and TSAC. */ #define MC_CMD_TSA_BIND_OP_SET_KEY 0x3 -/* enum: Request an unbinding operation. Note- TSAN clears the binding ticket - * from the Nvram section. +/* enum: Request an insecure unbinding operation. This sub-command is available + * for any privileged client. */ #define MC_CMD_TSA_BIND_OP_UNBIND 0x4 - -/* MC_CMD_TSA_BIND_IN_GET_ID msgrequest */ +/* enum: Obsolete. Use MC_CMD_TSA_BIND_OP_SECURE_UNBIND. */ +#define MC_CMD_TSA_BIND_OP_UNBIND_EXT 0x5 +/* enum: Opcode associated with the propagation of the unbinding secret token. + * TSAN persists the unbinding secret token. Refer to SF-115479-TC for more + * information. This sub-command is only available over a TLS secure connection + * between the TSAN and TSAC.
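+ *
+ * For illustration only (not part of the MCDI definitions), a caller could
+ * populate the MC_CMD_TSA_BIND_IN_SET_UNBINDTOKEN request defined below
+ * using the offsets and lengths from this header. MCDI dwords are
+ * little-endian, so the single byte write below yields an OP dword of 0x6;
+ * "token" stands for a hypothetical caller-supplied 16-byte secret, and
+ * <stdint.h>/<string.h> are assumed:
+ *
+ *   uint8_t req[MC_CMD_TSA_BIND_IN_SET_UNBINDTOKEN_LEN];   // 20 bytes
+ *   memset(req, 0, sizeof(req));
+ *   req[MC_CMD_TSA_BIND_IN_SET_UNBINDTOKEN_OP_OFST] =
+ *           MC_CMD_TSA_BIND_OP_SET_UNBINDTOKEN;            // 0x6, LE dword
+ *   memcpy(req + MC_CMD_TSA_BIND_IN_SET_UNBINDTOKEN_UNBINDTOKEN_OFST,
+ *          token, MC_CMD_TSA_BIND_IN_SET_UNBINDTOKEN_UNBINDTOKEN_LEN);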
+ */ +#define MC_CMD_TSA_BIND_OP_SET_UNBINDTOKEN 0x6 +/* enum: Obsolete. Use MC_CMD_TSA_BIND_OP_SECURE_DECOMMISSION. */ +#define MC_CMD_TSA_BIND_OP_DECOMMISSION 0x7 +/* enum: Obsolete. Use MC_CMD_GET_CERTIFICATE. */ +#define MC_CMD_TSA_BIND_OP_GET_CERTIFICATE 0x8 +/* enum: Request a secure unbinding operation using an unbinding token. This sub- + * command is available for any privileged client. + */ +#define MC_CMD_TSA_BIND_OP_SECURE_UNBIND 0x9 +/* enum: Request a secure decommissioning operation. This sub-command is + * available for any privileged client. + */ +#define MC_CMD_TSA_BIND_OP_SECURE_DECOMMISSION 0xa +/* enum: Test facility that allows an adapter to be configured to behave as if + * bound to a TSA controller with restricted MCDI administrator operations. + * This operation is primarily intended to aid host driver development. + */ +#define MC_CMD_TSA_BIND_OP_TEST_MCDI 0xb + +/* MC_CMD_TSA_BIND_IN_GET_ID msgrequest: Obsolete. Use + * MC_CMD_SECURE_NIC_INFO_IN_STATUS. + */ #define MC_CMD_TSA_BIND_IN_GET_ID_LEN 20 /* The operation requested. */ #define MC_CMD_TSA_BIND_IN_GET_ID_OP_OFST 0 +#define MC_CMD_TSA_BIND_IN_GET_ID_OP_LEN 4 /* Cryptographic nonce that TSAC generates and sends to TSAN. TSAC generates * the nonce every time as part of the TSAN post-binding authentication * procedure when the TSAN-TSAC connection terminates and TSAN does need to re- @@ -15148,6 +16009,7 @@ #define MC_CMD_TSA_BIND_IN_GET_TICKET_LEN 4 /* The operation requested. */ #define MC_CMD_TSA_BIND_IN_GET_TICKET_OP_OFST 0 +#define MC_CMD_TSA_BIND_IN_GET_TICKET_OP_LEN 4 /* MC_CMD_TSA_BIND_IN_SET_KEY msgrequest */ #define MC_CMD_TSA_BIND_IN_SET_KEY_LENMIN 5 @@ -15155,6 +16017,7 @@ #define MC_CMD_TSA_BIND_IN_SET_KEY_LEN(num) (4+1*(num)) /* The operation requested. */ #define MC_CMD_TSA_BIND_IN_SET_KEY_OP_OFST 0 +#define MC_CMD_TSA_BIND_IN_SET_KEY_OP_LEN 4 /* This data blob contains the private key generated by the TSAC. TSAN uses * this key for a signing operation. Note- This private key is used in * conjunction with the post-binding TSAN authentication procedure that occurs @@ -15166,26 +16029,261 @@ #define MC_CMD_TSA_BIND_IN_SET_KEY_DATKEY_MINNUM 1 #define MC_CMD_TSA_BIND_IN_SET_KEY_DATKEY_MAXNUM 248 -/* MC_CMD_TSA_BIND_IN_UNBIND msgrequest: Asks for the un-binding procedure */ +/* MC_CMD_TSA_BIND_IN_UNBIND msgrequest: Request an insecure unbinding + * operation. + */ #define MC_CMD_TSA_BIND_IN_UNBIND_LEN 10 /* The operation requested. */ #define MC_CMD_TSA_BIND_IN_UNBIND_OP_OFST 0 +#define MC_CMD_TSA_BIND_IN_UNBIND_OP_LEN 4 /* TSAN unique identifier for the network adapter */ #define MC_CMD_TSA_BIND_IN_UNBIND_TSANID_OFST 4 #define MC_CMD_TSA_BIND_IN_UNBIND_TSANID_LEN 6 -/* MC_CMD_TSA_BIND_OUT_GET_ID msgresponse */ +/* MC_CMD_TSA_BIND_IN_UNBIND_EXT msgrequest: Obsolete. Use + * MC_CMD_TSA_BIND_IN_SECURE_UNBIND. + */ +#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_LENMIN 93 +#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_LENMAX 252 +#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_LEN(num) (92+1*(num)) +/* The operation requested. */ +#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_OP_OFST 0 +#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_OP_LEN 4 +/* TSAN unique identifier for the network adapter */ +#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_TSANID_OFST 4 +#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_TSANID_LEN 6 +/* Align the arguments to 32 bits */ +#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_TSANID_RSVD_OFST 10 +#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_TSANID_RSVD_LEN 2 +/* This attribute identifies the TSA infrastructure domain. The length of the
+ * TSAID attribute is limited to 64 bytes. This is how the TSA SDK defines the max + * length. Note- The TSAID is the Organizational Unit Name field as part of the + * root and server certificates. + */ +#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_TSAID_OFST 12 +#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_TSAID_LEN 1 +#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_TSAID_NUM 64 +/* Unbinding secret token. The adapter validates this unbinding token by + * comparing it against the one stored on the adapter as part of the + * MC_CMD_TSA_BIND_IN_SET_UNBINDTOKEN msgrequest. Refer to SF-115479-TC for + * more information. + */ +#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_UNBINDTOKEN_OFST 76 +#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_UNBINDTOKEN_LEN 16 +/* This is the signature of the above-mentioned fields- TSANID, TSAID and + * UNBINDTOKEN. As per current requirements, the SIG opaque data blob contains + * an ECDSA ECC-384 based signature. The ECC curve is secp384r1. The signature is + * also ASN-1 encoded. Note- The signature is verified based on the public key + * stored in the root certificate that is provisioned on the adapter side. + * This key is known as the PUKtsaid. Refer to SF-115479-TC for more + * information. + */ +#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_SIG_OFST 92 +#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_SIG_LEN 1 +#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_SIG_MINNUM 1 +#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_SIG_MAXNUM 160 + +/* MC_CMD_TSA_BIND_IN_SET_UNBINDTOKEN msgrequest */ +#define MC_CMD_TSA_BIND_IN_SET_UNBINDTOKEN_LEN 20 +/* The operation requested. */ +#define MC_CMD_TSA_BIND_IN_SET_UNBINDTOKEN_OP_OFST 0 +#define MC_CMD_TSA_BIND_IN_SET_UNBINDTOKEN_OP_LEN 4 +/* Unbinding secret token. TSAN persists the unbinding secret token. Refer to + * SF-115479-TC for more information. + */ +#define MC_CMD_TSA_BIND_IN_SET_UNBINDTOKEN_UNBINDTOKEN_OFST 4 +#define MC_CMD_TSA_BIND_IN_SET_UNBINDTOKEN_UNBINDTOKEN_LEN 16 +/* enum: There are situations when the binding process does not complete + * successfully due to corruption of the key or other attributes at the + * database level (Controller). The adapter can no longer connect to the + * controller. To recover, use the decommission command, which forces the + * adapter into the unbound state. + */ +#define MC_CMD_TSA_BIND_IN_SET_UNBINDTOKEN_ADAPTER_BINDING_FAILURE 0x1 + +/* MC_CMD_TSA_BIND_IN_DECOMMISSION msgrequest: Obsolete. Use + * MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION. + */ +#define MC_CMD_TSA_BIND_IN_DECOMMISSION_LENMIN 109 +#define MC_CMD_TSA_BIND_IN_DECOMMISSION_LENMAX 252 +#define MC_CMD_TSA_BIND_IN_DECOMMISSION_LEN(num) (108+1*(num)) +/* This is the signature of the above-mentioned fields- TSAID, USER and REASON. + * As per current requirements, the SIG opaque data blob contains an ECDSA ECC-384 + * based signature. The ECC curve is secp384r1. The signature is also ASN-1 + * encoded. Note- The signature is verified based on the public key stored + * in the root certificate that is provisioned on the adapter side. This key + * is known as the PUKtsaid. Refer to SF-115479-TC for more information. + */ +#define MC_CMD_TSA_BIND_IN_DECOMMISSION_SIG_OFST 108 +#define MC_CMD_TSA_BIND_IN_DECOMMISSION_SIG_LEN 1 +#define MC_CMD_TSA_BIND_IN_DECOMMISSION_SIG_MINNUM 1 +#define MC_CMD_TSA_BIND_IN_DECOMMISSION_SIG_MAXNUM 144 +/* The operation requested. */ +#define MC_CMD_TSA_BIND_IN_DECOMMISSION_OP_OFST 0 +#define MC_CMD_TSA_BIND_IN_DECOMMISSION_OP_LEN 4 +/* This attribute identifies the TSA infrastructure domain. The length of the + * TSAID attribute is limited to 64 bytes. This is how the TSA SDK defines the max + * length. Note- The TSAID is the Organizational Unit Name field as part of the + * root and server certificates. + */ +#define MC_CMD_TSA_BIND_IN_DECOMMISSION_TSAID_OFST 4 +#define MC_CMD_TSA_BIND_IN_DECOMMISSION_TSAID_LEN 1 +#define MC_CMD_TSA_BIND_IN_DECOMMISSION_TSAID_NUM 64 +/* User ID that comes, as an example, from the Controller. Note- The 33-byte + * length of this attribute is the max length of the Linux user name plus the + * null character. + */ +#define MC_CMD_TSA_BIND_IN_DECOMMISSION_USER_OFST 68 +#define MC_CMD_TSA_BIND_IN_DECOMMISSION_USER_LEN 1 +#define MC_CMD_TSA_BIND_IN_DECOMMISSION_USER_NUM 33 +/* Align the arguments to 32 bits */ +#define MC_CMD_TSA_BIND_IN_DECOMMISSION_USER_RSVD_OFST 101 +#define MC_CMD_TSA_BIND_IN_DECOMMISSION_USER_RSVD_LEN 3 +/* Reason why decommissioning happens. Note- The list of reasons, defined as + * part of the enumeration below, can be extended. + */ +#define MC_CMD_TSA_BIND_IN_DECOMMISSION_REASON_OFST 104 +#define MC_CMD_TSA_BIND_IN_DECOMMISSION_REASON_LEN 4 + +/* MC_CMD_TSA_BIND_IN_GET_CERTIFICATE msgrequest: Obsolete. Use + * MC_CMD_GET_CERTIFICATE. + */ +#define MC_CMD_TSA_BIND_IN_GET_CERTIFICATE_LEN 8 +/* The operation requested; must be MC_CMD_TSA_BIND_OP_GET_CERTIFICATE. */ +#define MC_CMD_TSA_BIND_IN_GET_CERTIFICATE_OP_OFST 0 +#define MC_CMD_TSA_BIND_IN_GET_CERTIFICATE_OP_LEN 4 +/* Type of the certificate to be retrieved. */ +#define MC_CMD_TSA_BIND_IN_GET_CERTIFICATE_TYPE_OFST 4 +#define MC_CMD_TSA_BIND_IN_GET_CERTIFICATE_TYPE_LEN 4 +#define MC_CMD_TSA_BIND_IN_GET_CERTIFICATE_UNUSED 0x0 /* enum */ +/* enum: Adapter Authentication Certificate (AAC). The AAC is used by the + * controller to verify the authenticity of the adapter. + */ +#define MC_CMD_TSA_BIND_IN_GET_CERTIFICATE_AAC 0x1 +/* enum: Adapter Authentication Signing Certificate (AASC). The AASC is used by + * the controller to verify the validity of the AAC. + */ +#define MC_CMD_TSA_BIND_IN_GET_CERTIFICATE_AASC 0x2 + +/* MC_CMD_TSA_BIND_IN_SECURE_UNBIND msgrequest: Request a secure unbinding + * operation using an unbinding token. + */ +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_LENMIN 97 +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_LENMAX 200 +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_LEN(num) (96+1*(num)) +/* The operation requested; must be MC_CMD_TSA_BIND_OP_SECURE_UNBIND. */ +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_OP_OFST 0 +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_OP_LEN 4 +/* Type of the message. (MESSAGE_TYPE_xxx) Must be + * MESSAGE_TYPE_TSA_SECURE_UNBIND. + */ +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_MESSAGE_TYPE_OFST 4 +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_MESSAGE_TYPE_LEN 4 +/* TSAN unique identifier for the network adapter */ +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_TSANID_OFST 8 +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_TSANID_LEN 6 +/* Align the arguments to 32 bits */ +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_TSANID_RSVD_OFST 14 +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_TSANID_RSVD_LEN 2 +/* A NUL padded US-ASCII string identifying the TSA infrastructure domain. This + * field is for information only, and not used by the firmware. Note- The TSAID + * is the Organizational Unit Name field as part of the root and server + * certificates. + */ +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_TSAID_OFST 16 +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_TSAID_LEN 1 +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_TSAID_NUM 64 +/* Unbinding secret token. The adapter validates this unbinding token by + * comparing it against the one stored on the adapter as part of the + * MC_CMD_TSA_BIND_IN_SET_UNBINDTOKEN msgrequest. Refer to SF-115479-TC for + * more information. + */ +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_UNBINDTOKEN_OFST 80 +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_UNBINDTOKEN_LEN 16 +/* The signature computed and encoded as specified by MESSAGE_TYPE. */ +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_SIG_OFST 96 +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_SIG_LEN 1 +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_SIG_MINNUM 1 +#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_SIG_MAXNUM 104 + +/* MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION msgrequest: Request a secure + * decommissioning operation. + */ +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_LENMIN 113 +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_LENMAX 216 +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_LEN(num) (112+1*(num)) +/* The operation requested; must be MC_CMD_TSA_BIND_OP_SECURE_DECOMMISSION. */ +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_OP_OFST 0 +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_OP_LEN 4 +/* Type of the message. (MESSAGE_TYPE_xxx) Must be + * MESSAGE_TYPE_SECURE_DECOMMISSION. + */ +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_MESSAGE_TYPE_OFST 4 +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_MESSAGE_TYPE_LEN 4 +/* A NUL padded US-ASCII string identifying the TSA infrastructure domain. This + * field is for information only, and not used by the firmware. Note- The TSAID + * is the Organizational Unit Name field as part of the root and server + * certificates. + */ +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_TSAID_OFST 8 +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_TSAID_LEN 1 +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_TSAID_NUM 64 +/* A NUL padded US-ASCII string containing the user name of the creator of the + * decommissioning ticket. This field is for information only, and not used by + * the firmware. + */ +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_USER_OFST 72 +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_USER_LEN 1 +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_USER_NUM 36 +/* Reason why decommissioning happens */ +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_REASON_OFST 108 +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_REASON_LEN 4 +/* enum: There are situations when the binding process does not complete + * successfully due to corruption of the key or other attributes at the + * database level (Controller). The adapter can no longer connect to the + * controller. To recover, use the decommission command to force the adapter + * into the unbound state. + */ +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_ADAPTER_BINDING_FAILURE 0x1 +/* The signature computed and encoded as specified by MESSAGE_TYPE. */ +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_SIG_OFST 112 +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_SIG_LEN 1 +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_SIG_MINNUM 1 +#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_SIG_MAXNUM 104 + +/* MC_CMD_TSA_BIND_IN_TEST_MCDI msgrequest: Test mode that emulates MCDI + * interface restrictions of a bound adapter. This operation is intended for + * test use on adapters that are not deployed and bound to a TSA Controller. + * Using it on a bound adapter will succeed but will not alter the MCDI + * privileges as MCDI operations will already be restricted. + */ +#define MC_CMD_TSA_BIND_IN_TEST_MCDI_LEN 8 +/* The operation requested must be MC_CMD_TSA_BIND_OP_TEST_MCDI.
*/ +#define MC_CMD_TSA_BIND_IN_TEST_MCDI_OP_OFST 0 +#define MC_CMD_TSA_BIND_IN_TEST_MCDI_OP_LEN 4 +/* Enable or disable emulation of bound adapter */ +#define MC_CMD_TSA_BIND_IN_TEST_MCDI_CTRL_OFST 4 +#define MC_CMD_TSA_BIND_IN_TEST_MCDI_CTRL_LEN 4 +#define MC_CMD_TSA_BIND_IN_TEST_MCDI_DISABLE 0x0 /* enum */ +#define MC_CMD_TSA_BIND_IN_TEST_MCDI_ENABLE 0x1 /* enum */ + +/* MC_CMD_TSA_BIND_OUT_GET_ID msgresponse: Obsolete. Use + * MC_CMD_SECURE_NIC_INFO_OUT_STATUS. + */ #define MC_CMD_TSA_BIND_OUT_GET_ID_LENMIN 15 #define MC_CMD_TSA_BIND_OUT_GET_ID_LENMAX 252 #define MC_CMD_TSA_BIND_OUT_GET_ID_LEN(num) (14+1*(num)) -/* The operation completion code. */ +/* The protocol operation code MC_CMD_TSA_BIND_OP_GET_ID that is sent back to + * the caller. + */ #define MC_CMD_TSA_BIND_OUT_GET_ID_OP_OFST 0 +#define MC_CMD_TSA_BIND_OUT_GET_ID_OP_LEN 4 /* Rules engine type. Note- The rules engine type allows TSAC to further * identify the connected endpoint (e.g. TSAN, NIC Emulator) type and take the * proper action accordingly. As an example, TSAC uses the rules engine type to * select the SF key that differs in the case of TSAN vs. NIC Emulator. */ #define MC_CMD_TSA_BIND_OUT_GET_ID_RULE_ENGINE_OFST 4 +#define MC_CMD_TSA_BIND_OUT_GET_ID_RULE_ENGINE_LEN 4 /* enum: Hardware rules engine. */ #define MC_CMD_TSA_BIND_OUT_GET_ID_RULE_ENGINE_TSAN 0x1 /* enum: Nic emulator rules engine. */ @@ -15209,8 +16307,11 @@ #define MC_CMD_TSA_BIND_OUT_GET_TICKET_LENMIN 5 #define MC_CMD_TSA_BIND_OUT_GET_TICKET_LENMAX 252 #define MC_CMD_TSA_BIND_OUT_GET_TICKET_LEN(num) (4+1*(num)) -/* The operation completion code. */ +/* The protocol operation code MC_CMD_TSA_BIND_OP_GET_TICKET that is sent back + * to the caller. + */ #define MC_CMD_TSA_BIND_OUT_GET_TICKET_OP_OFST 0 +#define MC_CMD_TSA_BIND_OUT_GET_TICKET_OP_LEN 4 /* The ticket represents the data blob construct that TSAN sends to TSAC as * part of the binding protocol. From the TSAN perspective the ticket is an * opaque construct. For more info refer to SF-115479-TC. @@ -15222,23 +16323,142 @@ /* MC_CMD_TSA_BIND_OUT_SET_KEY msgresponse */ #define MC_CMD_TSA_BIND_OUT_SET_KEY_LEN 4 -/* The operation completion code. */ +/* The protocol operation code MC_CMD_TSA_BIND_OP_SET_KEY that is sent back to + * the caller. + */ #define MC_CMD_TSA_BIND_OUT_SET_KEY_OP_OFST 0 +#define MC_CMD_TSA_BIND_OUT_SET_KEY_OP_LEN 4 -/* MC_CMD_TSA_BIND_OUT_UNBIND msgresponse */ +/* MC_CMD_TSA_BIND_OUT_UNBIND msgresponse: Response to insecure unbind request. + */ #define MC_CMD_TSA_BIND_OUT_UNBIND_LEN 8 /* Same as MC_CMD_ERR field, but included as 0 in success cases */ #define MC_CMD_TSA_BIND_OUT_UNBIND_RESULT_OFST 0 +#define MC_CMD_TSA_BIND_OUT_UNBIND_RESULT_LEN 4 /* Extra status information */ #define MC_CMD_TSA_BIND_OUT_UNBIND_INFO_OFST 4 +#define MC_CMD_TSA_BIND_OUT_UNBIND_INFO_LEN 4 +/* enum: Unbind successful. */ +#define MC_CMD_TSA_BIND_OUT_UNBIND_OK_UNBOUND 0x0 +/* enum: TSANID mismatch */ +#define MC_CMD_TSA_BIND_OUT_UNBIND_ERR_BAD_TSANID 0x1 +/* enum: Unable to remove the binding ticket from persistent storage. */ +#define MC_CMD_TSA_BIND_OUT_UNBIND_ERR_REMOVE_TICKET 0x2 +/* enum: TSAN is not bound to a binding ticket. */ +#define MC_CMD_TSA_BIND_OUT_UNBIND_ERR_NOT_BOUND 0x3 + +/* MC_CMD_TSA_BIND_OUT_UNBIND_EXT msgresponse: Obsolete. Use + * MC_CMD_TSA_BIND_OUT_SECURE_UNBIND. 
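+ *
+ * The RESULT/INFO pair below follows the same layout as
+ * MC_CMD_TSA_BIND_OUT_UNBIND above. For illustration only, a caller on a
+ * little-endian host could decode it as follows ("rx_buf" stands for a
+ * hypothetical byte buffer holding the response payload):
+ *
+ *   uint32_t result, info;
+ *   memcpy(&result, rx_buf + MC_CMD_TSA_BIND_OUT_UNBIND_EXT_RESULT_OFST,
+ *          MC_CMD_TSA_BIND_OUT_UNBIND_EXT_RESULT_LEN);
+ *   memcpy(&info, rx_buf + MC_CMD_TSA_BIND_OUT_UNBIND_EXT_INFO_OFST,
+ *          MC_CMD_TSA_BIND_OUT_UNBIND_EXT_INFO_LEN);
+ *   if (result == 0 && info == MC_CMD_TSA_BIND_OUT_UNBIND_EXT_OK_UNBOUND)
+ *           ;                               // unbound successfully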
+ */ +#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_LEN 8 +/* Same as MC_CMD_ERR field, but included as 0 in success cases */ +#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_RESULT_OFST 0 +#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_RESULT_LEN 4 +/* Extra status information */ +#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_INFO_OFST 4 +#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_INFO_LEN 4 /* enum: Unbind successful. */ -#define MC_CMD_TSA_BIND_OUT_UNBIND_OK_UNBOUND 0x0 +#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_OK_UNBOUND 0x0 /* enum: TSANID mismatch */ -#define MC_CMD_TSA_BIND_OUT_UNBIND_ERR_BAD_TSANID 0x1 +#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_ERR_BAD_TSANID 0x1 /* enum: Unable to remove the binding ticket from persistent storage. */ -#define MC_CMD_TSA_BIND_OUT_UNBIND_ERR_REMOVE_TICKET 0x2 +#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_ERR_REMOVE_TICKET 0x2 /* enum: TSAN is not bound to a binding ticket. */ -#define MC_CMD_TSA_BIND_OUT_UNBIND_ERR_NOT_BOUND 0x3 +#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_ERR_NOT_BOUND 0x3 +/* enum: Invalid unbind token */ +#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_ERR_BAD_TOKEN 0x4 +/* enum: Invalid signature */ +#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_ERR_BAD_SIGNATURE 0x5 + +/* MC_CMD_TSA_BIND_OUT_SET_UNBINDTOKEN msgresponse */ +#define MC_CMD_TSA_BIND_OUT_SET_UNBINDTOKEN_LEN 4 +/* The protocol operation code MC_CMD_TSA_BIND_OP_SET_UNBINDTOKEN that is sent + * back to the caller. + */ +#define MC_CMD_TSA_BIND_OUT_SET_UNBINDTOKEN_OP_OFST 0 +#define MC_CMD_TSA_BIND_OUT_SET_UNBINDTOKEN_OP_LEN 4 + +/* MC_CMD_TSA_BIND_OUT_DECOMMISSION msgresponse: Obsolete. Use + * MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION. + */ +#define MC_CMD_TSA_BIND_OUT_DECOMMISSION_LEN 4 +/* The protocol operation code MC_CMD_TSA_BIND_OP_DECOMMISSION that is sent + * back to the caller. + */ +#define MC_CMD_TSA_BIND_OUT_DECOMMISSION_OP_OFST 0 +#define MC_CMD_TSA_BIND_OUT_DECOMMISSION_OP_LEN 4 + +/* MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE msgresponse */ +#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_LENMIN 9 +#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_LENMAX 252 +#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_LEN(num) (8+1*(num)) +/* The protocol operation code MC_CMD_TSA_BIND_OP_GET_CERTIFICATE that is sent + * back to the caller. + */ +#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_OP_OFST 0 +#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_OP_LEN 4 +/* Type of the certificate. */ +#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_TYPE_OFST 4 +#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_TSA_BIND_IN_GET_CERTIFICATE/TYPE */ +/* The certificate data. */ +#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_DATA_OFST 8 +#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_DATA_LEN 1 +#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_DATA_MINNUM 1 +#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_DATA_MAXNUM 244 + +/* MC_CMD_TSA_BIND_OUT_SECURE_UNBIND msgresponse: Response to secure unbind + * request. + */ +#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_LEN 8 +/* The protocol operation code that is sent back to the caller. */ +#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_OP_OFST 0 +#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_OP_LEN 4 +#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_RESULT_OFST 4 +#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_RESULT_LEN 4 +/* enum: Unbind successful. */ +#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_OK_UNBOUND 0x0 +/* enum: TSANID mismatch */ +#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_ERR_BAD_TSANID 0x1 +/* enum: Unable to remove the binding ticket from persistent storage. 
*/ +#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_ERR_REMOVE_TICKET 0x2 +/* enum: TSAN is not bound to a domain. */ +#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_ERR_NOT_BOUND 0x3 +/* enum: Invalid unbind token */ +#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_ERR_BAD_TOKEN 0x4 +/* enum: Invalid signature */ +#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_ERR_BAD_SIGNATURE 0x5 + +/* MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION msgresponse: Response to secure + * decommission request. + */ +#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_LEN 8 +/* The protocol operation code that is sent back to the caller. */ +#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_OP_OFST 0 +#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_OP_LEN 4 +#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_RESULT_OFST 4 +#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_RESULT_LEN 4 +/* enum: Unbind successful. */ +#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_OK_UNBOUND 0x0 +/* enum: TSANID mismatch */ +#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_ERR_BAD_TSANID 0x1 +/* enum: Unable to remove the binding ticket from persistent storage. */ +#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_ERR_REMOVE_TICKET 0x2 +/* enum: TSAN is not bound to a domain. */ +#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_ERR_NOT_BOUND 0x3 +/* enum: Invalid unbind token */ +#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_ERR_BAD_TOKEN 0x4 +/* enum: Invalid signature */ +#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_ERR_BAD_SIGNATURE 0x5 + +/* MC_CMD_TSA_BIND_OUT_TEST_MCDI msgresponse */ +#define MC_CMD_TSA_BIND_OUT_TEST_MCDI_LEN 4 +/* The protocol operation code MC_CMD_TSA_BIND_OP_TEST_MCDI that is sent back + * to the caller. + */ +#define MC_CMD_TSA_BIND_OUT_TEST_MCDI_OP_OFST 0 +#define MC_CMD_TSA_BIND_OUT_TEST_MCDI_OP_LEN 4 /***********************************/ @@ -15251,9 +16471,9 @@ * will be loaded at power on or MC reboot, instead of the default ruleset. * Rollback of the currently active ruleset to the cached version (when it is * valid) is also supported. (Medford-only; for use by SolarSecure apps, not - * directly by drivers. See SF-114946-SW.) NOTE - this message definition is - * provisional. It has not yet been used in any released code and may change - * during development. This note will be removed once it is regarded as stable. + * directly by drivers. See SF-114946-SW.) NOTE - The only sub-operation + * allowed in an adapter bound to a TSA controller from the local host is + * OP_GET_CACHED_VERSION. All other sub-operations are prohibited. */ #define MC_CMD_MANAGE_SECURITY_RULESET_CACHE 0x11a #undef MC_CMD_0x11a_PRIVILEGE_CTG @@ -15264,18 +16484,19 @@ #define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_LEN 4 /* the operation to perform */ #define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_OFST 0 +#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_LEN 4 /* enum: reports the ruleset version that is cached in persistent storage but * performs no other action */ -#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_GET_CACHED_VERSION 0x0 +#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_GET_CACHED_VERSION 0x0 /* enum: rolls back the active state to the cached version. (May fail with * ENOENT if there is no valid cached version.)
*/ -#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_ROLLBACK 0x1 +#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_ROLLBACK 0x1 /* enum: commits the active state to the persistent cache */ -#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_COMMIT 0x2 +#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_COMMIT 0x2 /* enum: invalidates the persistent cache without affecting the active state */ -#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_INVALIDATE 0x3 +#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_INVALIDATE 0x3 /* MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT msgresponse */ #define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_LENMIN 5 @@ -15285,12 +16506,13 @@ * requested operation in the case of rollback, commit, or invalidate) */ #define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_STATE_OFST 0 +#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_STATE_LEN 4 /* enum: persistent cache is invalid (the VERSION field will be empty in this * case) */ -#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_STATE_INVALID 0x0 +#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_STATE_INVALID 0x0 /* enum: persistent cache is valid */ -#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_STATE_VALID 0x1 +#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_STATE_VALID 0x1 /* cached ruleset version (after completion of the requested operation, in the * case of rollback, commit, or invalidate) as an opaque hash value in the same * form as MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_VERSION @@ -15309,7 +16531,7 @@ #define MC_CMD_NVRAM_PRIVATE_APPEND 0x11c #undef MC_CMD_0x11c_PRIVILEGE_CTG -#define MC_CMD_0x11c_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x11c_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND /* MC_CMD_NVRAM_PRIVATE_APPEND_IN msgrequest */ #define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENMIN 9 @@ -15317,8 +16539,10 @@ #define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LEN(num) (8+1*(num)) /* The tag to be appended */ #define MC_CMD_NVRAM_PRIVATE_APPEND_IN_TAG_OFST 0 +#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_TAG_LEN 4 /* The length of the data */ #define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENGTH_OFST 4 +#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENGTH_LEN 4 /* The data to be contained in the TLV structure */ #define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_OFST 8 #define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_LEN 1 @@ -15344,6 +16568,7 @@ #define MC_CMD_XPM_VERIFY_CONTENTS_IN_LEN 4 /* Data type to be checked */ #define MC_CMD_XPM_VERIFY_CONTENTS_IN_DATA_TYPE_OFST 0 +#define MC_CMD_XPM_VERIFY_CONTENTS_IN_DATA_TYPE_LEN 4 /* MC_CMD_XPM_VERIFY_CONTENTS_OUT msgresponse */ #define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LENMIN 12 @@ -15351,10 +16576,13 @@ #define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LEN(num) (12+1*(num)) /* Number of sectors found (test builds only) */ #define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_SECTORS_OFST 0 +#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_SECTORS_LEN 4 /* Number of bytes found (test builds only) */ #define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_BYTES_OFST 4 +#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_BYTES_LEN 4 /* Length of signature */ #define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIG_LENGTH_OFST 8 +#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIG_LENGTH_LEN 4 /* Signature */ #define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_OFST 12 #define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_LEN 1 @@ -15380,23 +16608,29 @@ #define MC_CMD_SET_EVQ_TMR_IN_LEN 16 /* Function-relative queue instance */ #define MC_CMD_SET_EVQ_TMR_IN_INSTANCE_OFST 0 +#define MC_CMD_SET_EVQ_TMR_IN_INSTANCE_LEN 4 /* Requested value for timer load (in nanoseconds) */ #define 
MC_CMD_SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS_OFST 4 +#define MC_CMD_SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS_LEN 4 /* Requested value for timer reload (in nanoseconds) */ #define MC_CMD_SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS_OFST 8 +#define MC_CMD_SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS_LEN 4 /* Timer mode. Meanings as per EVQ_TMR_REG.TC_TIMER_VAL */ #define MC_CMD_SET_EVQ_TMR_IN_TMR_MODE_OFST 12 -#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_DIS 0x0 /* enum */ -#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_IMMED_START 0x1 /* enum */ -#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_TRIG_START 0x2 /* enum */ -#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_INT_HLDOFF 0x3 /* enum */ +#define MC_CMD_SET_EVQ_TMR_IN_TMR_MODE_LEN 4 +#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_DIS 0x0 /* enum */ +#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_IMMED_START 0x1 /* enum */ +#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_TRIG_START 0x2 /* enum */ +#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_INT_HLDOFF 0x3 /* enum */ /* MC_CMD_SET_EVQ_TMR_OUT msgresponse */ #define MC_CMD_SET_EVQ_TMR_OUT_LEN 8 /* Actual value for timer load (in nanoseconds) */ #define MC_CMD_SET_EVQ_TMR_OUT_TMR_LOAD_ACT_NS_OFST 0 +#define MC_CMD_SET_EVQ_TMR_OUT_TMR_LOAD_ACT_NS_LEN 4 /* Actual value for timer reload (in nanoseconds) */ #define MC_CMD_SET_EVQ_TMR_OUT_TMR_RELOAD_ACT_NS_OFST 4 +#define MC_CMD_SET_EVQ_TMR_OUT_TMR_RELOAD_ACT_NS_LEN 4 /***********************************/ @@ -15415,29 +16649,35 @@ #define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_LEN 36 /* Reserved for future use. */ #define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_FLAGS_OFST 0 +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_FLAGS_LEN 4 /* For timers updated via writes to EVQ_TMR_REG, this is the time interval (in * nanoseconds) for each increment of the timer load/reload count. The * requested duration of a timer is this value multiplied by the timer * load/reload count. */ #define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_NS_PER_COUNT_OFST 4 +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_NS_PER_COUNT_LEN 4 /* For timers updated via writes to EVQ_TMR_REG, this is the maximum value * allowed for timer load/reload counts. */ #define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_MAX_COUNT_OFST 8 +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_MAX_COUNT_LEN 4 /* For timers updated via writes to EVQ_TMR_REG, timer load/reload counts not a * multiple of this step size will be rounded in an implementation defined * manner. */ #define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_STEP_OFST 12 +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_STEP_LEN 4 /* Maximum timer duration (in nanoseconds) for timers updated via MCDI. Only * meaningful if MC_CMD_SET_EVQ_TMR is implemented. */ #define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_MAX_NS_OFST 16 +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_MAX_NS_LEN 4 /* Timer durations requested via MCDI that are not a multiple of this step size * will be rounded up. Only meaningful if MC_CMD_SET_EVQ_TMR is implemented. */ #define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_STEP_NS_OFST 20 +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_STEP_NS_LEN 4 /* For timers updated using the bug35388 workaround, this is the time interval * (in nanoseconds) for each increment of the timer load/reload count. The * requested duration of a timer is this value multiplied by the timer @@ -15445,17 +16685,20 @@ * is enabled. 
*/ #define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_NS_PER_COUNT_OFST 24 +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_NS_PER_COUNT_LEN 4 /* For timers updated using the bug35388 workaround, this is the maximum value * allowed for timer load/reload counts. This field is only meaningful if the * bug35388 workaround is enabled. */ #define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_MAX_COUNT_OFST 28 +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_MAX_COUNT_LEN 4 /* For timers updated using the bug35388 workaround, timer load/reload counts * not a multiple of this step size will be rounded in an implementation * defined manner. This field is only meaningful if the bug35388 workaround is * enabled. */ #define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_STEP_OFST 32 +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_STEP_LEN 4 /***********************************/ @@ -15466,7 +16709,7 @@ #define MC_CMD_ALLOCATE_TX_VFIFO_CP 0x11d #undef MC_CMD_0x11d_PRIVILEGE_CTG -#define MC_CMD_0x11d_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x11d_PRIVILEGE_CTG SRIOV_CTG_GENERAL /* MC_CMD_ALLOCATE_TX_VFIFO_CP_IN msgrequest */ #define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_LEN 20 @@ -15474,34 +16717,40 @@ * local queue index. */ #define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INSTANCE_OFST 0 +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INSTANCE_LEN 4 /* Will the common pool be used as TX_vFIFO_ULL (1) */ #define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_MODE_OFST 4 -#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_ENABLED 0x1 /* enum */ +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_MODE_LEN 4 +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_ENABLED 0x1 /* enum */ /* enum: Using this interface without TX_vFIFO_ULL is not supported for now */ -#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_DISABLED 0x0 +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_DISABLED 0x0 /* Number of buffers to reserve for the common pool */ #define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_SIZE_OFST 8 +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_SIZE_LEN 4 /* TX datapath to which the Common Pool is connected. */ #define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INGRESS_OFST 12 +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INGRESS_LEN 4 /* enum: Extracts information from function */ -#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE -0x1 +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE -0x1 /* Network port or RX Engine to which the common pool connects.
*/ #define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_EGRESS_OFST 16 +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_EGRESS_LEN 4 /* enum: Extracts information from function */ -/* MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE -0x1 */ -#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT0 0x0 /* enum */ -#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT1 0x1 /* enum */ -#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT2 0x2 /* enum */ -#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT3 0x3 /* enum */ +/* MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE -0x1 */ +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT0 0x0 /* enum */ +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT1 0x1 /* enum */ +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT2 0x2 /* enum */ +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT3 0x3 /* enum */ /* enum: To enable Switch loopback with Rx engine 0 */ -#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_RX_ENGINE0 0x4 +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_RX_ENGINE0 0x4 /* enum: To enable Switch loopback with Rx engine 1 */ -#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_RX_ENGINE1 0x5 +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_RX_ENGINE1 0x5 /* MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT msgresponse */ #define MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_LEN 4 /* ID of the common pool allocated */ #define MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_CP_ID_OFST 0 +#define MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_CP_ID_LEN 4 /***********************************/ @@ -15512,42 +16761,49 @@ #define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO 0x11e #undef MC_CMD_0x11e_PRIVILEGE_CTG -#define MC_CMD_0x11e_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x11e_PRIVILEGE_CTG SRIOV_CTG_GENERAL /* MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN msgrequest */ #define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_LEN 20 /* Common pool previously allocated with which the new vFIFO will be associated */ #define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_CP_OFST 0 +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_CP_LEN 4 /* Port or RX engine to associate the vFIFO egress */ #define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_EGRESS_OFST 4 +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_EGRESS_LEN 4 /* enum: Extracts information from common pool */ -#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_USE_CP_VALUE -0x1 -#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT0 0x0 /* enum */ -#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT1 0x1 /* enum */ -#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT2 0x2 /* enum */ -#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT3 0x3 /* enum */ +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_USE_CP_VALUE -0x1 +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT0 0x0 /* enum */ +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT1 0x1 /* enum */ +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT2 0x2 /* enum */ +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT3 0x3 /* enum */ /* enum: To enable Switch loopback with Rx engine 0 */ -#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_RX_ENGINE0 0x4 +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_RX_ENGINE0 0x4 /* enum: To enable Switch loopback with Rx engine 1 */ -#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_RX_ENGINE1 0x5 +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_RX_ENGINE1 0x5 /* Minimum number of buffers that the pool must have */ #define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_SIZE_OFST 8 +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_SIZE_LEN 4 /* enum: Do not check the space available */ -#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_NO_MINIMUM 0x0 +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_NO_MINIMUM 0x0 /* Will the vFIFO be used as TX_vFIFO_ULL */ #define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_MODE_OFST 12 +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_MODE_LEN 4 /* Network
priority of the vFIFO, if applicable */ #define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PRIORITY_OFST 16 +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PRIORITY_LEN 4 /* enum: Search for the lowest unused priority */ -#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_LOWEST_AVAILABLE -0x1 +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_LOWEST_AVAILABLE -0x1 /* MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT msgresponse */ #define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_LEN 8 /* Short vFIFO ID */ #define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_VID_OFST 0 +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_VID_LEN 4 /* Network priority of the vFIFO */ #define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_PRIORITY_OFST 4 +#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_PRIORITY_LEN 4 /***********************************/ @@ -15558,12 +16814,13 @@ #define MC_CMD_TEARDOWN_TX_VFIFO_VF 0x11f #undef MC_CMD_0x11f_PRIVILEGE_CTG -#define MC_CMD_0x11f_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x11f_PRIVILEGE_CTG SRIOV_CTG_GENERAL /* MC_CMD_TEARDOWN_TX_VFIFO_VF_IN msgrequest */ #define MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_LEN 4 /* Short vFIFO ID */ #define MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_VFIFO_OFST 0 +#define MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_VFIFO_LEN 4 /* MC_CMD_TEARDOWN_TX_VFIFO_VF_OUT msgresponse */ #define MC_CMD_TEARDOWN_TX_VFIFO_VF_OUT_LEN 0 @@ -15577,12 +16834,13 @@ #define MC_CMD_DEALLOCATE_TX_VFIFO_CP 0x121 #undef MC_CMD_0x121_PRIVILEGE_CTG -#define MC_CMD_0x121_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x121_PRIVILEGE_CTG SRIOV_CTG_GENERAL /* MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN msgrequest */ #define MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_LEN 4 /* Common pool ID given when pool allocated */ #define MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_POOL_ID_OFST 0 +#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_POOL_ID_LEN 4 /* MC_CMD_DEALLOCATE_TX_VFIFO_CP_OUT msgresponse */ #define MC_CMD_DEALLOCATE_TX_VFIFO_CP_OUT_LEN 0 @@ -15604,16 +16862,17 @@ #define MC_CMD_REKEY 0x123 #undef MC_CMD_0x123_PRIVILEGE_CTG -#define MC_CMD_0x123_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x123_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND /* MC_CMD_REKEY_IN msgrequest */ #define MC_CMD_REKEY_IN_LEN 4 /* the type of operation requested */ #define MC_CMD_REKEY_IN_OP_OFST 0 +#define MC_CMD_REKEY_IN_OP_LEN 4 /* enum: Start the rekeying operation */ -#define MC_CMD_REKEY_IN_OP_REKEY 0x0 +#define MC_CMD_REKEY_IN_OP_REKEY 0x0 /* enum: Poll for completion of the rekeying operation */ -#define MC_CMD_REKEY_IN_OP_POLL 0x1 +#define MC_CMD_REKEY_IN_OP_POLL 0x1 /* MC_CMD_REKEY_OUT msgresponse */ #define MC_CMD_REKEY_OUT_LEN 0 @@ -15627,7 +16886,7 @@ #define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS 0x124 #undef MC_CMD_0x124_PRIVILEGE_CTG -#define MC_CMD_0x124_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x124_PRIVILEGE_CTG SRIOV_CTG_GENERAL /* MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_IN msgrequest */ #define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_IN_LEN 0 @@ -15636,8 +16895,10 @@ #define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_LEN 8 /* Available buffers for the ENG to NET vFIFOs. */ #define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_NET_OFST 0 +#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_NET_LEN 4 /* Available buffers for the ENG to ENG and NET to ENG vFIFOs.
*/ #define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_ENG_OFST 4 +#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_ENG_LEN 4 /***********************************/ @@ -15653,19 +16914,1231 @@ #define MC_CMD_SET_SECURITY_FUSES 0x126 #undef MC_CMD_0x126_PRIVILEGE_CTG -#define MC_CMD_0x126_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x126_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND /* MC_CMD_SET_SECURITY_FUSES_IN msgrequest */ #define MC_CMD_SET_SECURITY_FUSES_IN_LEN 4 /* Flags specifying what type of security features are being set */ #define MC_CMD_SET_SECURITY_FUSES_IN_FLAGS_OFST 0 +#define MC_CMD_SET_SECURITY_FUSES_IN_FLAGS_LEN 4 #define MC_CMD_SET_SECURITY_FUSES_IN_SECURE_BOOT_LBN 0 #define MC_CMD_SET_SECURITY_FUSES_IN_SECURE_BOOT_WIDTH 1 #define MC_CMD_SET_SECURITY_FUSES_IN_REJECT_TEST_SIGNED_LBN 1 #define MC_CMD_SET_SECURITY_FUSES_IN_REJECT_TEST_SIGNED_WIDTH 1 +#define MC_CMD_SET_SECURITY_FUSES_IN_SOFT_CONFIG_LBN 31 +#define MC_CMD_SET_SECURITY_FUSES_IN_SOFT_CONFIG_WIDTH 1 /* MC_CMD_SET_SECURITY_FUSES_OUT msgresponse */ #define MC_CMD_SET_SECURITY_FUSES_OUT_LEN 0 +/* MC_CMD_SET_SECURITY_FUSES_V2_OUT msgresponse */ +#define MC_CMD_SET_SECURITY_FUSES_V2_OUT_LEN 4 +/* Flags specifying which security features are enforced on the NIC after the + * flags in the request have been applied. See + * MC_CMD_SET_SECURITY_FUSES_IN/FLAGS for flag definitions. + */ +#define MC_CMD_SET_SECURITY_FUSES_V2_OUT_FLAGS_OFST 0 +#define MC_CMD_SET_SECURITY_FUSES_V2_OUT_FLAGS_LEN 4 + + +/***********************************/ +/* MC_CMD_TSA_INFO + * Messages sent from a TSA adapter to a TSA controller. This command is only valid + * when the MCDI header has MESSAGE_TYPE set to MCDI_MESSAGE_TYPE_TSA. This + * command is not sent by the driver to the MC; it is sent from the MC to a TSA + * controller, and is treated more like an alert message than a command; + * hence the MC does not expect a response in return. Doxbox reference + * SF-117371-SW + */ +#define MC_CMD_TSA_INFO 0x127 +#undef MC_CMD_0x127_PRIVILEGE_CTG + +#define MC_CMD_0x127_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND + +/* MC_CMD_TSA_INFO_IN msgrequest */ +#define MC_CMD_TSA_INFO_IN_LEN 4 +#define MC_CMD_TSA_INFO_IN_OP_HDR_OFST 0 +#define MC_CMD_TSA_INFO_IN_OP_HDR_LEN 4 +#define MC_CMD_TSA_INFO_IN_OP_LBN 0 +#define MC_CMD_TSA_INFO_IN_OP_WIDTH 16 +/* enum: Information about a recently discovered local IP address of the adapter + */ +#define MC_CMD_TSA_INFO_OP_LOCAL_IP 0x1 +/* enum: Information about a sampled packet that did not match any + * black/white-list filters and was either allowed or denied by the default + * filter + */ +#define MC_CMD_TSA_INFO_OP_PKT_SAMPLE 0x2 + +/* MC_CMD_TSA_INFO_IN_LOCAL_IP msgrequest: + * + * The TSA controller maintains a list of IP addresses valid for each port of a + * TSA adapter. The TSA controller requires information from the adapter + * in order to learn new IP addresses assigned to a physical port and to + * identify those that are no longer assigned to the physical port. For this + * purpose, the TSA adapter snoops ARP replies, gratuitous ARP requests and ARP + * probe packets seen on each physical port. This definition describes the + * format of the notification message sent from a TSA adapter to a TSA + * controller concerning any change in IP address assignment for a port. Doxbox + * reference SF-117371. + * + * Multiple notifications may be combined in a single + * message in the future. When that happens, a new flag can be defined using the + * reserved bits to describe the extended format of this notification. + */ +#define MC_CMD_TSA_INFO_IN_LOCAL_IP_LEN 18 +#define MC_CMD_TSA_INFO_IN_LOCAL_IP_OP_HDR_OFST 0 +#define MC_CMD_TSA_INFO_IN_LOCAL_IP_OP_HDR_LEN 4 +/* Additional metadata describing the IP address information such as the source + * of information retrieval, type of IP address and physical port number. + */ +#define MC_CMD_TSA_INFO_IN_LOCAL_IP_META_OFST 4 +#define MC_CMD_TSA_INFO_IN_LOCAL_IP_META_LEN 4 +#define MC_CMD_TSA_INFO_IN_LOCAL_IP_META_PORT_INDEX_LBN 0 +#define MC_CMD_TSA_INFO_IN_LOCAL_IP_META_PORT_INDEX_WIDTH 8 +#define MC_CMD_TSA_INFO_IN_LOCAL_IP_RESERVED_LBN 8 +#define MC_CMD_TSA_INFO_IN_LOCAL_IP_RESERVED_WIDTH 8 +#define MC_CMD_TSA_INFO_IN_LOCAL_IP_META_REASON_LBN 16 +#define MC_CMD_TSA_INFO_IN_LOCAL_IP_META_REASON_WIDTH 8 +/* enum: ARP reply sent out of the physical port */ +#define MC_CMD_TSA_INFO_IP_REASON_TX_ARP 0x0 +/* enum: ARP probe packet received on the physical port */ +#define MC_CMD_TSA_INFO_IP_REASON_RX_ARP_PROBE 0x1 +/* enum: Gratuitous ARP packet received on the physical port */ +#define MC_CMD_TSA_INFO_IP_REASON_RX_GRATUITOUS_ARP 0x2 +/* enum: DHCP ACK packet received on the physical port */ +#define MC_CMD_TSA_INFO_IP_REASON_RX_DHCP_ACK 0x3 +#define MC_CMD_TSA_INFO_IN_LOCAL_IP_META_IPV4_LBN 24 +#define MC_CMD_TSA_INFO_IN_LOCAL_IP_META_IPV4_WIDTH 1 +#define MC_CMD_TSA_INFO_IN_LOCAL_IP_RESERVED1_LBN 25 +#define MC_CMD_TSA_INFO_IN_LOCAL_IP_RESERVED1_WIDTH 7 +/* IPV4 address retrieved from the sampled packets. This field is relevant only + * when META_IPV4 is set to 1. + */ +#define MC_CMD_TSA_INFO_IN_LOCAL_IP_IPV4_ADDR_OFST 8 +#define MC_CMD_TSA_INFO_IN_LOCAL_IP_IPV4_ADDR_LEN 4 +/* Target MAC address retrieved from the sampled packet. */ +#define MC_CMD_TSA_INFO_IN_LOCAL_IP_MAC_ADDR_OFST 12 +#define MC_CMD_TSA_INFO_IN_LOCAL_IP_MAC_ADDR_LEN 1 +#define MC_CMD_TSA_INFO_IN_LOCAL_IP_MAC_ADDR_NUM 6 + +/* MC_CMD_TSA_INFO_IN_PKT_SAMPLE msgrequest: + * + * It is desirable for the TSA controller to learn the traffic pattern of + * packets seen at the network port being monitored. In order to learn about + * the traffic pattern, the TSA controller may want to sample packets seen at + * the network port. Based on the packet samples that the TSA controller + * receives from the adapter, the controller may choose to configure additional + * black-list or white-list rules to allow or block packets as required. + * + * Although the entire sampled packet as seen on the network port is available + * to the MC, the length of the sampled packet sent to the controller is + * restricted by the MCDI payload size. Besides, the TSA controller does not + * require the entire packet to make decisions about filter updates. Hence the + * packet sample being passed to the controller is truncated to 128 bytes. This + * length is large enough to hold the Ethernet header, IP header and maximum + * length of supported L4 protocol headers (IPv4 only, but can hold IPv6 header + * too, if required in future). + * + * The intention is that any future changes to this message format that are not + * backwards compatible will be defined with a new operation code.
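+ *
+ * As a layout aid only, the fixed-size message defined below can be viewed
+ * as a packed struct; "tsa_pkt_sample_msg" is an illustrative name, not a
+ * type used by this interface:
+ *
+ *   struct tsa_pkt_sample_msg {
+ *           uint32_t op_hdr;              // OP in bits 0..15
+ *           uint32_t meta;                // port index, direction, actions
+ *           uint8_t  packet_data[128];    // truncated packet prefix
+ *   };                                    // 4 + 4 + 128 = 136 bytes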
+ */ +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_LEN 136 +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_OP_HDR_OFST 0 +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_OP_HDR_LEN 4 +/* Additional metadata describing the sampled packet */ +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_OFST 4 +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_LEN 4 +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_PORT_INDEX_LBN 0 +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_PORT_INDEX_WIDTH 8 +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_DIRECTION_LBN 8 +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_DIRECTION_WIDTH 1 +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_RESERVED_LBN 9 +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_RESERVED_WIDTH 7 +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_ACTION_MASK_LBN 16 +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_ACTION_MASK_WIDTH 4 +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_ACTION_ALLOW_LBN 16 +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_ACTION_ALLOW_WIDTH 1 +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_ACTION_DENY_LBN 17 +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_ACTION_DENY_WIDTH 1 +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_ACTION_COUNT_LBN 18 +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_ACTION_COUNT_WIDTH 1 +/* 128-byte raw prefix of the sampled packet which includes the ethernet + * header, IP header and L4 protocol header (only IPv4 supported initially). + * This provides the controller enough information about the packet sample to + * report traffic patterns seen on a network port and to make decisions + * concerning rule-set updates. + */ +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_PACKET_DATA_OFST 8 +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_PACKET_DATA_LEN 1 +#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_PACKET_DATA_NUM 128 + +/* MC_CMD_TSA_INFO_OUT msgresponse */ +#define MC_CMD_TSA_INFO_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_HOST_INFO + * Commands to apply or retrieve host-related information from an adapter. + * Doxbox reference SF-117371-SW + */ +#define MC_CMD_HOST_INFO 0x128 +#undef MC_CMD_0x128_PRIVILEGE_CTG + +#define MC_CMD_0x128_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_HOST_INFO_IN msgrequest */ +#define MC_CMD_HOST_INFO_IN_LEN 4 +/* sub-operation code info */ +#define MC_CMD_HOST_INFO_IN_OP_HDR_OFST 0 +#define MC_CMD_HOST_INFO_IN_OP_HDR_LEN 4 +#define MC_CMD_HOST_INFO_IN_OP_LBN 0 +#define MC_CMD_HOST_INFO_IN_OP_WIDTH 16 +/* enum: Read a 16-byte unique host identifier from the adapter. This UUID + * helps to identify the host that an adapter is plugged into. This identifier + * is ideally the system UUID retrieved and set by the UEFI driver. If the UEFI + * driver is unable to extract the system UUID, it would still set a random + * 16-byte value into each supported SF adapter plugged into it. Host UUIDs may + * change if the system is power-cycled; however, they persist across adapter + * resets. If the host UUID was not set on an adapter, due to an unsupported + * version of the UEFI driver, then this command returns an error. Doxbox + * reference - SF-117371-SW section 'Host UUID'. + */ +#define MC_CMD_HOST_INFO_OP_GET_UUID 0x0 +/* enum: Set a 16-byte unique host identifier on the adapter to identify the + * host that the adapter is plugged into. See MC_CMD_HOST_INFO_OP_GET_UUID for + * further details.
+ */ +#define MC_CMD_HOST_INFO_OP_SET_UUID 0x1 + +/* MC_CMD_HOST_INFO_IN_GET_UUID msgrequest */ +#define MC_CMD_HOST_INFO_IN_GET_UUID_LEN 4 +/* sub-operation code info */ +#define MC_CMD_HOST_INFO_IN_GET_UUID_OP_HDR_OFST 0 +#define MC_CMD_HOST_INFO_IN_GET_UUID_OP_HDR_LEN 4 + +/* MC_CMD_HOST_INFO_OUT_GET_UUID msgresponse */ +#define MC_CMD_HOST_INFO_OUT_GET_UUID_LEN 16 +/* 16-byte host UUID read out of the adapter. See MC_CMD_HOST_INFO_OP_GET_UUID + * for further details. + */ +#define MC_CMD_HOST_INFO_OUT_GET_UUID_HOST_UUID_OFST 0 +#define MC_CMD_HOST_INFO_OUT_GET_UUID_HOST_UUID_LEN 1 +#define MC_CMD_HOST_INFO_OUT_GET_UUID_HOST_UUID_NUM 16 + +/* MC_CMD_HOST_INFO_IN_SET_UUID msgrequest */ +#define MC_CMD_HOST_INFO_IN_SET_UUID_LEN 20 +/* sub-operation code info */ +#define MC_CMD_HOST_INFO_IN_SET_UUID_OP_HDR_OFST 0 +#define MC_CMD_HOST_INFO_IN_SET_UUID_OP_HDR_LEN 4 +/* 16-byte host UUID set on the adapter. See MC_CMD_HOST_INFO_OP_GET_UUID for + * further details. + */ +#define MC_CMD_HOST_INFO_IN_SET_UUID_HOST_UUID_OFST 4 +#define MC_CMD_HOST_INFO_IN_SET_UUID_HOST_UUID_LEN 1 +#define MC_CMD_HOST_INFO_IN_SET_UUID_HOST_UUID_NUM 16 + +/* MC_CMD_HOST_INFO_OUT_SET_UUID msgresponse */ +#define MC_CMD_HOST_INFO_OUT_SET_UUID_LEN 0 + + +/***********************************/ +/* MC_CMD_TSAN_INFO + * Get TSA adapter information. TSA controllers query each TSA adapter to learn + * some configuration parameters of each adapter. Doxbox reference SF-117371-SW + * section 'Adapter Information' + */ +#define MC_CMD_TSAN_INFO 0x129 +#undef MC_CMD_0x129_PRIVILEGE_CTG + +#define MC_CMD_0x129_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_TSAN_INFO_IN msgrequest */ +#define MC_CMD_TSAN_INFO_IN_LEN 4 +/* sub-operation code info */ +#define MC_CMD_TSAN_INFO_IN_OP_HDR_OFST 0 +#define MC_CMD_TSAN_INFO_IN_OP_HDR_LEN 4 +#define MC_CMD_TSAN_INFO_IN_OP_LBN 0 +#define MC_CMD_TSAN_INFO_IN_OP_WIDTH 16 +/* enum: Read configuration parameters and IDs that uniquely identify an + * adapter. The parameters include - host identification, adapter + * identification string and number of physical ports on the adapter. + */ +#define MC_CMD_TSAN_INFO_OP_GET_CFG 0x0 + +/* MC_CMD_TSAN_INFO_IN_GET_CFG msgrequest */ +#define MC_CMD_TSAN_INFO_IN_GET_CFG_LEN 4 +/* sub-operation code info */ +#define MC_CMD_TSAN_INFO_IN_GET_CFG_OP_HDR_OFST 0 +#define MC_CMD_TSAN_INFO_IN_GET_CFG_OP_HDR_LEN 4 + +/* MC_CMD_TSAN_INFO_OUT_GET_CFG msgresponse */ +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_LEN 26 +/* Information about the configuration parameters returned in this response. */ +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_CONFIG_WORD_OFST 0 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_CONFIG_WORD_LEN 4 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_CAP_FLAGS_LBN 0 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_CAP_FLAGS_WIDTH 16 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_FLAG_HOST_UUID_VALID_LBN 0 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_FLAG_HOST_UUID_VALID_WIDTH 1 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_NUM_PORTS_LBN 16 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_NUM_PORTS_WIDTH 8 +/* 16-byte host UUID read out of the adapter. See MC_CMD_HOST_INFO_OP_GET_UUID + * for further details. + */ +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_HOST_UUID_OFST 4 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_HOST_UUID_LEN 1 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_HOST_UUID_NUM 16 +/* A unique identifier per adapter. The base MAC address of the card is used + * for this purpose. 
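+ *
+ * Taken together, a TSA controller might unpack this GET_CFG response
+ * roughly as follows. This is a hedged sketch, not driver code: the rsp
+ * buffer and get_dword() helper are hypothetical, and only the macros come
+ * from this header (MCDI payloads are little-endian, so a big-endian host
+ * would byte-swap the config word before testing the flag):
+ *
+ *   uint32_t cfg = get_dword(rsp, MC_CMD_TSAN_INFO_OUT_GET_CFG_CONFIG_WORD_OFST);
+ *   uint8_t uuid[MC_CMD_TSAN_INFO_OUT_GET_CFG_HOST_UUID_NUM];
+ *   if (cfg & (1u << MC_CMD_TSAN_INFO_OUT_GET_CFG_FLAG_HOST_UUID_VALID_LBN))
+ *       memcpy(uuid, rsp + MC_CMD_TSAN_INFO_OUT_GET_CFG_HOST_UUID_OFST,
+ *              sizeof(uuid));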
+ */ +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_GUID_OFST 20 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_GUID_LEN 1 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_GUID_NUM 6 + +/* MC_CMD_TSAN_INFO_OUT_GET_CFG_V2 msgresponse */ +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_LEN 36 +/* Information about the configuration parameters returned in this response. */ +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_CONFIG_WORD_OFST 0 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_CONFIG_WORD_LEN 4 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_CAP_FLAGS_LBN 0 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_CAP_FLAGS_WIDTH 16 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_FLAG_HOST_UUID_VALID_LBN 0 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_FLAG_HOST_UUID_VALID_WIDTH 1 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_NUM_PORTS_LBN 16 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_NUM_PORTS_WIDTH 8 +/* 16-byte host UUID read out of the adapter. See MC_CMD_HOST_INFO_OP_GET_UUID + * for further details. + */ +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_HOST_UUID_OFST 4 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_HOST_UUID_LEN 1 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_HOST_UUID_NUM 16 +/* A unique identifier per adapter. The base MAC address of the card is used + * for this purpose. + */ +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_GUID_OFST 20 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_GUID_LEN 1 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_GUID_NUM 6 +/* Unused bytes, defined for 32-bit alignment of new fields. */ +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_UNUSED_OFST 26 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_UNUSED_LEN 2 +/* Maximum number of TSA statistics counters in each direction of dataflow + * supported on the card. Note that the statistics counters are always + * allocated in pairs, i.e. a counter ID is associated with one Tx and one Rx + * counter. + */ +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_MAX_STATS_OFST 28 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_MAX_STATS_LEN 4 +/* Width of each statistics counter (represented in bits). This gives an + * indication of wrap point to the user. + */ +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_STATS_WIDTH_OFST 32 +#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_STATS_WIDTH_LEN 4 + + +/***********************************/ +/* MC_CMD_TSA_STATISTICS + * TSA adapter statistics operations. + */ +#define MC_CMD_TSA_STATISTICS 0x130 +#undef MC_CMD_0x130_PRIVILEGE_CTG + +#define MC_CMD_0x130_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND + +/* MC_CMD_TSA_STATISTICS_IN msgrequest */ +#define MC_CMD_TSA_STATISTICS_IN_LEN 4 +/* TSA statistics sub-operation code */ +#define MC_CMD_TSA_STATISTICS_IN_OP_CODE_OFST 0 +#define MC_CMD_TSA_STATISTICS_IN_OP_CODE_LEN 4 +/* enum: Get the configuration parameters that describe the TSA statistics + * layout on the adapter. + */ +#define MC_CMD_TSA_STATISTICS_OP_GET_CONFIG 0x0 +/* enum: Read and/or clear TSA statistics counters. */ +#define MC_CMD_TSA_STATISTICS_OP_READ_CLEAR 0x1 + +/* MC_CMD_TSA_STATISTICS_IN_GET_CONFIG msgrequest */ +#define MC_CMD_TSA_STATISTICS_IN_GET_CONFIG_LEN 4 +/* TSA statistics sub-operation code */ +#define MC_CMD_TSA_STATISTICS_IN_GET_CONFIG_OP_CODE_OFST 0 +#define MC_CMD_TSA_STATISTICS_IN_GET_CONFIG_OP_CODE_LEN 4 + +/* MC_CMD_TSA_STATISTICS_OUT_GET_CONFIG msgresponse */ +#define MC_CMD_TSA_STATISTICS_OUT_GET_CONFIG_LEN 8 +/* Maximum number of TSA statistics counters in each direction of dataflow + * supported on the card. Note that the statistics counters are always + * allocated in pairs, i.e. a counter ID is associated with one Tx and one Rx + * counter. 
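+ *
+ * The STATS_WIDTH value reported in the same response tells the consumer
+ * where each counter wraps. As a hedged example (the helper below is
+ * illustrative only, not part of any driver), a wrap-safe delta between
+ * two samples can be computed modulo the counter width:
+ *
+ *   static uint64_t stat_delta(uint64_t prev, uint64_t cur, uint32_t width)
+ *   {
+ *       uint64_t mask = (width >= 64) ? ~0ULL : ((1ULL << width) - 1);
+ *       return (cur - prev) & mask;   // correct across a single wrap
+ *   }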
+ */ +#define MC_CMD_TSA_STATISTICS_OUT_GET_CONFIG_MAX_STATS_OFST 0 +#define MC_CMD_TSA_STATISTICS_OUT_GET_CONFIG_MAX_STATS_LEN 4 +/* Width of each statistics counter (represented in bits). This gives an + * indication of wrap point to the user. + */ +#define MC_CMD_TSA_STATISTICS_OUT_GET_CONFIG_STATS_WIDTH_OFST 4 +#define MC_CMD_TSA_STATISTICS_OUT_GET_CONFIG_STATS_WIDTH_LEN 4 + +/* MC_CMD_TSA_STATISTICS_IN_READ_CLEAR msgrequest */ +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_LENMIN 20 +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_LENMAX 252 +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_LEN(num) (16+4*(num)) +/* TSA statistics sub-operation code */ +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_OP_CODE_OFST 0 +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_OP_CODE_LEN 4 +/* Parameters describing the statistics operation */ +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_FLAGS_OFST 4 +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_FLAGS_LEN 4 +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_READ_LBN 0 +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_READ_WIDTH 1 +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_CLEAR_LBN 1 +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_CLEAR_WIDTH 1 +/* Counter ID list specification type */ +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_MODE_OFST 8 +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_MODE_LEN 4 +/* enum: The statistics counters are specified as an unordered list of + * individual counter IDs. + */ +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_LIST 0x0 +/* enum: The statistics counters are specified as a range of consecutive + * counter IDs. + */ +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_RANGE 0x1 +/* Number of statistics counters */ +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_NUM_STATS_OFST 12 +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_NUM_STATS_LEN 4 +/* Counter IDs to be read/cleared. When mode is set to LIST, this entry holds a + * list of counter IDs to be operated on. When mode is set to RANGE, this entry + * holds a single counter ID representing the start of the range of counter IDs + * to be operated on.
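+ *
+ * For illustration, a read-and-clear of eight consecutive counters in
+ * RANGE mode could be laid out as below; put_dword() is a hypothetical
+ * little-endian 32-bit store, while the offsets and values are the macros
+ * defined in this section:
+ *
+ *   uint8_t req[MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_LEN(1)];
+ *   put_dword(req, MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_OP_CODE_OFST,
+ *             MC_CMD_TSA_STATISTICS_OP_READ_CLEAR);
+ *   put_dword(req, MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_FLAGS_OFST,
+ *             (1u << MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_READ_LBN) |
+ *             (1u << MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_CLEAR_LBN));
+ *   put_dword(req, MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_MODE_OFST,
+ *             MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_RANGE);
+ *   put_dword(req, MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_NUM_STATS_OFST, 8);
+ *   put_dword(req, MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_COUNTER_ID_OFST, 0);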
+ */ +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_COUNTER_ID_OFST 16 +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_COUNTER_ID_LEN 4 +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_COUNTER_ID_MINNUM 1 +#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_COUNTER_ID_MAXNUM 59 + +/* MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR msgresponse */ +#define MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_LENMIN 24 +#define MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_LENMAX 248 +#define MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_LEN(num) (8+16*(num)) +/* Number of statistics counters returned in this response */ +#define MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_NUM_STATS_OFST 0 +#define MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_NUM_STATS_LEN 4 +/* MC_TSA_STATISTICS_ENTRY Note that this field is expected to start at a + * 64-bit aligned offset + */ +#define MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_STATS_COUNTERS_OFST 8 +#define MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_STATS_COUNTERS_LEN 16 +#define MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_STATS_COUNTERS_MINNUM 1 +#define MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_STATS_COUNTERS_MAXNUM 15 + +/* MC_TSA_STATISTICS_ENTRY structuredef */ +#define MC_TSA_STATISTICS_ENTRY_LEN 16 +/* Tx statistics counter */ +#define MC_TSA_STATISTICS_ENTRY_TX_STAT_OFST 0 +#define MC_TSA_STATISTICS_ENTRY_TX_STAT_LEN 8 +#define MC_TSA_STATISTICS_ENTRY_TX_STAT_LO_OFST 0 +#define MC_TSA_STATISTICS_ENTRY_TX_STAT_HI_OFST 4 +#define MC_TSA_STATISTICS_ENTRY_TX_STAT_LBN 0 +#define MC_TSA_STATISTICS_ENTRY_TX_STAT_WIDTH 64 +/* Rx statistics counter */ +#define MC_TSA_STATISTICS_ENTRY_RX_STAT_OFST 8 +#define MC_TSA_STATISTICS_ENTRY_RX_STAT_LEN 8 +#define MC_TSA_STATISTICS_ENTRY_RX_STAT_LO_OFST 8 +#define MC_TSA_STATISTICS_ENTRY_RX_STAT_HI_OFST 12 +#define MC_TSA_STATISTICS_ENTRY_RX_STAT_LBN 64 +#define MC_TSA_STATISTICS_ENTRY_RX_STAT_WIDTH 64 + + +/***********************************/ +/* MC_CMD_ERASE_INITIAL_NIC_SECRET + * This request causes the NIC to find the initial NIC secret (programmed + * during ATE) in XPM memory and if and only if the NIC has already been + * rekeyed with MC_CMD_REKEY, erase it. This is used by manftest after + * installing TSA binding certificates. See SF-117631-TC. + */ +#define MC_CMD_ERASE_INITIAL_NIC_SECRET 0x131 +#undef MC_CMD_0x131_PRIVILEGE_CTG + +#define MC_CMD_0x131_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND + +/* MC_CMD_ERASE_INITIAL_NIC_SECRET_IN msgrequest */ +#define MC_CMD_ERASE_INITIAL_NIC_SECRET_IN_LEN 0 + +/* MC_CMD_ERASE_INITIAL_NIC_SECRET_OUT msgresponse */ +#define MC_CMD_ERASE_INITIAL_NIC_SECRET_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_TSA_CONFIG + * TSA adapter configuration operations. This command is used to prepare the + * NIC for TSA binding. + */ +#define MC_CMD_TSA_CONFIG 0x64 +#undef MC_CMD_0x64_PRIVILEGE_CTG + +#define MC_CMD_0x64_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_TSA_CONFIG_IN msgrequest */ +#define MC_CMD_TSA_CONFIG_IN_LEN 4 +/* TSA configuration sub-operation code */ +#define MC_CMD_TSA_CONFIG_IN_OP_OFST 0 +#define MC_CMD_TSA_CONFIG_IN_OP_LEN 4 +/* enum: Append a single item to the tsa_config partition. Items will be + * encrypted unless they are declared as non-sensitive. Returns + * MC_CMD_ERR_EEXIST if the tag is already present. + */ +#define MC_CMD_TSA_CONFIG_OP_APPEND 0x1 +/* enum: Reset the tsa_config partition to a clean state. */ +#define MC_CMD_TSA_CONFIG_OP_RESET 0x2 +/* enum: Read back a configured item from tsa_config partition. Returns + * MC_CMD_ERR_ENOENT if the item doesn't exist, or MC_CMD_ERR_EPERM if the item + * is declared as sensitive (i.e. 
is encrypted). + */ +#define MC_CMD_TSA_CONFIG_OP_READ 0x3 + +/* MC_CMD_TSA_CONFIG_IN_APPEND msgrequest */ +#define MC_CMD_TSA_CONFIG_IN_APPEND_LENMIN 12 +#define MC_CMD_TSA_CONFIG_IN_APPEND_LENMAX 252 +#define MC_CMD_TSA_CONFIG_IN_APPEND_LEN(num) (12+1*(num)) +/* TSA configuration sub-operation code. The value shall be + * MC_CMD_TSA_CONFIG_OP_APPEND. + */ +#define MC_CMD_TSA_CONFIG_IN_APPEND_OP_OFST 0 +#define MC_CMD_TSA_CONFIG_IN_APPEND_OP_LEN 4 +/* The tag to be appended */ +#define MC_CMD_TSA_CONFIG_IN_APPEND_TAG_OFST 4 +#define MC_CMD_TSA_CONFIG_IN_APPEND_TAG_LEN 4 +/* The length of the data in bytes */ +#define MC_CMD_TSA_CONFIG_IN_APPEND_LENGTH_OFST 8 +#define MC_CMD_TSA_CONFIG_IN_APPEND_LENGTH_LEN 4 +/* The item data */ +#define MC_CMD_TSA_CONFIG_IN_APPEND_DATA_OFST 12 +#define MC_CMD_TSA_CONFIG_IN_APPEND_DATA_LEN 1 +#define MC_CMD_TSA_CONFIG_IN_APPEND_DATA_MINNUM 0 +#define MC_CMD_TSA_CONFIG_IN_APPEND_DATA_MAXNUM 240 + +/* MC_CMD_TSA_CONFIG_OUT_APPEND msgresponse */ +#define MC_CMD_TSA_CONFIG_OUT_APPEND_LEN 0 + +/* MC_CMD_TSA_CONFIG_IN_RESET msgrequest */ +#define MC_CMD_TSA_CONFIG_IN_RESET_LEN 4 +/* TSA configuration sub-operation code. The value shall be + * MC_CMD_TSA_CONFIG_OP_RESET. + */ +#define MC_CMD_TSA_CONFIG_IN_RESET_OP_OFST 0 +#define MC_CMD_TSA_CONFIG_IN_RESET_OP_LEN 4 + +/* MC_CMD_TSA_CONFIG_OUT_RESET msgresponse */ +#define MC_CMD_TSA_CONFIG_OUT_RESET_LEN 0 + +/* MC_CMD_TSA_CONFIG_IN_READ msgrequest */ +#define MC_CMD_TSA_CONFIG_IN_READ_LEN 8 +/* TSA configuration sub-operation code. The value shall be + * MC_CMD_TSA_CONFIG_OP_READ. + */ +#define MC_CMD_TSA_CONFIG_IN_READ_OP_OFST 0 +#define MC_CMD_TSA_CONFIG_IN_READ_OP_LEN 4 +/* The tag to be read */ +#define MC_CMD_TSA_CONFIG_IN_READ_TAG_OFST 4 +#define MC_CMD_TSA_CONFIG_IN_READ_TAG_LEN 4 + +/* MC_CMD_TSA_CONFIG_OUT_READ msgresponse */ +#define MC_CMD_TSA_CONFIG_OUT_READ_LENMIN 8 +#define MC_CMD_TSA_CONFIG_OUT_READ_LENMAX 252 +#define MC_CMD_TSA_CONFIG_OUT_READ_LEN(num) (8+1*(num)) +/* The tag that was read */ +#define MC_CMD_TSA_CONFIG_OUT_READ_TAG_OFST 0 +#define MC_CMD_TSA_CONFIG_OUT_READ_TAG_LEN 4 +/* The length of the data in bytes */ +#define MC_CMD_TSA_CONFIG_OUT_READ_LENGTH_OFST 4 +#define MC_CMD_TSA_CONFIG_OUT_READ_LENGTH_LEN 4 +/* The data of the item. */ +#define MC_CMD_TSA_CONFIG_OUT_READ_DATA_OFST 8 +#define MC_CMD_TSA_CONFIG_OUT_READ_DATA_LEN 1 +#define MC_CMD_TSA_CONFIG_OUT_READ_DATA_MINNUM 0 +#define MC_CMD_TSA_CONFIG_OUT_READ_DATA_MAXNUM 244 + +/* MC_TSA_IPV4_ITEM structuredef */ +#define MC_TSA_IPV4_ITEM_LEN 8 +/* Additional metadata describing the IP address information such as the + * physical port number the address is being used on. Unused space in this + * field is reserved for future expansion. + */ +#define MC_TSA_IPV4_ITEM_IPV4_ADDR_META_OFST 0 +#define MC_TSA_IPV4_ITEM_IPV4_ADDR_META_LEN 4 +#define MC_TSA_IPV4_ITEM_PORT_IDX_LBN 0 +#define MC_TSA_IPV4_ITEM_PORT_IDX_WIDTH 8 +#define MC_TSA_IPV4_ITEM_IPV4_ADDR_META_LBN 0 +#define MC_TSA_IPV4_ITEM_IPV4_ADDR_META_WIDTH 32 +/* The IPv4 address in little endian byte order. */ +#define MC_TSA_IPV4_ITEM_IPV4_ADDR_OFST 4 +#define MC_TSA_IPV4_ITEM_IPV4_ADDR_LEN 4 +#define MC_TSA_IPV4_ITEM_IPV4_ADDR_LBN 32 +#define MC_TSA_IPV4_ITEM_IPV4_ADDR_WIDTH 32 + + +/***********************************/ +/* MC_CMD_TSA_IPADDR + * TSA operations relating to the monitoring and expiry of local IP addresses + * discovered by the controller. These commands are sent from a TSA controller + * to a TSA adapter. 
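+ *
+ * The address arrays in these requests are built from the MC_TSA_IPV4_ITEM
+ * layout defined above. A hedged sketch of packing one item (put_dword()
+ * is a hypothetical little-endian 32-bit store, and ipv4_le is the address
+ * already in little-endian byte order):
+ *
+ *   uint8_t item[MC_TSA_IPV4_ITEM_LEN];
+ *   put_dword(item, MC_TSA_IPV4_ITEM_IPV4_ADDR_META_OFST,
+ *             (uint32_t)port_idx << MC_TSA_IPV4_ITEM_PORT_IDX_LBN);
+ *   put_dword(item, MC_TSA_IPV4_ITEM_IPV4_ADDR_OFST, ipv4_le);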
+ */ +#define MC_CMD_TSA_IPADDR 0x65 +#undef MC_CMD_0x65_PRIVILEGE_CTG + +#define MC_CMD_0x65_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND + +/* MC_CMD_TSA_IPADDR_IN msgrequest */ +#define MC_CMD_TSA_IPADDR_IN_LEN 4 +/* Header containing information to identify which sub-operation of this + * command to perform. The header contains a 16-bit op-code. Unused space in + * this field is reserved for future expansion. + */ +#define MC_CMD_TSA_IPADDR_IN_OP_HDR_OFST 0 +#define MC_CMD_TSA_IPADDR_IN_OP_HDR_LEN 4 +#define MC_CMD_TSA_IPADDR_IN_OP_LBN 0 +#define MC_CMD_TSA_IPADDR_IN_OP_WIDTH 16 +/* enum: Request that the adapter verifies that the IPv4 addresses supplied are + * still in use by the host by sending ARP probes to the host. The MC does not + * wait for a response to the probes and sends an MCDI response to the + * controller once the probes have been sent to the host. The response to the + * probes (if there are any) will be forwarded to the controller using + * MC_CMD_TSA_INFO alerts. + */ +#define MC_CMD_TSA_IPADDR_OP_VALIDATE_IPV4 0x1 +/* enum: Notify the adapter that one or more IPv4 addresses are no longer valid + * for the host of the adapter. The adapter should remove the IPv4 addresses + * from its local cache. + */ +#define MC_CMD_TSA_IPADDR_OP_REMOVE_IPV4 0x2 + +/* MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4 msgrequest */ +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_LENMIN 16 +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_LENMAX 248 +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_LEN(num) (8+8*(num)) +/* Header containing information to identify which sub-operation of this + * command to perform. The header contains a 16-bit op-code. Unused space in + * this field is reserved for future expansion. + */ +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_OP_HDR_OFST 0 +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_OP_HDR_LEN 4 +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_OP_LBN 0 +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_OP_WIDTH 16 +/* Number of IPv4 addresses to validate. */ +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_NUM_ITEMS_OFST 4 +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_NUM_ITEMS_LEN 4 +/* The IPv4 addresses to validate, in struct MC_TSA_IPV4_ITEM format. */ +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_IPV4_ITEM_OFST 8 +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_IPV4_ITEM_LEN 8 +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_IPV4_ITEM_LO_OFST 8 +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_IPV4_ITEM_HI_OFST 12 +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_IPV4_ITEM_MINNUM 1 +#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_IPV4_ITEM_MAXNUM 30 + +/* MC_CMD_TSA_IPADDR_OUT_VALIDATE_IPV4 msgresponse */ +#define MC_CMD_TSA_IPADDR_OUT_VALIDATE_IPV4_LEN 0 + +/* MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4 msgrequest */ +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_LENMIN 16 +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_LENMAX 248 +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_LEN(num) (8+8*(num)) +/* Header containing information to identify which sub-operation of this + * command to perform. The header contains a 16-bit op-code. Unused space in + * this field is reserved for future expansion. + */ +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_OP_HDR_OFST 0 +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_OP_HDR_LEN 4 +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_OP_LBN 0 +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_OP_WIDTH 16 +/* Number of IPv4 addresses to remove. */ +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_NUM_ITEMS_OFST 4 +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_NUM_ITEMS_LEN 4 +/* The IPv4 addresses that have expired, in struct MC_TSA_IPV4_ITEM format. 
*/ +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_IPV4_ITEM_OFST 8 +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_IPV4_ITEM_LEN 8 +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_IPV4_ITEM_LO_OFST 8 +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_IPV4_ITEM_HI_OFST 12 +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_IPV4_ITEM_MINNUM 1 +#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_IPV4_ITEM_MAXNUM 30 + +/* MC_CMD_TSA_IPADDR_OUT_REMOVE_IPV4 msgresponse */ +#define MC_CMD_TSA_IPADDR_OUT_REMOVE_IPV4_LEN 0 + + +/***********************************/ +/* MC_CMD_SECURE_NIC_INFO + * Get secure NIC information. While many of the features reported by these + * commands are related to TSA, they must be supported in firmware where TSA is + * disabled. + */ +#define MC_CMD_SECURE_NIC_INFO 0x132 +#undef MC_CMD_0x132_PRIVILEGE_CTG + +#define MC_CMD_0x132_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_SECURE_NIC_INFO_IN msgrequest */ +#define MC_CMD_SECURE_NIC_INFO_IN_LEN 4 +/* sub-operation code info */ +#define MC_CMD_SECURE_NIC_INFO_IN_OP_HDR_OFST 0 +#define MC_CMD_SECURE_NIC_INFO_IN_OP_HDR_LEN 4 +#define MC_CMD_SECURE_NIC_INFO_IN_OP_LBN 0 +#define MC_CMD_SECURE_NIC_INFO_IN_OP_WIDTH 16 +/* enum: Get the status of various security settings, all signed along with a + * challenge chosen by the host. + */ +#define MC_CMD_SECURE_NIC_INFO_OP_STATUS 0x0 + +/* MC_CMD_SECURE_NIC_INFO_IN_STATUS msgrequest */ +#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_LEN 24 +/* sub-operation code, must be MC_CMD_SECURE_NIC_INFO_OP_STATUS */ +#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_OP_HDR_OFST 0 +#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_OP_HDR_LEN 4 +/* Type of key to be used to sign response. */ +#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_KEY_TYPE_OFST 4 +#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_KEY_TYPE_LEN 4 +#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_UNUSED 0x0 /* enum */ +/* enum: Solarflare adapter authentication key, installed by Manftest. */ +#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_SF_ADAPTER_AUTH 0x1 +/* enum: TSA binding key, installed after adapter is bound to a TSA controller. + * This is not supported in firmware which does not support TSA. + */ +#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_TSA_BINDING 0x2 +/* enum: Customer adapter authentication key. Installed by the customer in the + * field, but otherwise similar to the Solarflare adapter authentication key. + */ +#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_CUSTOMER_ADAPTER_AUTH 0x3 +/* Random challenge generated by the host. */ +#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_CHALLENGE_OFST 8 +#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_CHALLENGE_LEN 16 + +/* MC_CMD_SECURE_NIC_INFO_OUT_STATUS msgresponse */ +#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_LEN 420 +/* Length of the signature in MSG_SIGNATURE. */ +#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_MSG_SIGNATURE_LEN_OFST 0 +#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_MSG_SIGNATURE_LEN_LEN 4 +/* Signature over the message, starting at MESSAGE_TYPE and continuing to the + * end of the MCDI response, allowing the message format to be extended. The + * signature uses ECDSA 384 encoding in ASN.1 format. It has variable length, + * with a maximum of 384 bytes. + */ +#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_MSG_SIGNATURE_OFST 4 +#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_MSG_SIGNATURE_LEN 384 +/* Enum value indicating the type of response. This protects against chosen + * message attacks. The enum values are random rather than sequential to make + * it unlikely that values will be reused should other commands in a different + * namespace need to create signed messages. 
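+ *
+ * A verifier is therefore expected to (a) check that MESSAGE_TYPE equals
+ * MC_CMD_SECURE_NIC_INFO_STATUS, (b) check that the echoed CHALLENGE
+ * matches the one sent in the request, and (c) verify MSG_SIGNATURE over
+ * the bytes from MESSAGE_TYPE to the end of the response. As a hedged
+ * sketch (verify_ecdsa_p384() stands in for the verifier's own crypto
+ * library; it is not provided by this header):
+ *
+ *   const uint8_t *signed_part =
+ *       rsp + MC_CMD_SECURE_NIC_INFO_OUT_STATUS_MESSAGE_TYPE_OFST;
+ *   size_t signed_len =
+ *       rsp_len - MC_CMD_SECURE_NIC_INFO_OUT_STATUS_MESSAGE_TYPE_OFST;
+ *   bool ok = verify_ecdsa_p384(pubkey, signed_part, signed_len,
+ *       rsp + MC_CMD_SECURE_NIC_INFO_OUT_STATUS_MSG_SIGNATURE_OFST, sig_len);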
+ */ +#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_MESSAGE_TYPE_OFST 388 +#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_MESSAGE_TYPE_LEN 4 +/* enum: Message type value for the response to a + * MC_CMD_SECURE_NIC_INFO_IN_STATUS message. + */ +#define MC_CMD_SECURE_NIC_INFO_STATUS 0xdb4 +/* The challenge provided by the host in the MC_CMD_SECURE_NIC_INFO_IN_STATUS + * message + */ +#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_CHALLENGE_OFST 392 +#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_CHALLENGE_LEN 16 +/* The first 32 bits of XPM memory, which include security and flag bits, die + * ID and chip ID revision. The meaning of these bits is defined in + * mc/include/mc/xpm.h in the firmwaresrc repository. + */ +#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_XPM_STATUS_BITS_OFST 408 +#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_XPM_STATUS_BITS_LEN 4 +#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_FIRMWARE_VERSION_A_OFST 412 +#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_FIRMWARE_VERSION_A_LEN 2 +#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_FIRMWARE_VERSION_B_OFST 414 +#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_FIRMWARE_VERSION_B_LEN 2 +#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_FIRMWARE_VERSION_C_OFST 416 +#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_FIRMWARE_VERSION_C_LEN 2 +#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_FIRMWARE_VERSION_D_OFST 418 +#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_FIRMWARE_VERSION_D_LEN 2 + + +/***********************************/ +/* MC_CMD_TSA_TEST + * A simple ping-pong command just to test the adapter<>controller MCDI + * communication channel. This command makes no changes to the TSA adapter's + * internal state. It is used by the controller just to verify that the MCDI + * communication channel is working correctly. This command takes no additional + * parameters in request or response. + */ +#define MC_CMD_TSA_TEST 0x125 +#undef MC_CMD_0x125_PRIVILEGE_CTG + +#define MC_CMD_0x125_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND + +/* MC_CMD_TSA_TEST_IN msgrequest */ +#define MC_CMD_TSA_TEST_IN_LEN 0 + +/* MC_CMD_TSA_TEST_OUT msgresponse */ +#define MC_CMD_TSA_TEST_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_TSA_RULESET_OVERRIDE + * Override the TSA ruleset that is currently active on the adapter. This + * operation does not modify the ruleset itself. This operation provides a + * mechanism to apply an allow-all or deny-all operation on all packets, + * thereby completely ignoring the rule-set configured on the adapter. The main + * purpose of this operation is to provide a deterministic state to the TSA + * firewall during rule-set transitions. + */ +#define MC_CMD_TSA_RULESET_OVERRIDE 0x12a +#undef MC_CMD_0x12a_PRIVILEGE_CTG + +#define MC_CMD_0x12a_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND + +/* MC_CMD_TSA_RULESET_OVERRIDE_IN msgrequest */ +#define MC_CMD_TSA_RULESET_OVERRIDE_IN_LEN 4 +/* The override state to apply. */ +#define MC_CMD_TSA_RULESET_OVERRIDE_IN_STATE_OFST 0 +#define MC_CMD_TSA_RULESET_OVERRIDE_IN_STATE_LEN 4 +/* enum: No override in place - the existing ruleset is in operation. */ +#define MC_CMD_TSA_RULESET_OVERRIDE_NONE 0x0 +/* enum: Block all packets seen on all datapath channels except those packets + * required for basic configuration of the TSA NIC such as ARPs and TSA- + * communication traffic. Such exceptional traffic is handled differently + * compared to TSA rulesets. + */ +#define MC_CMD_TSA_RULESET_OVERRIDE_BLOCK 0x1 +/* enum: Allow all packets through all datapath channels. The TSA adapter + * behaves like a normal NIC without any firewalls.
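+ *
+ * As an illustrative (not normative) transition sequence, a controller
+ * updating the rule-set might first force BLOCK, rewrite the rules, then
+ * restore NONE so the new rule-set takes effect from a deterministic
+ * state; put_dword() below is a hypothetical little-endian store:
+ *
+ *   uint8_t req[MC_CMD_TSA_RULESET_OVERRIDE_IN_LEN];
+ *   put_dword(req, MC_CMD_TSA_RULESET_OVERRIDE_IN_STATE_OFST,
+ *             MC_CMD_TSA_RULESET_OVERRIDE_BLOCK);
+ *   // send via MCDI, update the rule-set, then repeat with _NONE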
+ */ +#define MC_CMD_TSA_RULESET_OVERRIDE_ALLOW 0x2 + +/* MC_CMD_TSA_RULESET_OVERRIDE_OUT msgresponse */ +#define MC_CMD_TSA_RULESET_OVERRIDE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_TSAC_REQUEST + * Generic command to send requests from a TSA controller to a TSA adapter. + * Specific usage is determined by the TYPE field. + */ +#define MC_CMD_TSAC_REQUEST 0x12b +#undef MC_CMD_0x12b_PRIVILEGE_CTG + +#define MC_CMD_0x12b_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND + +/* MC_CMD_TSAC_REQUEST_IN msgrequest */ +#define MC_CMD_TSAC_REQUEST_IN_LEN 4 +/* The type of request from the controller. */ +#define MC_CMD_TSAC_REQUEST_IN_TYPE_OFST 0 +#define MC_CMD_TSAC_REQUEST_IN_TYPE_LEN 4 +/* enum: Request the adapter to resend localIP information from its cache. The + * command does not return any IP address information; IP addresses are sent as + * TSA notifications as described in MC_CMD_TSA_INFO_IN_LOCAL_IP. + */ +#define MC_CMD_TSAC_REQUEST_LOCALIP 0x0 + +/* MC_CMD_TSAC_REQUEST_OUT msgresponse */ +#define MC_CMD_TSAC_REQUEST_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_SUC_VERSION + * Get the version of the SUC + */ +#define MC_CMD_SUC_VERSION 0x134 +#undef MC_CMD_0x134_PRIVILEGE_CTG + +#define MC_CMD_0x134_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_SUC_VERSION_IN msgrequest */ +#define MC_CMD_SUC_VERSION_IN_LEN 0 + +/* MC_CMD_SUC_VERSION_OUT msgresponse */ +#define MC_CMD_SUC_VERSION_OUT_LEN 24 +/* The SUC firmware version as four numbers - a.b.c.d */ +#define MC_CMD_SUC_VERSION_OUT_VERSION_OFST 0 +#define MC_CMD_SUC_VERSION_OUT_VERSION_LEN 4 +#define MC_CMD_SUC_VERSION_OUT_VERSION_NUM 4 +/* The date, in seconds since the Unix epoch, when the firmware image was + * built. + */ +#define MC_CMD_SUC_VERSION_OUT_BUILD_DATE_OFST 16 +#define MC_CMD_SUC_VERSION_OUT_BUILD_DATE_LEN 4 +/* The ID of the SUC chip. This is specific to the platform but typically + * indicates family, memory sizes etc. See SF-116728-SW for further details. + */ +#define MC_CMD_SUC_VERSION_OUT_CHIP_ID_OFST 20 +#define MC_CMD_SUC_VERSION_OUT_CHIP_ID_LEN 4 + +/* MC_CMD_SUC_BOOT_VERSION_IN msgrequest: Get the version of the SUC boot + * loader. + */ +#define MC_CMD_SUC_BOOT_VERSION_IN_LEN 4 +#define MC_CMD_SUC_BOOT_VERSION_IN_MAGIC_OFST 0 +#define MC_CMD_SUC_BOOT_VERSION_IN_MAGIC_LEN 4 +/* enum: Requests the SUC boot version. */ +#define MC_CMD_SUC_VERSION_GET_BOOT_VERSION 0xb007700b + +/* MC_CMD_SUC_BOOT_VERSION_OUT msgresponse */ +#define MC_CMD_SUC_BOOT_VERSION_OUT_LEN 4 +/* The SUC boot version */ +#define MC_CMD_SUC_BOOT_VERSION_OUT_VERSION_OFST 0 +#define MC_CMD_SUC_BOOT_VERSION_OUT_VERSION_LEN 4 + + +/***********************************/ +/* MC_CMD_SUC_MANFTEST + * Operations to support manftest on SUC-based systems. + */ +#define MC_CMD_SUC_MANFTEST 0x135 +#undef MC_CMD_0x135_PRIVILEGE_CTG + +#define MC_CMD_0x135_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND + +/* MC_CMD_SUC_MANFTEST_IN msgrequest */ +#define MC_CMD_SUC_MANFTEST_IN_LEN 4 +/* The manftest operation to be performed. */ +#define MC_CMD_SUC_MANFTEST_IN_OP_OFST 0 +#define MC_CMD_SUC_MANFTEST_IN_OP_LEN 4 +/* enum: Read serial number and use count. */ +#define MC_CMD_SUC_MANFTEST_WEAROUT_READ 0x0 +/* enum: Update use count on wearout adapter. */ +#define MC_CMD_SUC_MANFTEST_WEAROUT_UPDATE 0x1 +/* enum: Start an ADC calibration. */ +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_START 0x2 +/* enum: Read the status of an ADC calibration.
*/ +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS 0x3 +/* enum: Read the results of an ADC calibration. */ +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT 0x4 +/* enum: Read the PCIe configuration. */ +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ 0x5 +/* enum: Write the PCIe configuration. */ +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE 0x6 +/* enum: Write FRU information to SUC. The FRU information is taken from the + * FRU_INFORMATION partition. Attempts to write to read-only FRUs are rejected. + */ +#define MC_CMD_SUC_MANFTEST_FRU_WRITE 0x7 + +/* MC_CMD_SUC_MANFTEST_OUT msgresponse */ +#define MC_CMD_SUC_MANFTEST_OUT_LEN 0 + +/* MC_CMD_SUC_MANFTEST_WEAROUT_READ_IN msgrequest */ +#define MC_CMD_SUC_MANFTEST_WEAROUT_READ_IN_LEN 4 +/* The manftest operation to be performed. This must be + * MC_CMD_SUC_MANFTEST_WEAROUT_READ. + */ +#define MC_CMD_SUC_MANFTEST_WEAROUT_READ_IN_OP_OFST 0 +#define MC_CMD_SUC_MANFTEST_WEAROUT_READ_IN_OP_LEN 4 + +/* MC_CMD_SUC_MANFTEST_WEAROUT_READ_OUT msgresponse */ +#define MC_CMD_SUC_MANFTEST_WEAROUT_READ_OUT_LEN 20 +/* The serial number of the wearout adapter, see SF-112717-PR for format. */ +#define MC_CMD_SUC_MANFTEST_WEAROUT_READ_OUT_SERIAL_NUMBER_OFST 0 +#define MC_CMD_SUC_MANFTEST_WEAROUT_READ_OUT_SERIAL_NUMBER_LEN 16 +/* The use count of the wearout adapter. */ +#define MC_CMD_SUC_MANFTEST_WEAROUT_READ_OUT_USE_COUNT_OFST 16 +#define MC_CMD_SUC_MANFTEST_WEAROUT_READ_OUT_USE_COUNT_LEN 4 + +/* MC_CMD_SUC_MANFTEST_WEAROUT_UPDATE_IN msgrequest */ +#define MC_CMD_SUC_MANFTEST_WEAROUT_UPDATE_IN_LEN 4 +/* The manftest operation to be performed. This must be + * MC_CMD_SUC_MANFTEST_WEAROUT_UPDATE. + */ +#define MC_CMD_SUC_MANFTEST_WEAROUT_UPDATE_IN_OP_OFST 0 +#define MC_CMD_SUC_MANFTEST_WEAROUT_UPDATE_IN_OP_LEN 4 + +/* MC_CMD_SUC_MANFTEST_WEAROUT_UPDATE_OUT msgresponse */ +#define MC_CMD_SUC_MANFTEST_WEAROUT_UPDATE_OUT_LEN 0 + +/* MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_START_IN msgrequest */ +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_START_IN_LEN 4 +/* The manftest operation to be performed. This must be + * MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_START. + */ +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_START_IN_OP_OFST 0 +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_START_IN_OP_LEN 4 + +/* MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_START_OUT msgresponse */ +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_START_OUT_LEN 0 + +/* MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_IN msgrequest */ +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_IN_LEN 4 +/* The manftest operation to be performed. This must be + * MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS. + */ +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_IN_OP_OFST 0 +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_IN_OP_LEN 4 + +/* MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT msgresponse */ +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_LEN 4 +/* The combined status of the calibration operation. 
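+ *
+ * The individual flag bits are defined directly below. As a hedged sketch
+ * of how a manftest client might poll (flags is the little-endian dword
+ * read from this offset; the MCDI transport itself is not shown):
+ *
+ *   bool busy = flags &
+ *       (1u << MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_CALIBRATING_LBN);
+ *   bool failed = flags &
+ *       (1u << MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_FAILED_LBN);
+ *   // re-issue ADC_CALIBRATE_STATUS while busy; read the RESULT when done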
*/ +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_FLAGS_OFST 0 +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_FLAGS_LEN 4 +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_CALIBRATING_LBN 0 +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_CALIBRATING_WIDTH 1 +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_FAILED_LBN 1 +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_FAILED_WIDTH 1 +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_RESULT_LBN 2 +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_RESULT_WIDTH 4 +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_INDEX_LBN 6 +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_INDEX_WIDTH 2 + +/* MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT_IN msgrequest */ +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT_IN_LEN 4 +/* The manftest operation to be performed. This must be + * MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT. + */ +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT_IN_OP_OFST 0 +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT_IN_OP_LEN 4 + +/* MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT_OUT msgresponse */ +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT_OUT_LEN 12 +/* The set of calibration results. */ +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT_OUT_VALUE_OFST 0 +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT_OUT_VALUE_LEN 4 +#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT_OUT_VALUE_NUM 3 + +/* MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_IN msgrequest */ +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_IN_LEN 4 +/* The manftest operation to be performed. This must be + * MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ. + */ +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_IN_OP_OFST 0 +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_IN_OP_LEN 4 + +/* MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_OUT msgresponse */ +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_OUT_LEN 4 +/* The PCIe vendor ID. */ +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_OUT_VENDOR_ID_OFST 0 +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_OUT_VENDOR_ID_LEN 2 +/* The PCIe device ID. */ +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_OUT_DEVICE_ID_OFST 2 +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_OUT_DEVICE_ID_LEN 2 + +/* MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_IN msgrequest */ +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_IN_LEN 8 +/* The manftest operation to be performed. This must be + * MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE. + */ +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_IN_OP_OFST 0 +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_IN_OP_LEN 4 +/* The PCIe vendor ID. */ +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_IN_VENDOR_ID_OFST 4 +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_IN_VENDOR_ID_LEN 2 +/* The PCIe device ID. */ +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_IN_DEVICE_ID_OFST 6 +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_IN_DEVICE_ID_LEN 2 + +/* MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_OUT msgresponse */ +#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_OUT_LEN 0 + +/* MC_CMD_SUC_MANFTEST_FRU_WRITE_IN msgrequest */ +#define MC_CMD_SUC_MANFTEST_FRU_WRITE_IN_LEN 4 +/* The manftest operation to be performed. This must be + * MC_CMD_SUC_MANFTEST_FRU_WRITE + */ +#define MC_CMD_SUC_MANFTEST_FRU_WRITE_IN_OP_OFST 0 +#define MC_CMD_SUC_MANFTEST_FRU_WRITE_IN_OP_LEN 4 + +/* MC_CMD_SUC_MANFTEST_FRU_WRITE_OUT msgresponse */ +#define MC_CMD_SUC_MANFTEST_FRU_WRITE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_CERTIFICATE + * Request a certificate. 
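+ *
+ * Certificates can exceed a single MCDI response, so they are retrieved in
+ * chunks: the caller advances the request OFFSET by the number of DATA
+ * bytes returned until OFFSET reaches TOTAL_LENGTH (fields defined below).
+ * An illustrative loop; the transport and the get_dword() helper are
+ * hypothetical, only the macros come from this header:
+ *
+ *   uint32_t off = 0, total;
+ *   do {
+ *       // issue MC_CMD_GET_CERTIFICATE with TYPE and OFFSET = off,
+ *       // receiving rsp_len payload bytes in rsp
+ *       total = get_dword(rsp, MC_CMD_GET_CERTIFICATE_OUT_TOTAL_LENGTH_OFST);
+ *       uint32_t n = rsp_len - MC_CMD_GET_CERTIFICATE_OUT_DATA_OFST;
+ *       // append n bytes from rsp + MC_CMD_GET_CERTIFICATE_OUT_DATA_OFST
+ *       off += n;
+ *   } while (off < total);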
+ */ +#define MC_CMD_GET_CERTIFICATE 0x12c +#undef MC_CMD_0x12c_PRIVILEGE_CTG + +#define MC_CMD_0x12c_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_CERTIFICATE_IN msgrequest */ +#define MC_CMD_GET_CERTIFICATE_IN_LEN 8 +/* Type of the certificate to be retrieved. */ +#define MC_CMD_GET_CERTIFICATE_IN_TYPE_OFST 0 +#define MC_CMD_GET_CERTIFICATE_IN_TYPE_LEN 4 +#define MC_CMD_GET_CERTIFICATE_IN_UNUSED 0x0 /* enum */ +#define MC_CMD_GET_CERTIFICATE_IN_AAC 0x1 /* enum */ +/* enum: Adapter Authentication Certificate (AAC). The AAC is unique to each + * adapter and is used to verify its authenticity. It is installed by Manftest. + */ +#define MC_CMD_GET_CERTIFICATE_IN_ADAPTER_AUTH 0x1 +#define MC_CMD_GET_CERTIFICATE_IN_AASC 0x2 /* enum */ +/* enum: Adapter Authentication Signing Certificate (AASC). The AASC is shared + * by a group of adapters (typically a purchase order) and is used to verify + * the validity of the AAC along with the SF root certificate. It is installed + * by Manftest. + */ +#define MC_CMD_GET_CERTIFICATE_IN_ADAPTER_AUTH_SIGNING 0x2 +#define MC_CMD_GET_CERTIFICATE_IN_CUSTOMER_AAC 0x3 /* enum */ +/* enum: Customer Adapter Authentication Certificate. The Customer AAC is + * unique to each adapter and is used to verify its authenticity in cases where + * either the AAC is not installed or a customer desires to use their own + * certificate chain. It is installed by the customer. + */ +#define MC_CMD_GET_CERTIFICATE_IN_CUSTOMER_ADAPTER_AUTH 0x3 +#define MC_CMD_GET_CERTIFICATE_IN_CUSTOMER_AASC 0x4 /* enum */ +/* enum: Customer Adapter Authentication Signing Certificate (Customer AASC). + * The Customer AASC is shared by a group of adapters and is used to verify the + * validity of the Customer AAC along with the customer's root certificate. It + * is installed by the customer. + */ +#define MC_CMD_GET_CERTIFICATE_IN_CUSTOMER_ADAPTER_AUTH_SIGNING 0x4 +/* Offset, measured in bytes, relative to the start of the certificate data + * from which the certificate is to be retrieved. + */ +#define MC_CMD_GET_CERTIFICATE_IN_OFFSET_OFST 4 +#define MC_CMD_GET_CERTIFICATE_IN_OFFSET_LEN 4 + +/* MC_CMD_GET_CERTIFICATE_OUT msgresponse */ +#define MC_CMD_GET_CERTIFICATE_OUT_LENMIN 13 +#define MC_CMD_GET_CERTIFICATE_OUT_LENMAX 252 +#define MC_CMD_GET_CERTIFICATE_OUT_LEN(num) (12+1*(num)) +/* Type of the certificate. */ +#define MC_CMD_GET_CERTIFICATE_OUT_TYPE_OFST 0 +#define MC_CMD_GET_CERTIFICATE_OUT_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_GET_CERTIFICATE_IN/TYPE */ +/* Offset, measured in bytes, relative to the start of the certificate data + * from which data in this message starts. + */ +#define MC_CMD_GET_CERTIFICATE_OUT_OFFSET_OFST 4 +#define MC_CMD_GET_CERTIFICATE_OUT_OFFSET_LEN 4 +/* Total length of the certificate data. */ +#define MC_CMD_GET_CERTIFICATE_OUT_TOTAL_LENGTH_OFST 8 +#define MC_CMD_GET_CERTIFICATE_OUT_TOTAL_LENGTH_LEN 4 +/* The certificate data. */ +#define MC_CMD_GET_CERTIFICATE_OUT_DATA_OFST 12 +#define MC_CMD_GET_CERTIFICATE_OUT_DATA_LEN 1 +#define MC_CMD_GET_CERTIFICATE_OUT_DATA_MINNUM 1 +#define MC_CMD_GET_CERTIFICATE_OUT_DATA_MAXNUM 240 + + +/***********************************/ +/* MC_CMD_GET_NIC_GLOBAL + * Get a global value which applies to all PCI functions + */ +#define MC_CMD_GET_NIC_GLOBAL 0x12d +#undef MC_CMD_0x12d_PRIVILEGE_CTG + +#define MC_CMD_0x12d_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_NIC_GLOBAL_IN msgrequest */ +#define MC_CMD_GET_NIC_GLOBAL_IN_LEN 4 +/* Key to request value for, see enum values in MC_CMD_SET_NIC_GLOBAL.
If the + * given key is unknown to the current firmware, the call will fail with + * ENOENT. + */ +#define MC_CMD_GET_NIC_GLOBAL_IN_KEY_OFST 0 +#define MC_CMD_GET_NIC_GLOBAL_IN_KEY_LEN 4 + +/* MC_CMD_GET_NIC_GLOBAL_OUT msgresponse */ +#define MC_CMD_GET_NIC_GLOBAL_OUT_LEN 4 +/* Value of requested key, see key descriptions below. */ +#define MC_CMD_GET_NIC_GLOBAL_OUT_VALUE_OFST 0 +#define MC_CMD_GET_NIC_GLOBAL_OUT_VALUE_LEN 4 + + +/***********************************/ +/* MC_CMD_SET_NIC_GLOBAL + * Set a global value which applies to all PCI functions. Most global values + * can only be changed under specific conditions, and this call will return an + * appropriate error otherwise (see key descriptions). + */ +#define MC_CMD_SET_NIC_GLOBAL 0x12e +#undef MC_CMD_0x12e_PRIVILEGE_CTG + +#define MC_CMD_0x12e_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_SET_NIC_GLOBAL_IN msgrequest */ +#define MC_CMD_SET_NIC_GLOBAL_IN_LEN 8 +/* Key to change value of. Firmware will return ENOENT for keys it doesn't know + * about. + */ +#define MC_CMD_SET_NIC_GLOBAL_IN_KEY_OFST 0 +#define MC_CMD_SET_NIC_GLOBAL_IN_KEY_LEN 4 +/* enum: Request switching the datapath firmware sub-variant. Currently only + * useful when running the DPDK f/w variant. See key values below, and the DPDK + * section of the EF10 Driver Writers Guide. Note that any driver attaching + * with the SUBVARIANT_AWARE flag cleared is implicitly considered as a request + * to switch back to the default sub-variant, and will thus reset this value. + * If a sub-variant switch happens, all other PCI functions will get their + * resources reset (they will see an MC reboot). + */ +#define MC_CMD_SET_NIC_GLOBAL_IN_FIRMWARE_SUBVARIANT 0x1 +/* New value to set, see key descriptions above. */ +#define MC_CMD_SET_NIC_GLOBAL_IN_VALUE_OFST 4 +#define MC_CMD_SET_NIC_GLOBAL_IN_VALUE_LEN 4 +/* enum: Only if KEY = FIRMWARE_SUBVARIANT. Default sub-variant with support + * for maximum features for the current f/w variant. A request from a + * privileged function to set this particular value will always succeed. + */ +#define MC_CMD_SET_NIC_GLOBAL_IN_FW_SUBVARIANT_DEFAULT 0x0 +/* enum: Only if KEY = FIRMWARE_SUBVARIANT. Increases packet rate at the cost + * of not supporting any TX checksum offloads. Only supported when running some + * f/w variants, others will return ENOTSUP (as reported by the homonymous bit + * in MC_CMD_GET_CAPABILITIES_V2). Can only be set when no other drivers are + * attached, and the calling driver must have no resources allocated. See the + * DPDK section of the EF10 Driver Writers Guide for a more detailed + * description with possible error codes. + */ +#define MC_CMD_SET_NIC_GLOBAL_IN_FW_SUBVARIANT_NO_TX_CSUM 0x1 + + +/***********************************/ +/* MC_CMD_LTSSM_TRACE_POLL + * Medford2 hardware has support for logging all LTSSM state transitions to a + * hardware buffer. When built with WITH_LTSSM_TRACE=1, the firmware will + * periodically dump the contents of this hardware buffer to an internal + * firmware buffer for later extraction. + */ +#define MC_CMD_LTSSM_TRACE_POLL 0x12f +#undef MC_CMD_0x12f_PRIVILEGE_CTG + +#define MC_CMD_0x12f_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_LTSSM_TRACE_POLL_IN msgrequest: Read transitions from the firmware + * internal buffer. + */ +#define MC_CMD_LTSSM_TRACE_POLL_IN_LEN 4 +/* The maximum number of rows that the caller can accept. The format of each + * row is defined in MC_CMD_LTSSM_TRACE_POLL_OUT.
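+ *
+ * The response (defined below) returns up to this many rows together with
+ * a CONTINUES flag in its FLAGS word. An illustrative consumer loop
+ * (mcdi_rpc(), put_dword() and get_dword() are hypothetical stand-ins for
+ * the caller's MCDI transport, not functions provided by this header):
+ *
+ *   put_dword(req, MC_CMD_LTSSM_TRACE_POLL_IN_MAX_ROW_COUNT_OFST, 30);
+ *   do {
+ *       mcdi_rpc(MC_CMD_LTSSM_TRACE_POLL, req, sizeof(req), rsp, &rsp_len);
+ *       flags = get_dword(rsp, MC_CMD_LTSSM_TRACE_POLL_OUT_FLAGS_OFST);
+ *       // consume ROW_COUNT rows starting at _OUT_ROWS_OFST
+ *   } while (flags & (1u << MC_CMD_LTSSM_TRACE_POLL_OUT_CONTINUES_LBN));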
+ */ +#define MC_CMD_LTSSM_TRACE_POLL_IN_MAX_ROW_COUNT_OFST 0 +#define MC_CMD_LTSSM_TRACE_POLL_IN_MAX_ROW_COUNT_LEN 4 + +/* MC_CMD_LTSSM_TRACE_POLL_OUT msgresponse */ +#define MC_CMD_LTSSM_TRACE_POLL_OUT_LENMIN 16 +#define MC_CMD_LTSSM_TRACE_POLL_OUT_LENMAX 248 +#define MC_CMD_LTSSM_TRACE_POLL_OUT_LEN(num) (8+8*(num)) +#define MC_CMD_LTSSM_TRACE_POLL_OUT_FLAGS_OFST 0 +#define MC_CMD_LTSSM_TRACE_POLL_OUT_FLAGS_LEN 4 +#define MC_CMD_LTSSM_TRACE_POLL_OUT_HW_BUFFER_OVERFLOW_LBN 0 +#define MC_CMD_LTSSM_TRACE_POLL_OUT_HW_BUFFER_OVERFLOW_WIDTH 1 +#define MC_CMD_LTSSM_TRACE_POLL_OUT_FW_BUFFER_OVERFLOW_LBN 1 +#define MC_CMD_LTSSM_TRACE_POLL_OUT_FW_BUFFER_OVERFLOW_WIDTH 1 +#define MC_CMD_LTSSM_TRACE_POLL_OUT_CONTINUES_LBN 31 +#define MC_CMD_LTSSM_TRACE_POLL_OUT_CONTINUES_WIDTH 1 +/* The number of rows present in this response. */ +#define MC_CMD_LTSSM_TRACE_POLL_OUT_ROW_COUNT_OFST 4 +#define MC_CMD_LTSSM_TRACE_POLL_OUT_ROW_COUNT_LEN 4 +#define MC_CMD_LTSSM_TRACE_POLL_OUT_ROWS_OFST 8 +#define MC_CMD_LTSSM_TRACE_POLL_OUT_ROWS_LEN 8 +#define MC_CMD_LTSSM_TRACE_POLL_OUT_ROWS_LO_OFST 8 +#define MC_CMD_LTSSM_TRACE_POLL_OUT_ROWS_HI_OFST 12 +#define MC_CMD_LTSSM_TRACE_POLL_OUT_ROWS_MINNUM 0 +#define MC_CMD_LTSSM_TRACE_POLL_OUT_ROWS_MAXNUM 30 +#define MC_CMD_LTSSM_TRACE_POLL_OUT_LTSSM_STATE_LBN 0 +#define MC_CMD_LTSSM_TRACE_POLL_OUT_LTSSM_STATE_WIDTH 6 +#define MC_CMD_LTSSM_TRACE_POLL_OUT_RDLH_LINK_UP_LBN 6 +#define MC_CMD_LTSSM_TRACE_POLL_OUT_RDLH_LINK_UP_WIDTH 1 +#define MC_CMD_LTSSM_TRACE_POLL_OUT_WAKE_N_LBN 7 +#define MC_CMD_LTSSM_TRACE_POLL_OUT_WAKE_N_WIDTH 1 +#define MC_CMD_LTSSM_TRACE_POLL_OUT_TIMESTAMP_PS_LBN 8 +#define MC_CMD_LTSSM_TRACE_POLL_OUT_TIMESTAMP_PS_WIDTH 24 +/* The time of the LTSSM transition. Times are reported as fractional + * microseconds since MC boot (wrapping at 2^32us). The fractional part is + * reported in picoseconds. 0 <= TIMESTAMP_PS < 1000000 timestamp in seconds = + * ((TIMESTAMP_US + TIMESTAMP_PS / 1000000) / 1000000) + */ +#define MC_CMD_LTSSM_TRACE_POLL_OUT_TIMESTAMP_US_OFST 12 +#define MC_CMD_LTSSM_TRACE_POLL_OUT_TIMESTAMP_US_LEN 4 + #endif /* _SIENA_MC_DRIVER_PCOL_H */ /*! \cidoxg_end */ diff --git a/drivers/net/sfc/base/efx_regs_mcdi_aoe.h b/drivers/net/sfc/base/efx_regs_mcdi_aoe.h new file mode 100644 index 00000000..6aaf212f --- /dev/null +++ b/drivers/net/sfc/base/efx_regs_mcdi_aoe.h @@ -0,0 +1,2914 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright 2008-2018 Solarflare Communications Inc. + * All rights reserved. + */ + +/*! \cidoxg_firmware_mc_cmd */ + +#ifndef _SIENA_MC_DRIVER_PCOL_AOE_H +#define _SIENA_MC_DRIVER_PCOL_AOE_H + + + +/***********************************/ +/* MC_CMD_FC + * Perform an FC operation + */ +#define MC_CMD_FC 0x9 + +/* MC_CMD_FC_IN msgrequest */ +#define MC_CMD_FC_IN_LEN 4 +#define MC_CMD_FC_IN_OP_HDR_OFST 0 +#define MC_CMD_FC_IN_OP_HDR_LEN 4 +#define MC_CMD_FC_IN_OP_LBN 0 +#define MC_CMD_FC_IN_OP_WIDTH 8 +/* enum: NULL MCDI command to FC. 
*/ +#define MC_CMD_FC_OP_NULL 0x1 +/* enum: Unused opcode */ +#define MC_CMD_FC_OP_UNUSED 0x2 +/* enum: MAC driver commands */ +#define MC_CMD_FC_OP_MAC 0x3 +/* enum: Read FC memory */ +#define MC_CMD_FC_OP_READ32 0x4 +/* enum: Write to FC memory */ +#define MC_CMD_FC_OP_WRITE32 0x5 +/* enum: Read FC memory */ +#define MC_CMD_FC_OP_TRC_READ 0x6 +/* enum: Write to FC memory */ +#define MC_CMD_FC_OP_TRC_WRITE 0x7 +/* enum: FC firmware Version */ +#define MC_CMD_FC_OP_GET_VERSION 0x8 +/* enum: Read FC memory */ +#define MC_CMD_FC_OP_TRC_RX_READ 0x9 +/* enum: Write to FC memory */ +#define MC_CMD_FC_OP_TRC_RX_WRITE 0xa +/* enum: SFP parameters */ +#define MC_CMD_FC_OP_SFP 0xb +/* enum: DDR3 test */ +#define MC_CMD_FC_OP_DDR_TEST 0xc +/* enum: Get Crash context from FC */ +#define MC_CMD_FC_OP_GET_ASSERT 0xd +/* enum: Get FPGA Build registers */ +#define MC_CMD_FC_OP_FPGA_BUILD 0xe +/* enum: Read map support commands */ +#define MC_CMD_FC_OP_READ_MAP 0xf +/* enum: FC Capabilities */ +#define MC_CMD_FC_OP_CAPABILITIES 0x10 +/* enum: FC Global flags */ +#define MC_CMD_FC_OP_GLOBAL_FLAGS 0x11 +/* enum: FC IO using relative addressing modes */ +#define MC_CMD_FC_OP_IO_REL 0x12 +/* enum: FPGA link information */ +#define MC_CMD_FC_OP_UHLINK 0x13 +/* enum: Configure loopbacks and link on FPGA ports */ +#define MC_CMD_FC_OP_SET_LINK 0x14 +/* enum: Licensing operations relating to AOE */ +#define MC_CMD_FC_OP_LICENSE 0x15 +/* enum: Startup information to the FC */ +#define MC_CMD_FC_OP_STARTUP 0x16 +/* enum: Configure a DMA read */ +#define MC_CMD_FC_OP_DMA 0x17 +/* enum: Configure a timed read */ +#define MC_CMD_FC_OP_TIMED_READ 0x18 +/* enum: Control UART logging */ +#define MC_CMD_FC_OP_LOG 0x19 +/* enum: Get the value of a given clock_id */ +#define MC_CMD_FC_OP_CLOCK 0x1a +/* enum: DDR3/QDR3 parameters */ +#define MC_CMD_FC_OP_DDR 0x1b +/* enum: PTP and timestamp control */ +#define MC_CMD_FC_OP_TIMESTAMP 0x1c +/* enum: Commands for SPI Flash interface */ +#define MC_CMD_FC_OP_SPI 0x1d +/* enum: Commands for diagnostic components */ +#define MC_CMD_FC_OP_DIAG 0x1e +/* enum: External AOE port. */ +#define MC_CMD_FC_IN_PORT_EXT_OFST 0x0 +/* enum: Internal AOE port. 
*/ +#define MC_CMD_FC_IN_PORT_INT_OFST 0x40 + +/* MC_CMD_FC_IN_NULL msgrequest */ +#define MC_CMD_FC_IN_NULL_LEN 4 +#define MC_CMD_FC_IN_CMD_OFST 0 +#define MC_CMD_FC_IN_CMD_LEN 4 + +/* MC_CMD_FC_IN_PHY msgrequest */ +#define MC_CMD_FC_IN_PHY_LEN 5 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* FC PHY driver operation code */ +#define MC_CMD_FC_IN_PHY_OP_OFST 4 +#define MC_CMD_FC_IN_PHY_OP_LEN 1 +/* enum: PHY init handler */ +#define MC_CMD_FC_OP_PHY_OP_INIT 0x1 +/* enum: PHY reconfigure handler */ +#define MC_CMD_FC_OP_PHY_OP_RECONFIGURE 0x2 +/* enum: PHY reboot handler */ +#define MC_CMD_FC_OP_PHY_OP_REBOOT 0x3 +/* enum: PHY get_supported_cap handler */ +#define MC_CMD_FC_OP_PHY_OP_GET_SUPPORTED_CAP 0x4 +/* enum: PHY get_config handler */ +#define MC_CMD_FC_OP_PHY_OP_GET_CONFIG 0x5 +/* enum: PHY get_media_info handler */ +#define MC_CMD_FC_OP_PHY_OP_GET_MEDIA_INFO 0x6 +/* enum: PHY set_led handler */ +#define MC_CMD_FC_OP_PHY_OP_SET_LED 0x7 +/* enum: PHY lasi_interrupt handler */ +#define MC_CMD_FC_OP_PHY_OP_LASI_INTERRUPT 0x8 +/* enum: PHY check_link handler */ +#define MC_CMD_FC_OP_PHY_OP_CHECK_LINK 0x9 +/* enum: PHY fill_stats handler */ +#define MC_CMD_FC_OP_PHY_OP_FILL_STATS 0xa +/* enum: PHY bpx_link_state_changed handler */ +#define MC_CMD_FC_OP_PHY_OP_BPX_LINK_STATE_CHANGED 0xb +/* enum: PHY get_state handler */ +#define MC_CMD_FC_OP_PHY_OP_GET_STATE 0xc +/* enum: PHY start_bist handler */ +#define MC_CMD_FC_OP_PHY_OP_START_BIST 0xd +/* enum: PHY poll_bist handler */ +#define MC_CMD_FC_OP_PHY_OP_POLL_BIST 0xe +/* enum: PHY nvram_test handler */ +#define MC_CMD_FC_OP_PHY_OP_NVRAM_TEST 0xf +/* enum: PHY relinquish handler */ +#define MC_CMD_FC_OP_PHY_OP_RELINQUISH_SPI 0x10 +/* enum: PHY read connection from FC - may not be required */ +#define MC_CMD_FC_OP_PHY_OP_GET_CONNECTION 0x11 +/* enum: PHY read flags from FC - may not be required */ +#define MC_CMD_FC_OP_PHY_OP_GET_FLAGS 0x12 + +/* MC_CMD_FC_IN_PHY_INIT msgrequest */ +#define MC_CMD_FC_IN_PHY_INIT_LEN 4 +#define MC_CMD_FC_IN_PHY_CMD_OFST 0 +#define MC_CMD_FC_IN_PHY_CMD_LEN 4 + +/* MC_CMD_FC_IN_MAC msgrequest */ +#define MC_CMD_FC_IN_MAC_LEN 8 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_MAC_HEADER_OFST 4 +#define MC_CMD_FC_IN_MAC_HEADER_LEN 4 +#define MC_CMD_FC_IN_MAC_OP_LBN 0 +#define MC_CMD_FC_IN_MAC_OP_WIDTH 8 +/* enum: MAC reconfigure handler */ +#define MC_CMD_FC_OP_MAC_OP_RECONFIGURE 0x1 +/* enum: MAC Set command - same as MC_CMD_SET_MAC */ +#define MC_CMD_FC_OP_MAC_OP_SET_LINK 0x2 +/* enum: MAC statistics */ +#define MC_CMD_FC_OP_MAC_OP_GET_STATS 0x3 +/* enum: MAC RX statistics */ +#define MC_CMD_FC_OP_MAC_OP_GET_RX_STATS 0x6 +/* enum: MAC TX statistics */ +#define MC_CMD_FC_OP_MAC_OP_GET_TX_STATS 0x7 +/* enum: MAC Read status */ +#define MC_CMD_FC_OP_MAC_OP_READ_STATUS 0x8 +#define MC_CMD_FC_IN_MAC_PORT_TYPE_LBN 8 +#define MC_CMD_FC_IN_MAC_PORT_TYPE_WIDTH 8 +/* enum: External FPGA port. */ +#define MC_CMD_FC_PORT_EXT 0x0 +/* enum: Internal Siena-facing FPGA ports. */ +#define MC_CMD_FC_PORT_INT 0x1 +#define MC_CMD_FC_IN_MAC_PORT_IDX_LBN 16 +#define MC_CMD_FC_IN_MAC_PORT_IDX_WIDTH 8 +#define MC_CMD_FC_IN_MAC_CMD_FORMAT_LBN 24 +#define MC_CMD_FC_IN_MAC_CMD_FORMAT_WIDTH 8 +/* enum: Default FC command format; the fields PORT_TYPE and PORT_IDX are + * irrelevant. Port number is derived from pci_fn; passed in FC header. + */ +#define MC_CMD_FC_OP_MAC_CMD_FORMAT_DEFAULT 0x0 +/* enum: Override default port number.
Port number determined by fields + * PORT_TYPE and PORT_IDX. + */ +#define MC_CMD_FC_OP_MAC_CMD_FORMAT_PORT_OVERRIDE 0x1 + +/* MC_CMD_FC_IN_MAC_RECONFIGURE msgrequest */ +#define MC_CMD_FC_IN_MAC_RECONFIGURE_LEN 8 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */ +/* MC_CMD_FC_IN_MAC_HEADER_LEN 4 */ + +/* MC_CMD_FC_IN_MAC_SET_LINK msgrequest */ +#define MC_CMD_FC_IN_MAC_SET_LINK_LEN 32 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */ +/* MC_CMD_FC_IN_MAC_HEADER_LEN 4 */ +/* MTU size */ +#define MC_CMD_FC_IN_MAC_SET_LINK_MTU_OFST 8 +#define MC_CMD_FC_IN_MAC_SET_LINK_MTU_LEN 4 +/* Drain Tx FIFO */ +#define MC_CMD_FC_IN_MAC_SET_LINK_DRAIN_OFST 12 +#define MC_CMD_FC_IN_MAC_SET_LINK_DRAIN_LEN 4 +#define MC_CMD_FC_IN_MAC_SET_LINK_ADDR_OFST 16 +#define MC_CMD_FC_IN_MAC_SET_LINK_ADDR_LEN 8 +#define MC_CMD_FC_IN_MAC_SET_LINK_ADDR_LO_OFST 16 +#define MC_CMD_FC_IN_MAC_SET_LINK_ADDR_HI_OFST 20 +#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_OFST 24 +#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_LEN 4 +#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_UNICAST_LBN 0 +#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_UNICAST_WIDTH 1 +#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_BRDCAST_LBN 1 +#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_BRDCAST_WIDTH 1 +#define MC_CMD_FC_IN_MAC_SET_LINK_FCNTL_OFST 28 +#define MC_CMD_FC_IN_MAC_SET_LINK_FCNTL_LEN 4 + +/* MC_CMD_FC_IN_MAC_READ_STATUS msgrequest */ +#define MC_CMD_FC_IN_MAC_READ_STATUS_LEN 8 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */ +/* MC_CMD_FC_IN_MAC_HEADER_LEN 4 */ + +/* MC_CMD_FC_IN_MAC_GET_RX_STATS msgrequest */ +#define MC_CMD_FC_IN_MAC_GET_RX_STATS_LEN 8 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */ +/* MC_CMD_FC_IN_MAC_HEADER_LEN 4 */ + +/* MC_CMD_FC_IN_MAC_GET_TX_STATS msgrequest */ +#define MC_CMD_FC_IN_MAC_GET_TX_STATS_LEN 8 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */ +/* MC_CMD_FC_IN_MAC_HEADER_LEN 4 */ + +/* MC_CMD_FC_IN_MAC_GET_STATS msgrequest */ +#define MC_CMD_FC_IN_MAC_GET_STATS_LEN 20 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */ +/* MC_CMD_FC_IN_MAC_HEADER_LEN 4 */ +/* MC Statistics index */ +#define MC_CMD_FC_IN_MAC_GET_STATS_STATS_INDEX_OFST 8 +#define MC_CMD_FC_IN_MAC_GET_STATS_STATS_INDEX_LEN 4 +#define MC_CMD_FC_IN_MAC_GET_STATS_FLAGS_OFST 12 +#define MC_CMD_FC_IN_MAC_GET_STATS_FLAGS_LEN 4 +#define MC_CMD_FC_IN_MAC_GET_STATS_CLEAR_ALL_LBN 0 +#define MC_CMD_FC_IN_MAC_GET_STATS_CLEAR_ALL_WIDTH 1 +#define MC_CMD_FC_IN_MAC_GET_STATS_CLEAR_LBN 1 +#define MC_CMD_FC_IN_MAC_GET_STATS_CLEAR_WIDTH 1 +#define MC_CMD_FC_IN_MAC_GET_STATS_UPDATE_LBN 2 +#define MC_CMD_FC_IN_MAC_GET_STATS_UPDATE_WIDTH 1 +/* Number of statistics to read */ +#define MC_CMD_FC_IN_MAC_GET_STATS_NUM_OFST 16 +#define MC_CMD_FC_IN_MAC_GET_STATS_NUM_LEN 4 +#define MC_CMD_FC_MAC_NSTATS_PER_BLOCK 0x1e /* enum */ +#define MC_CMD_FC_MAC_NBYTES_PER_STAT 0x8 /* enum */ + +/* MC_CMD_FC_IN_READ32 msgrequest */ +#define MC_CMD_FC_IN_READ32_LEN 16 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_READ32_ADDR_HI_OFST 4 +#define MC_CMD_FC_IN_READ32_ADDR_HI_LEN 4 +#define MC_CMD_FC_IN_READ32_ADDR_LO_OFST 8 +#define MC_CMD_FC_IN_READ32_ADDR_LO_LEN 4 +#define MC_CMD_FC_IN_READ32_NUMWORDS_OFST 12 +#define MC_CMD_FC_IN_READ32_NUMWORDS_LEN 4 + +/* MC_CMD_FC_IN_WRITE32 msgrequest */ 
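+/* The WRITE32 request below follows the usual MCDI variable-length
+ * pattern: _LENMIN and _LENMAX bound the total payload, _LEN(num) computes
+ * the length for num array entries, and the array field carries _MINNUM
+ * and _MAXNUM bounds. As a hedged sizing example (illustrative only, not
+ * part of this header), a write of n dwords would occupy:
+ *
+ *   size_t len = MC_CMD_FC_IN_WRITE32_LEN(n);   // 12 + 4*n bytes
+ *   // valid only while MINNUM <= n <= MAXNUM (1..60 here)
+ */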
+#define MC_CMD_FC_IN_WRITE32_LENMIN 16 +#define MC_CMD_FC_IN_WRITE32_LENMAX 252 +#define MC_CMD_FC_IN_WRITE32_LEN(num) (12+4*(num)) +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_WRITE32_ADDR_HI_OFST 4 +#define MC_CMD_FC_IN_WRITE32_ADDR_HI_LEN 4 +#define MC_CMD_FC_IN_WRITE32_ADDR_LO_OFST 8 +#define MC_CMD_FC_IN_WRITE32_ADDR_LO_LEN 4 +#define MC_CMD_FC_IN_WRITE32_BUFFER_OFST 12 +#define MC_CMD_FC_IN_WRITE32_BUFFER_LEN 4 +#define MC_CMD_FC_IN_WRITE32_BUFFER_MINNUM 1 +#define MC_CMD_FC_IN_WRITE32_BUFFER_MAXNUM 60 + +/* MC_CMD_FC_IN_TRC_READ msgrequest */ +#define MC_CMD_FC_IN_TRC_READ_LEN 12 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_TRC_READ_TRC_OFST 4 +#define MC_CMD_FC_IN_TRC_READ_TRC_LEN 4 +#define MC_CMD_FC_IN_TRC_READ_CHANNEL_OFST 8 +#define MC_CMD_FC_IN_TRC_READ_CHANNEL_LEN 4 + +/* MC_CMD_FC_IN_TRC_WRITE msgrequest */ +#define MC_CMD_FC_IN_TRC_WRITE_LEN 28 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_TRC_WRITE_TRC_OFST 4 +#define MC_CMD_FC_IN_TRC_WRITE_TRC_LEN 4 +#define MC_CMD_FC_IN_TRC_WRITE_CHANNEL_OFST 8 +#define MC_CMD_FC_IN_TRC_WRITE_CHANNEL_LEN 4 +#define MC_CMD_FC_IN_TRC_WRITE_DATA_OFST 12 +#define MC_CMD_FC_IN_TRC_WRITE_DATA_LEN 4 +#define MC_CMD_FC_IN_TRC_WRITE_DATA_NUM 4 + +/* MC_CMD_FC_IN_GET_VERSION msgrequest */ +#define MC_CMD_FC_IN_GET_VERSION_LEN 4 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ + +/* MC_CMD_FC_IN_TRC_RX_READ msgrequest */ +#define MC_CMD_FC_IN_TRC_RX_READ_LEN 12 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_TRC_RX_READ_TRC_OFST 4 +#define MC_CMD_FC_IN_TRC_RX_READ_TRC_LEN 4 +#define MC_CMD_FC_IN_TRC_RX_READ_CHANNEL_OFST 8 +#define MC_CMD_FC_IN_TRC_RX_READ_CHANNEL_LEN 4 + +/* MC_CMD_FC_IN_TRC_RX_WRITE msgrequest */ +#define MC_CMD_FC_IN_TRC_RX_WRITE_LEN 20 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_TRC_RX_WRITE_TRC_OFST 4 +#define MC_CMD_FC_IN_TRC_RX_WRITE_TRC_LEN 4 +#define MC_CMD_FC_IN_TRC_RX_WRITE_CHANNEL_OFST 8 +#define MC_CMD_FC_IN_TRC_RX_WRITE_CHANNEL_LEN 4 +#define MC_CMD_FC_IN_TRC_RX_WRITE_DATA_OFST 12 +#define MC_CMD_FC_IN_TRC_RX_WRITE_DATA_LEN 4 +#define MC_CMD_FC_IN_TRC_RX_WRITE_DATA_NUM 2 + +/* MC_CMD_FC_IN_SFP msgrequest */ +#define MC_CMD_FC_IN_SFP_LEN 28 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* Link speed is 100, 1000, 10000, 40000 */ +#define MC_CMD_FC_IN_SFP_SPEED_OFST 4 +#define MC_CMD_FC_IN_SFP_SPEED_LEN 4 +/* Length of copper cable - zero when not relevant (e.g. if cable is fibre) */ +#define MC_CMD_FC_IN_SFP_COPPER_LEN_OFST 8 +#define MC_CMD_FC_IN_SFP_COPPER_LEN_LEN 4 +/* Not relevant for cards with QSFP modules. For older cards, true if module is + * a dual speed SFP+ module. + */ +#define MC_CMD_FC_IN_SFP_DUAL_SPEED_OFST 12 +#define MC_CMD_FC_IN_SFP_DUAL_SPEED_LEN 4 +/* True if an SFP Module is present (other fields valid when true) */ +#define MC_CMD_FC_IN_SFP_PRESENT_OFST 16 +#define MC_CMD_FC_IN_SFP_PRESENT_LEN 4 +/* The type of the SFP+ Module. For later cards with QSFP modules, this field + * is unused and the type is communicated by other means. + */ +#define MC_CMD_FC_IN_SFP_TYPE_OFST 20 +#define MC_CMD_FC_IN_SFP_TYPE_LEN 4 +/* Capabilities corresponding to 1 bits. 
*/
+#define MC_CMD_FC_IN_SFP_CAPS_OFST 24
+#define MC_CMD_FC_IN_SFP_CAPS_LEN 4
+
+/* MC_CMD_FC_IN_DDR_TEST msgrequest */
+#define MC_CMD_FC_IN_DDR_TEST_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_DDR_TEST_HEADER_OFST 4
+#define MC_CMD_FC_IN_DDR_TEST_HEADER_LEN 4
+#define MC_CMD_FC_IN_DDR_TEST_OP_LBN 0
+#define MC_CMD_FC_IN_DDR_TEST_OP_WIDTH 8
+/* enum: DRAM Test Start */
+#define MC_CMD_FC_OP_DDR_TEST_START 0x1
+/* enum: DRAM Test Poll */
+#define MC_CMD_FC_OP_DDR_TEST_POLL 0x2
+
+/* MC_CMD_FC_IN_DDR_TEST_START msgrequest */
+#define MC_CMD_FC_IN_DDR_TEST_START_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_DDR_TEST_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_DDR_TEST_HEADER_LEN 4 */
+#define MC_CMD_FC_IN_DDR_TEST_START_MASK_OFST 8
+#define MC_CMD_FC_IN_DDR_TEST_START_MASK_LEN 4
+#define MC_CMD_FC_IN_DDR_TEST_START_T0_LBN 0
+#define MC_CMD_FC_IN_DDR_TEST_START_T0_WIDTH 1
+#define MC_CMD_FC_IN_DDR_TEST_START_T1_LBN 1
+#define MC_CMD_FC_IN_DDR_TEST_START_T1_WIDTH 1
+#define MC_CMD_FC_IN_DDR_TEST_START_B0_LBN 2
+#define MC_CMD_FC_IN_DDR_TEST_START_B0_WIDTH 1
+#define MC_CMD_FC_IN_DDR_TEST_START_B1_LBN 3
+#define MC_CMD_FC_IN_DDR_TEST_START_B1_WIDTH 1
+
+/* MC_CMD_FC_IN_DDR_TEST_POLL msgrequest */
+#define MC_CMD_FC_IN_DDR_TEST_POLL_LEN 12
+#define MC_CMD_FC_IN_DDR_TEST_CMD_OFST 0
+#define MC_CMD_FC_IN_DDR_TEST_CMD_LEN 4
+/* MC_CMD_FC_IN_DDR_TEST_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_DDR_TEST_HEADER_LEN 4 */
+/* Clear previous test result and prepare for restarting DDR test */
+#define MC_CMD_FC_IN_DDR_TEST_POLL_CLEAR_RESULT_FOR_DDR_TEST_OFST 8
+#define MC_CMD_FC_IN_DDR_TEST_POLL_CLEAR_RESULT_FOR_DDR_TEST_LEN 4
+
+/* MC_CMD_FC_IN_GET_ASSERT msgrequest */
+#define MC_CMD_FC_IN_GET_ASSERT_LEN 4
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+
+/* MC_CMD_FC_IN_FPGA_BUILD msgrequest */
+#define MC_CMD_FC_IN_FPGA_BUILD_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* FPGA build info operation code */
+#define MC_CMD_FC_IN_FPGA_BUILD_OP_OFST 4
+#define MC_CMD_FC_IN_FPGA_BUILD_OP_LEN 4
+/* enum: Get the build registers */
+#define MC_CMD_FC_IN_FPGA_BUILD_BUILD 0x1
+/* enum: Get the services registers */
+#define MC_CMD_FC_IN_FPGA_BUILD_SERVICES 0x2
+/* enum: Get the BSP version */
+#define MC_CMD_FC_IN_FPGA_BUILD_BSP_VERSION 0x3
+/* enum: Get the build register for V2 (SFA974X) */
+#define MC_CMD_FC_IN_FPGA_BUILD_BUILD_V2 0x4
+/* enum: Get the services register for V2 (SFA974X) */
+#define MC_CMD_FC_IN_FPGA_BUILD_SERVICES_V2 0x5
+
+/* MC_CMD_FC_IN_READ_MAP msgrequest */
+#define MC_CMD_FC_IN_READ_MAP_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_READ_MAP_HEADER_OFST 4
+#define MC_CMD_FC_IN_READ_MAP_HEADER_LEN 4
+#define MC_CMD_FC_IN_READ_MAP_OP_LBN 0
+#define MC_CMD_FC_IN_READ_MAP_OP_WIDTH 8
+/* enum: Get the number of map regions */
+#define MC_CMD_FC_OP_READ_MAP_COUNT 0x1
+/* enum: Get the specified map */
+#define MC_CMD_FC_OP_READ_MAP_INDEX 0x2
+
+/* MC_CMD_FC_IN_READ_MAP_COUNT msgrequest */
+#define MC_CMD_FC_IN_READ_MAP_COUNT_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_READ_MAP_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_READ_MAP_HEADER_LEN 4 */
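The OP_LBN/OP_WIDTH pairs above describe sub-fields packed into a single 32-bit header dword rather than separate dwords of their own. A minimal illustrative sketch of packing such a field, using the READ_MAP header as the example; the helper fc_pack_field is hypothetical (sfc base code typically uses its EFX_POPULATE_DWORD macro family for this):

    static inline uint32_t
    fc_pack_field(uint32_t dword, unsigned int lbn, unsigned int width,
                  uint32_t value)
    {
            /* Mask the value to the field width, then place it at its
             * low bit number (LBN) within the dword.
             */
            uint32_t mask = (width < 32) ? ((1u << width) - 1u) : 0xffffffffu;

            return (dword & ~(mask << lbn)) | ((value & mask) << lbn);
    }

    /* Select the "get the number of map regions" sub-operation; the result
     * would be written at MC_CMD_FC_IN_READ_MAP_HEADER_OFST of the request.
     */
    uint32_t hdr = fc_pack_field(0, MC_CMD_FC_IN_READ_MAP_OP_LBN,
                                 MC_CMD_FC_IN_READ_MAP_OP_WIDTH,
                                 MC_CMD_FC_OP_READ_MAP_COUNT);

+
+/* MC_CMD_FC_IN_READ_MAP_INDEX msgrequest */
+#define MC_CMD_FC_IN_READ_MAP_INDEX_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_READ_MAP_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_READ_MAP_HEADER_LEN 4 */
+#define MC_CMD_FC_IN_MAP_INDEX_OFST 8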
+#define MC_CMD_FC_IN_MAP_INDEX_LEN 4 + +/* MC_CMD_FC_IN_CAPABILITIES msgrequest */ +#define MC_CMD_FC_IN_CAPABILITIES_LEN 4 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ + +/* MC_CMD_FC_IN_GLOBAL_FLAGS msgrequest */ +#define MC_CMD_FC_IN_GLOBAL_FLAGS_LEN 8 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_GLOBAL_FLAGS_FLAGS_OFST 4 +#define MC_CMD_FC_IN_GLOBAL_FLAGS_FLAGS_LEN 4 +#define MC_CMD_FC_IN_GLOBAL_FLAGS_RX_TUNING_CABLE_PLUGGED_IN_LBN 0 +#define MC_CMD_FC_IN_GLOBAL_FLAGS_RX_TUNING_CABLE_PLUGGED_IN_WIDTH 1 +#define MC_CMD_FC_IN_GLOBAL_FLAGS_RX_TUNING_LINK_MONITORING_LBN 1 +#define MC_CMD_FC_IN_GLOBAL_FLAGS_RX_TUNING_LINK_MONITORING_WIDTH 1 +#define MC_CMD_FC_IN_GLOBAL_FLAGS_DFE_ENABLE_LBN 2 +#define MC_CMD_FC_IN_GLOBAL_FLAGS_DFE_ENABLE_WIDTH 1 +#define MC_CMD_FC_IN_GLOBAL_FLAGS_1D_EYE_ENABLE_LBN 3 +#define MC_CMD_FC_IN_GLOBAL_FLAGS_1D_EYE_ENABLE_WIDTH 1 +#define MC_CMD_FC_IN_GLOBAL_FLAGS_1D_TUNING_ENABLE_LBN 4 +#define MC_CMD_FC_IN_GLOBAL_FLAGS_1D_TUNING_ENABLE_WIDTH 1 +#define MC_CMD_FC_IN_GLOBAL_FLAGS_OFFCAL_ENABLE_LBN 5 +#define MC_CMD_FC_IN_GLOBAL_FLAGS_OFFCAL_ENABLE_WIDTH 1 + +/* MC_CMD_FC_IN_IO_REL msgrequest */ +#define MC_CMD_FC_IN_IO_REL_LEN 8 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_IO_REL_HEADER_OFST 4 +#define MC_CMD_FC_IN_IO_REL_HEADER_LEN 4 +#define MC_CMD_FC_IN_IO_REL_OP_LBN 0 +#define MC_CMD_FC_IN_IO_REL_OP_WIDTH 8 +/* enum: Get the base address that the FC applies to relative commands */ +#define MC_CMD_FC_IN_IO_REL_GET_ADDR 0x1 +/* enum: Read data */ +#define MC_CMD_FC_IN_IO_REL_READ32 0x2 +/* enum: Write data */ +#define MC_CMD_FC_IN_IO_REL_WRITE32 0x3 +#define MC_CMD_FC_IN_IO_REL_COMP_TYPE_LBN 8 +#define MC_CMD_FC_IN_IO_REL_COMP_TYPE_WIDTH 8 +/* enum: Application address space */ +#define MC_CMD_FC_COMP_TYPE_APP_ADDR_SPACE 0x1 +/* enum: Flash address space */ +#define MC_CMD_FC_COMP_TYPE_FLASH 0x2 + +/* MC_CMD_FC_IN_IO_REL_GET_ADDR msgrequest */ +#define MC_CMD_FC_IN_IO_REL_GET_ADDR_LEN 8 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_IO_REL_HEADER_OFST 4 */ +/* MC_CMD_FC_IN_IO_REL_HEADER_LEN 4 */ + +/* MC_CMD_FC_IN_IO_REL_READ32 msgrequest */ +#define MC_CMD_FC_IN_IO_REL_READ32_LEN 20 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_IO_REL_HEADER_OFST 4 */ +/* MC_CMD_FC_IN_IO_REL_HEADER_LEN 4 */ +#define MC_CMD_FC_IN_IO_REL_READ32_ADDR_HI_OFST 8 +#define MC_CMD_FC_IN_IO_REL_READ32_ADDR_HI_LEN 4 +#define MC_CMD_FC_IN_IO_REL_READ32_ADDR_LO_OFST 12 +#define MC_CMD_FC_IN_IO_REL_READ32_ADDR_LO_LEN 4 +#define MC_CMD_FC_IN_IO_REL_READ32_NUMWORDS_OFST 16 +#define MC_CMD_FC_IN_IO_REL_READ32_NUMWORDS_LEN 4 + +/* MC_CMD_FC_IN_IO_REL_WRITE32 msgrequest */ +#define MC_CMD_FC_IN_IO_REL_WRITE32_LENMIN 20 +#define MC_CMD_FC_IN_IO_REL_WRITE32_LENMAX 252 +#define MC_CMD_FC_IN_IO_REL_WRITE32_LEN(num) (16+4*(num)) +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_IO_REL_HEADER_OFST 4 */ +/* MC_CMD_FC_IN_IO_REL_HEADER_LEN 4 */ +#define MC_CMD_FC_IN_IO_REL_WRITE32_ADDR_HI_OFST 8 +#define MC_CMD_FC_IN_IO_REL_WRITE32_ADDR_HI_LEN 4 +#define MC_CMD_FC_IN_IO_REL_WRITE32_ADDR_LO_OFST 12 +#define MC_CMD_FC_IN_IO_REL_WRITE32_ADDR_LO_LEN 4 +#define MC_CMD_FC_IN_IO_REL_WRITE32_BUFFER_OFST 16 +#define MC_CMD_FC_IN_IO_REL_WRITE32_BUFFER_LEN 4 +#define MC_CMD_FC_IN_IO_REL_WRITE32_BUFFER_MINNUM 1 +#define MC_CMD_FC_IN_IO_REL_WRITE32_BUFFER_MAXNUM 59 + +/* MC_CMD_FC_IN_UHLINK msgrequest */ +#define MC_CMD_FC_IN_UHLINK_LEN 8 
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_UHLINK_HEADER_OFST 4
+#define MC_CMD_FC_IN_UHLINK_HEADER_LEN 4
+#define MC_CMD_FC_IN_UHLINK_OP_LBN 0
+#define MC_CMD_FC_IN_UHLINK_OP_WIDTH 8
+/* enum: Get PHY configuration info */
+#define MC_CMD_FC_OP_UHLINK_PHY 0x1
+/* enum: Get MAC configuration info */
+#define MC_CMD_FC_OP_UHLINK_MAC 0x2
+/* enum: Get Rx eye table */
+#define MC_CMD_FC_OP_UHLINK_RX_EYE 0x3
+/* enum: Dump Rx eye plot */
+#define MC_CMD_FC_OP_UHLINK_DUMP_RX_EYE_PLOT 0x4
+/* enum: Read Rx eye plot */
+#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT 0x5
+/* enum: Retune Rx settings */
+#define MC_CMD_FC_OP_UHLINK_RX_TUNE 0x6
+/* enum: Set loopback mode on fpga port */
+#define MC_CMD_FC_OP_UHLINK_LOOPBACK_SET 0x7
+/* enum: Get loopback mode config state on fpga port */
+#define MC_CMD_FC_OP_UHLINK_LOOPBACK_GET 0x8
+#define MC_CMD_FC_IN_UHLINK_PORT_TYPE_LBN 8
+#define MC_CMD_FC_IN_UHLINK_PORT_TYPE_WIDTH 8
+#define MC_CMD_FC_IN_UHLINK_PORT_IDX_LBN 16
+#define MC_CMD_FC_IN_UHLINK_PORT_IDX_WIDTH 8
+#define MC_CMD_FC_IN_UHLINK_CMD_FORMAT_LBN 24
+#define MC_CMD_FC_IN_UHLINK_CMD_FORMAT_WIDTH 8
+/* enum: Default FC command format; the fields PORT_TYPE and PORT_IDX are
+ * irrelevant. Port number is derived from pci_fn; passed in FC header.
+ */
+#define MC_CMD_FC_OP_UHLINK_CMD_FORMAT_DEFAULT 0x0
+/* enum: Override default port number. Port number determined by fields
+ * PORT_TYPE and PORT_IDX.
+ */
+#define MC_CMD_FC_OP_UHLINK_CMD_FORMAT_PORT_OVERRIDE 0x1
+
+/* MC_CMD_FC_OP_UHLINK_PHY msgrequest */
+#define MC_CMD_FC_OP_UHLINK_PHY_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_LEN 4 */
+
+/* MC_CMD_FC_OP_UHLINK_MAC msgrequest */
+#define MC_CMD_FC_OP_UHLINK_MAC_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_LEN 4 */
+
+/* MC_CMD_FC_OP_UHLINK_RX_EYE msgrequest */
+#define MC_CMD_FC_OP_UHLINK_RX_EYE_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_LEN 4 */
+#define MC_CMD_FC_OP_UHLINK_RX_EYE_INDEX_OFST 8
+#define MC_CMD_FC_OP_UHLINK_RX_EYE_INDEX_LEN 4
+#define MC_CMD_FC_UHLINK_RX_EYE_PER_BLOCK 0x30 /* enum */
+
+/* MC_CMD_FC_OP_UHLINK_DUMP_RX_EYE_PLOT msgrequest */
+#define MC_CMD_FC_OP_UHLINK_DUMP_RX_EYE_PLOT_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_LEN 4 */
+
+/* MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT msgrequest */
+#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_LEN 20
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_LEN 4 */
+#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_DC_GAIN_OFST 8
+#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_DC_GAIN_LEN 4
+#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_EQ_CONTROL_OFST 12
+#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_EQ_CONTROL_LEN 4
+#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_INDEX_OFST 16
+#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_INDEX_LEN 4
+#define MC_CMD_FC_UHLINK_RX_EYE_PLOT_ROWS_PER_BLOCK 0x1e /* enum */
+
+/* MC_CMD_FC_OP_UHLINK_RX_TUNE msgrequest */
+#define MC_CMD_FC_OP_UHLINK_RX_TUNE_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_LEN 4 */
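Selecting a specific FPGA port is therefore a two-step affair: set CMD_FORMAT to PORT_OVERRIDE, then fill in PORT_TYPE and PORT_IDX. An illustrative sketch of assembling such a UHLINK header dword; MC_CMD_FC_PORT_EXT is borrowed from the MAC header above on the assumption that UHLINK uses the same port-type encoding, and plain shifts suffice because each value shown fits its 8-bit field:

    /* Request PHY configuration info for external FPGA port 1, overriding
     * the default pci_fn-derived port selection. The dword goes at
     * MC_CMD_FC_IN_UHLINK_HEADER_OFST (byte 4) of the request.
     */
    uint32_t uhlink_hdr =
            ((uint32_t)MC_CMD_FC_OP_UHLINK_PHY << MC_CMD_FC_IN_UHLINK_OP_LBN) |
            ((uint32_t)MC_CMD_FC_PORT_EXT << MC_CMD_FC_IN_UHLINK_PORT_TYPE_LBN) |
            (1u << MC_CMD_FC_IN_UHLINK_PORT_IDX_LBN) |
            ((uint32_t)MC_CMD_FC_OP_UHLINK_CMD_FORMAT_PORT_OVERRIDE <<
             MC_CMD_FC_IN_UHLINK_CMD_FORMAT_LBN);

+
+/*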
MC_CMD_FC_OP_UHLINK_LOOPBACK_SET msgrequest */ +#define MC_CMD_FC_OP_UHLINK_LOOPBACK_SET_LEN 16 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */ +/* MC_CMD_FC_IN_UHLINK_HEADER_LEN 4 */ +#define MC_CMD_FC_OP_UHLINK_LOOPBACK_SET_TYPE_OFST 8 +#define MC_CMD_FC_OP_UHLINK_LOOPBACK_SET_TYPE_LEN 4 +#define MC_CMD_FC_UHLINK_LOOPBACK_TYPE_PCS_SERIAL 0x0 /* enum */ +#define MC_CMD_FC_UHLINK_LOOPBACK_TYPE_PMA_PRE_CDR 0x1 /* enum */ +#define MC_CMD_FC_UHLINK_LOOPBACK_TYPE_PMA_POST_CDR 0x2 /* enum */ +#define MC_CMD_FC_OP_UHLINK_LOOPBACK_SET_STATE_OFST 12 +#define MC_CMD_FC_OP_UHLINK_LOOPBACK_SET_STATE_LEN 4 +#define MC_CMD_FC_UHLINK_LOOPBACK_STATE_OFF 0x0 /* enum */ +#define MC_CMD_FC_UHLINK_LOOPBACK_STATE_ON 0x1 /* enum */ + +/* MC_CMD_FC_OP_UHLINK_LOOPBACK_GET msgrequest */ +#define MC_CMD_FC_OP_UHLINK_LOOPBACK_GET_LEN 12 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */ +/* MC_CMD_FC_IN_UHLINK_HEADER_LEN 4 */ +#define MC_CMD_FC_OP_UHLINK_LOOPBACK_GET_TYPE_OFST 8 +#define MC_CMD_FC_OP_UHLINK_LOOPBACK_GET_TYPE_LEN 4 + +/* MC_CMD_FC_IN_SET_LINK msgrequest */ +#define MC_CMD_FC_IN_SET_LINK_LEN 16 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* See MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */ +#define MC_CMD_FC_IN_SET_LINK_MODE_OFST 4 +#define MC_CMD_FC_IN_SET_LINK_MODE_LEN 4 +#define MC_CMD_FC_IN_SET_LINK_SPEED_OFST 8 +#define MC_CMD_FC_IN_SET_LINK_SPEED_LEN 4 +#define MC_CMD_FC_IN_SET_LINK_FLAGS_OFST 12 +#define MC_CMD_FC_IN_SET_LINK_FLAGS_LEN 4 +#define MC_CMD_FC_IN_SET_LINK_LOWPOWER_LBN 0 +#define MC_CMD_FC_IN_SET_LINK_LOWPOWER_WIDTH 1 +#define MC_CMD_FC_IN_SET_LINK_POWEROFF_LBN 1 +#define MC_CMD_FC_IN_SET_LINK_POWEROFF_WIDTH 1 +#define MC_CMD_FC_IN_SET_LINK_TXDIS_LBN 2 +#define MC_CMD_FC_IN_SET_LINK_TXDIS_WIDTH 1 + +/* MC_CMD_FC_IN_LICENSE msgrequest */ +#define MC_CMD_FC_IN_LICENSE_LEN 8 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_LICENSE_OP_OFST 4 +#define MC_CMD_FC_IN_LICENSE_OP_LEN 4 +#define MC_CMD_FC_IN_LICENSE_UPDATE_LICENSE 0x0 /* enum */ +#define MC_CMD_FC_IN_LICENSE_GET_KEY_STATS 0x1 /* enum */ + +/* MC_CMD_FC_IN_STARTUP msgrequest */ +#define MC_CMD_FC_IN_STARTUP_LEN 40 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_STARTUP_BASE_OFST 4 +#define MC_CMD_FC_IN_STARTUP_BASE_LEN 4 +#define MC_CMD_FC_IN_STARTUP_LENGTH_OFST 8 +#define MC_CMD_FC_IN_STARTUP_LENGTH_LEN 4 +/* Length of identifier */ +#define MC_CMD_FC_IN_STARTUP_IDLENGTH_OFST 12 +#define MC_CMD_FC_IN_STARTUP_IDLENGTH_LEN 4 +/* Identifier for AOE FPGA */ +#define MC_CMD_FC_IN_STARTUP_ID_OFST 16 +#define MC_CMD_FC_IN_STARTUP_ID_LEN 1 +#define MC_CMD_FC_IN_STARTUP_ID_NUM 24 + +/* MC_CMD_FC_IN_DMA msgrequest */ +#define MC_CMD_FC_IN_DMA_LEN 8 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_DMA_OP_OFST 4 +#define MC_CMD_FC_IN_DMA_OP_LEN 4 +#define MC_CMD_FC_IN_DMA_STOP 0x0 /* enum */ +#define MC_CMD_FC_IN_DMA_READ 0x1 /* enum */ + +/* MC_CMD_FC_IN_DMA_STOP msgrequest */ +#define MC_CMD_FC_IN_DMA_STOP_LEN 12 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_DMA_OP_OFST 4 */ +/* MC_CMD_FC_IN_DMA_OP_LEN 4 */ +/* FC supplied handle */ +#define MC_CMD_FC_IN_DMA_STOP_FC_HANDLE_OFST 8 +#define MC_CMD_FC_IN_DMA_STOP_FC_HANDLE_LEN 4 + +/* MC_CMD_FC_IN_DMA_READ msgrequest */ +#define MC_CMD_FC_IN_DMA_READ_LEN 16 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ 
+/* MC_CMD_FC_IN_DMA_OP_OFST 4 */ +/* MC_CMD_FC_IN_DMA_OP_LEN 4 */ +#define MC_CMD_FC_IN_DMA_READ_OFFSET_OFST 8 +#define MC_CMD_FC_IN_DMA_READ_OFFSET_LEN 4 +#define MC_CMD_FC_IN_DMA_READ_LENGTH_OFST 12 +#define MC_CMD_FC_IN_DMA_READ_LENGTH_LEN 4 + +/* MC_CMD_FC_IN_TIMED_READ msgrequest */ +#define MC_CMD_FC_IN_TIMED_READ_LEN 8 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_TIMED_READ_OP_OFST 4 +#define MC_CMD_FC_IN_TIMED_READ_OP_LEN 4 +#define MC_CMD_FC_IN_TIMED_READ_SET 0x0 /* enum */ +#define MC_CMD_FC_IN_TIMED_READ_GET 0x1 /* enum */ +#define MC_CMD_FC_IN_TIMED_READ_CLEAR 0x2 /* enum */ + +/* MC_CMD_FC_IN_TIMED_READ_SET msgrequest */ +#define MC_CMD_FC_IN_TIMED_READ_SET_LEN 52 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_TIMED_READ_OP_OFST 4 */ +/* MC_CMD_FC_IN_TIMED_READ_OP_LEN 4 */ +/* Host supplied handle (unique) */ +#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_HANDLE_OFST 8 +#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_HANDLE_LEN 4 +/* Address into which to transfer data in host */ +#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_DMA_ADDRESS_OFST 12 +#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_DMA_ADDRESS_LEN 8 +#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_DMA_ADDRESS_LO_OFST 12 +#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_DMA_ADDRESS_HI_OFST 16 +/* AOE address from which to transfer data */ +#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_ADDRESS_OFST 20 +#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_ADDRESS_LEN 8 +#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_ADDRESS_LO_OFST 20 +#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_ADDRESS_HI_OFST 24 +/* Length of AOE transfer (total) */ +#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_LENGTH_OFST 28 +#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_LENGTH_LEN 4 +/* Length of host transfer (total) */ +#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_LENGTH_OFST 32 +#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_LENGTH_LEN 4 +/* Offset back from aoe_address to apply operation to */ +#define MC_CMD_FC_IN_TIMED_READ_SET_OFFSET_OFST 36 +#define MC_CMD_FC_IN_TIMED_READ_SET_OFFSET_LEN 4 +/* Data to apply at offset */ +#define MC_CMD_FC_IN_TIMED_READ_SET_DATA_OFST 40 +#define MC_CMD_FC_IN_TIMED_READ_SET_DATA_LEN 4 +#define MC_CMD_FC_IN_TIMED_READ_SET_FLAGS_OFST 44 +#define MC_CMD_FC_IN_TIMED_READ_SET_FLAGS_LEN 4 +#define MC_CMD_FC_IN_TIMED_READ_SET_INDIRECT_LBN 0 +#define MC_CMD_FC_IN_TIMED_READ_SET_INDIRECT_WIDTH 1 +#define MC_CMD_FC_IN_TIMED_READ_SET_DOUBLE_LBN 1 +#define MC_CMD_FC_IN_TIMED_READ_SET_DOUBLE_WIDTH 1 +#define MC_CMD_FC_IN_TIMED_READ_SET_EVENT_LBN 2 +#define MC_CMD_FC_IN_TIMED_READ_SET_EVENT_WIDTH 1 +#define MC_CMD_FC_IN_TIMED_READ_SET_PREREAD_LBN 3 +#define MC_CMD_FC_IN_TIMED_READ_SET_PREREAD_WIDTH 2 +#define MC_CMD_FC_IN_TIMED_READ_SET_NONE 0x0 /* enum */ +#define MC_CMD_FC_IN_TIMED_READ_SET_READ 0x1 /* enum */ +#define MC_CMD_FC_IN_TIMED_READ_SET_WRITE 0x2 /* enum */ +#define MC_CMD_FC_IN_TIMED_READ_SET_READWRITE 0x3 /* enum */ +/* Period at which reads are performed (100ms units) */ +#define MC_CMD_FC_IN_TIMED_READ_SET_PERIOD_OFST 48 +#define MC_CMD_FC_IN_TIMED_READ_SET_PERIOD_LEN 4 + +/* MC_CMD_FC_IN_TIMED_READ_GET msgrequest */ +#define MC_CMD_FC_IN_TIMED_READ_GET_LEN 12 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_TIMED_READ_OP_OFST 4 */ +/* MC_CMD_FC_IN_TIMED_READ_OP_LEN 4 */ +/* FC supplied handle */ +#define MC_CMD_FC_IN_TIMED_READ_GET_FC_HANDLE_OFST 8 +#define MC_CMD_FC_IN_TIMED_READ_GET_FC_HANDLE_LEN 4 + +/* MC_CMD_FC_IN_TIMED_READ_CLEAR msgrequest */ +#define MC_CMD_FC_IN_TIMED_READ_CLEAR_LEN 12 
+/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_TIMED_READ_OP_OFST 4 */ +/* MC_CMD_FC_IN_TIMED_READ_OP_LEN 4 */ +/* FC supplied handle */ +#define MC_CMD_FC_IN_TIMED_READ_CLEAR_FC_HANDLE_OFST 8 +#define MC_CMD_FC_IN_TIMED_READ_CLEAR_FC_HANDLE_LEN 4 + +/* MC_CMD_FC_IN_LOG msgrequest */ +#define MC_CMD_FC_IN_LOG_LEN 8 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_LOG_OP_OFST 4 +#define MC_CMD_FC_IN_LOG_OP_LEN 4 +#define MC_CMD_FC_IN_LOG_ADDR_RANGE 0x0 /* enum */ +#define MC_CMD_FC_IN_LOG_JTAG_UART 0x1 /* enum */ + +/* MC_CMD_FC_IN_LOG_ADDR_RANGE msgrequest */ +#define MC_CMD_FC_IN_LOG_ADDR_RANGE_LEN 20 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_LOG_OP_OFST 4 */ +/* MC_CMD_FC_IN_LOG_OP_LEN 4 */ +/* Partition offset into flash */ +#define MC_CMD_FC_IN_LOG_ADDR_RANGE_OFFSET_OFST 8 +#define MC_CMD_FC_IN_LOG_ADDR_RANGE_OFFSET_LEN 4 +/* Partition length */ +#define MC_CMD_FC_IN_LOG_ADDR_RANGE_LENGTH_OFST 12 +#define MC_CMD_FC_IN_LOG_ADDR_RANGE_LENGTH_LEN 4 +/* Partition erase size */ +#define MC_CMD_FC_IN_LOG_ADDR_RANGE_ERASE_SIZE_OFST 16 +#define MC_CMD_FC_IN_LOG_ADDR_RANGE_ERASE_SIZE_LEN 4 + +/* MC_CMD_FC_IN_LOG_JTAG_UART msgrequest */ +#define MC_CMD_FC_IN_LOG_JTAG_UART_LEN 12 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_LOG_OP_OFST 4 */ +/* MC_CMD_FC_IN_LOG_OP_LEN 4 */ +/* Enable/disable printing to JTAG UART */ +#define MC_CMD_FC_IN_LOG_JTAG_UART_ENABLE_OFST 8 +#define MC_CMD_FC_IN_LOG_JTAG_UART_ENABLE_LEN 4 + +/* MC_CMD_FC_IN_CLOCK msgrequest: Perform a clock operation */ +#define MC_CMD_FC_IN_CLOCK_LEN 12 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_CLOCK_OP_OFST 4 +#define MC_CMD_FC_IN_CLOCK_OP_LEN 4 +#define MC_CMD_FC_IN_CLOCK_GET_TIME 0x0 /* enum */ +#define MC_CMD_FC_IN_CLOCK_SET_TIME 0x1 /* enum */ +#define MC_CMD_FC_IN_CLOCK_ID_OFST 8 +#define MC_CMD_FC_IN_CLOCK_ID_LEN 4 +#define MC_CMD_FC_IN_CLOCK_STATS 0x0 /* enum */ +#define MC_CMD_FC_IN_CLOCK_MAC 0x1 /* enum */ + +/* MC_CMD_FC_IN_CLOCK_GET_TIME msgrequest: Retrieve the clock value of the + * specified clock + */ +#define MC_CMD_FC_IN_CLOCK_GET_TIME_LEN 12 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_CLOCK_OP_OFST 4 */ +/* MC_CMD_FC_IN_CLOCK_OP_LEN 4 */ +/* MC_CMD_FC_IN_CLOCK_ID_OFST 8 */ +/* MC_CMD_FC_IN_CLOCK_ID_LEN 4 */ + +/* MC_CMD_FC_IN_CLOCK_SET_TIME msgrequest: Set the clock value of the specified + * clock + */ +#define MC_CMD_FC_IN_CLOCK_SET_TIME_LEN 24 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_CLOCK_OP_OFST 4 */ +/* MC_CMD_FC_IN_CLOCK_OP_LEN 4 */ +/* MC_CMD_FC_IN_CLOCK_ID_OFST 8 */ +/* MC_CMD_FC_IN_CLOCK_ID_LEN 4 */ +#define MC_CMD_FC_IN_CLOCK_SET_TIME_SECONDS_OFST 12 +#define MC_CMD_FC_IN_CLOCK_SET_TIME_SECONDS_LEN 8 +#define MC_CMD_FC_IN_CLOCK_SET_TIME_SECONDS_LO_OFST 12 +#define MC_CMD_FC_IN_CLOCK_SET_TIME_SECONDS_HI_OFST 16 +#define MC_CMD_FC_IN_CLOCK_SET_TIME_NANOSECONDS_OFST 20 +#define MC_CMD_FC_IN_CLOCK_SET_TIME_NANOSECONDS_LEN 4 + +/* MC_CMD_FC_IN_DDR msgrequest */ +#define MC_CMD_FC_IN_DDR_LEN 12 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_DDR_OP_OFST 4 +#define MC_CMD_FC_IN_DDR_OP_LEN 4 +#define MC_CMD_FC_IN_DDR_SET_SPD 0x0 /* enum */ +#define MC_CMD_FC_IN_DDR_GET_STATUS 0x1 /* enum */ +#define MC_CMD_FC_IN_DDR_SET_INFO 0x2 /* enum */ +#define MC_CMD_FC_IN_DDR_BANK_OFST 8 +#define MC_CMD_FC_IN_DDR_BANK_LEN 4 +#define 
MC_CMD_FC_IN_DDR_BANK_B0 0x0 /* enum */ +#define MC_CMD_FC_IN_DDR_BANK_B1 0x1 /* enum */ +#define MC_CMD_FC_IN_DDR_BANK_T0 0x2 /* enum */ +#define MC_CMD_FC_IN_DDR_BANK_T1 0x3 /* enum */ +#define MC_CMD_FC_IN_DDR_NUM_BANKS 0x4 /* enum */ + +/* MC_CMD_FC_IN_DDR_SET_SPD msgrequest */ +#define MC_CMD_FC_IN_DDR_SET_SPD_LEN 148 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_DDR_OP_OFST 4 */ +/* MC_CMD_FC_IN_DDR_OP_LEN 4 */ +/* Affected bank */ +/* MC_CMD_FC_IN_DDR_BANK_OFST 8 */ +/* MC_CMD_FC_IN_DDR_BANK_LEN 4 */ +/* Flags */ +#define MC_CMD_FC_IN_DDR_FLAGS_OFST 12 +#define MC_CMD_FC_IN_DDR_FLAGS_LEN 4 +#define MC_CMD_FC_IN_DDR_SET_SPD_ACTIVE 0x1 /* enum */ +/* 128-byte page of serial presence detect data read from module's EEPROM */ +#define MC_CMD_FC_IN_DDR_SPD_OFST 16 +#define MC_CMD_FC_IN_DDR_SPD_LEN 1 +#define MC_CMD_FC_IN_DDR_SPD_NUM 128 +/* Page index of the spd data copied into MC_CMD_FC_IN_DDR_SPD */ +#define MC_CMD_FC_IN_DDR_SPD_PAGE_ID_OFST 144 +#define MC_CMD_FC_IN_DDR_SPD_PAGE_ID_LEN 4 + +/* MC_CMD_FC_IN_DDR_SET_INFO msgrequest */ +#define MC_CMD_FC_IN_DDR_SET_INFO_LEN 16 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_DDR_OP_OFST 4 */ +/* MC_CMD_FC_IN_DDR_OP_LEN 4 */ +/* Affected bank */ +/* MC_CMD_FC_IN_DDR_BANK_OFST 8 */ +/* MC_CMD_FC_IN_DDR_BANK_LEN 4 */ +/* Size of DDR */ +#define MC_CMD_FC_IN_DDR_SIZE_OFST 12 +#define MC_CMD_FC_IN_DDR_SIZE_LEN 4 + +/* MC_CMD_FC_IN_DDR_GET_STATUS msgrequest */ +#define MC_CMD_FC_IN_DDR_GET_STATUS_LEN 12 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* MC_CMD_FC_IN_DDR_OP_OFST 4 */ +/* MC_CMD_FC_IN_DDR_OP_LEN 4 */ +/* Affected bank */ +/* MC_CMD_FC_IN_DDR_BANK_OFST 8 */ +/* MC_CMD_FC_IN_DDR_BANK_LEN 4 */ + +/* MC_CMD_FC_IN_TIMESTAMP msgrequest */ +#define MC_CMD_FC_IN_TIMESTAMP_LEN 8 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* FC timestamp operation code */ +#define MC_CMD_FC_IN_TIMESTAMP_OP_OFST 4 +#define MC_CMD_FC_IN_TIMESTAMP_OP_LEN 4 +/* enum: Read transmit timestamp(s) */ +#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT 0x0 +/* enum: Read snapshot timestamps */ +#define MC_CMD_FC_IN_TIMESTAMP_READ_SNAPSHOT 0x1 +/* enum: Clear all transmit timestamps */ +#define MC_CMD_FC_IN_TIMESTAMP_CLEAR_TRANSMIT 0x2 + +/* MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT msgrequest */ +#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_LEN 28 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_OP_OFST 4 +#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_OP_LEN 4 +/* Control filtering of the returned timestamp and sequence number specified + * here + */ +#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_FILTER_OFST 8 +#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_FILTER_LEN 4 +/* enum: Return most recent timestamp. 
No filtering */ +#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_LATEST 0x0 +/* enum: Match timestamp against the PTP clock ID, port number and sequence + * number specified + */ +#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_MATCH 0x1 +/* Clock identity of PTP packet for which timestamp required */ +#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_CLOCK_ID_OFST 12 +#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_CLOCK_ID_LEN 8 +#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_CLOCK_ID_LO_OFST 12 +#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_CLOCK_ID_HI_OFST 16 +/* Port number of PTP packet for which timestamp required */ +#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_PORT_NUM_OFST 20 +#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_PORT_NUM_LEN 4 +/* Sequence number of PTP packet for which timestamp required */ +#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_SEQ_NUM_OFST 24 +#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_SEQ_NUM_LEN 4 + +/* MC_CMD_FC_IN_TIMESTAMP_READ_SNAPSHOT msgrequest */ +#define MC_CMD_FC_IN_TIMESTAMP_READ_SNAPSHOT_LEN 8 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_TIMESTAMP_READ_SNAPSHOT_OP_OFST 4 +#define MC_CMD_FC_IN_TIMESTAMP_READ_SNAPSHOT_OP_LEN 4 + +/* MC_CMD_FC_IN_TIMESTAMP_CLEAR_TRANSMIT msgrequest */ +#define MC_CMD_FC_IN_TIMESTAMP_CLEAR_TRANSMIT_LEN 8 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_TIMESTAMP_CLEAR_TRANSMIT_OP_OFST 4 +#define MC_CMD_FC_IN_TIMESTAMP_CLEAR_TRANSMIT_OP_LEN 4 + +/* MC_CMD_FC_IN_SPI msgrequest */ +#define MC_CMD_FC_IN_SPI_LEN 8 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* Basic commands for SPI Flash. */ +#define MC_CMD_FC_IN_SPI_OP_OFST 4 +#define MC_CMD_FC_IN_SPI_OP_LEN 4 +/* enum: SPI Flash read */ +#define MC_CMD_FC_IN_SPI_READ 0x0 +/* enum: SPI Flash write */ +#define MC_CMD_FC_IN_SPI_WRITE 0x1 +/* enum: SPI Flash erase */ +#define MC_CMD_FC_IN_SPI_ERASE 0x2 + +/* MC_CMD_FC_IN_SPI_READ msgrequest */ +#define MC_CMD_FC_IN_SPI_READ_LEN 16 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_SPI_READ_OP_OFST 4 +#define MC_CMD_FC_IN_SPI_READ_OP_LEN 4 +#define MC_CMD_FC_IN_SPI_READ_ADDR_OFST 8 +#define MC_CMD_FC_IN_SPI_READ_ADDR_LEN 4 +#define MC_CMD_FC_IN_SPI_READ_NUMBYTES_OFST 12 +#define MC_CMD_FC_IN_SPI_READ_NUMBYTES_LEN 4 + +/* MC_CMD_FC_IN_SPI_WRITE msgrequest */ +#define MC_CMD_FC_IN_SPI_WRITE_LENMIN 16 +#define MC_CMD_FC_IN_SPI_WRITE_LENMAX 252 +#define MC_CMD_FC_IN_SPI_WRITE_LEN(num) (12+4*(num)) +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_SPI_WRITE_OP_OFST 4 +#define MC_CMD_FC_IN_SPI_WRITE_OP_LEN 4 +#define MC_CMD_FC_IN_SPI_WRITE_ADDR_OFST 8 +#define MC_CMD_FC_IN_SPI_WRITE_ADDR_LEN 4 +#define MC_CMD_FC_IN_SPI_WRITE_BUFFER_OFST 12 +#define MC_CMD_FC_IN_SPI_WRITE_BUFFER_LEN 4 +#define MC_CMD_FC_IN_SPI_WRITE_BUFFER_MINNUM 1 +#define MC_CMD_FC_IN_SPI_WRITE_BUFFER_MAXNUM 60 + +/* MC_CMD_FC_IN_SPI_ERASE msgrequest */ +#define MC_CMD_FC_IN_SPI_ERASE_LEN 16 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_SPI_ERASE_OP_OFST 4 +#define MC_CMD_FC_IN_SPI_ERASE_OP_LEN 4 +#define MC_CMD_FC_IN_SPI_ERASE_ADDR_OFST 8 +#define MC_CMD_FC_IN_SPI_ERASE_ADDR_LEN 4 +#define MC_CMD_FC_IN_SPI_ERASE_NUMBYTES_OFST 12 +#define MC_CMD_FC_IN_SPI_ERASE_NUMBYTES_LEN 4 + +/* MC_CMD_FC_IN_DIAG msgrequest */ +#define MC_CMD_FC_IN_DIAG_LEN 8 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +/* Operation code indicating component type */ +#define MC_CMD_FC_IN_DIAG_OP_OFST 4 
+#define MC_CMD_FC_IN_DIAG_OP_LEN 4 +/* enum: Power noise generator. */ +#define MC_CMD_FC_IN_DIAG_POWER_NOISE 0x0 +/* enum: DDR soak test component. */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK 0x1 +/* enum: Diagnostics datapath control component. */ +#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL 0x2 + +/* MC_CMD_FC_IN_DIAG_POWER_NOISE msgrequest */ +#define MC_CMD_FC_IN_DIAG_POWER_NOISE_LEN 12 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_DIAG_POWER_NOISE_OP_OFST 4 +#define MC_CMD_FC_IN_DIAG_POWER_NOISE_OP_LEN 4 +/* Sub-opcode describing the operation to be carried out */ +#define MC_CMD_FC_IN_DIAG_POWER_NOISE_SUB_OP_OFST 8 +#define MC_CMD_FC_IN_DIAG_POWER_NOISE_SUB_OP_LEN 4 +/* enum: Read the configuration (the 32-bit values in each of the clock enable + * count and toggle count registers) + */ +#define MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG 0x0 +/* enum: Write a new configuration to the clock enable count and toggle count + * registers + */ +#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG 0x1 + +/* MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG msgrequest */ +#define MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG_LEN 12 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG_OP_OFST 4 +#define MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG_OP_LEN 4 +#define MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG_SUB_OP_OFST 8 +#define MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG_SUB_OP_LEN 4 + +/* MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG msgrequest */ +#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_LEN 20 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_OP_OFST 4 +#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_OP_LEN 4 +#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_SUB_OP_OFST 8 +#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_SUB_OP_LEN 4 +/* The 32-bit value to be written to the toggle count register */ +#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_TOGGLE_COUNT_OFST 12 +#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_TOGGLE_COUNT_LEN 4 +/* The 32-bit value to be written to the clock enable count register */ +#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_CLKEN_COUNT_OFST 16 +#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_CLKEN_COUNT_LEN 4 + +/* MC_CMD_FC_IN_DIAG_DDR_SOAK msgrequest */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_LEN 12 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_OP_OFST 4 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_OP_LEN 4 +/* Sub-opcode describing the operation to be carried out */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_SUB_OP_OFST 8 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_SUB_OP_LEN 4 +/* enum: Starts DDR soak test on selected banks */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START 0x0 +/* enum: Read status of DDR soak test */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT 0x1 +/* enum: Stop test */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP 0x2 +/* enum: Set or clear bit that triggers fake errors. These cause subsequent + * tests to fail until the bit is cleared. 
+ */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR 0x3 + +/* MC_CMD_FC_IN_DIAG_DDR_SOAK_START msgrequest */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_LEN 24 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_OP_OFST 4 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_OP_LEN 4 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_SUB_OP_OFST 8 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_SUB_OP_LEN 4 +/* Mask of DDR banks to be tested */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_BANK_MASK_OFST 12 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_BANK_MASK_LEN 4 +/* Pattern to use in the soak test */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_TEST_PATTERN_OFST 16 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_TEST_PATTERN_LEN 4 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_ZEROS 0x0 /* enum */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_ONES 0x1 /* enum */ +/* Either multiple automatic tests until a STOP command is issued, or one + * single test + */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_TEST_TYPE_OFST 20 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_TEST_TYPE_LEN 4 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_ONGOING_TEST 0x0 /* enum */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_SINGLE_TEST 0x1 /* enum */ + +/* MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT msgrequest */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_LEN 16 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_OP_OFST 4 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_OP_LEN 4 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_SUB_OP_OFST 8 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_SUB_OP_LEN 4 +/* DDR bank to read status from */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_BANK_ID_OFST 12 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_BANK_ID_LEN 4 +#define MC_CMD_FC_DDR_BANK0 0x0 /* enum */ +#define MC_CMD_FC_DDR_BANK1 0x1 /* enum */ +#define MC_CMD_FC_DDR_BANK2 0x2 /* enum */ +#define MC_CMD_FC_DDR_BANK3 0x3 /* enum */ +#define MC_CMD_FC_DDR_AOEMEM_MAX_BANKS 0x4 /* enum */ + +/* MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP msgrequest */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_LEN 16 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_OP_OFST 4 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_OP_LEN 4 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_SUB_OP_OFST 8 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_SUB_OP_LEN 4 +/* Mask of DDR banks to be tested */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_BANK_MASK_OFST 12 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_BANK_MASK_LEN 4 + +/* MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR msgrequest */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_LEN 20 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_OP_OFST 4 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_OP_LEN 4 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_SUB_OP_OFST 8 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_SUB_OP_LEN 4 +/* Mask of DDR banks to set/clear error flag on */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_BANK_MASK_OFST 12 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_BANK_MASK_LEN 4 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_FLAG_ACTION_OFST 16 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_FLAG_ACTION_LEN 4 +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_CLEAR 0x0 /* enum */ +#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_SET 0x1 /* enum */ + +/* MC_CMD_FC_IN_DIAG_DATAPATH_CTRL msgrequest */ +#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_LEN 12 +/* MC_CMD_FC_IN_CMD_OFST 0 */ +/* MC_CMD_FC_IN_CMD_LEN 4 */ +#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_OP_OFST 4 +#define 
MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_OP_LEN 4
+/* Sub-opcode describing the operation to be carried out */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SUB_OP_OFST 8
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SUB_OP_LEN 4
+/* enum: Set a known datapath configuration */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE 0x0
+/* enum: Apply raw config to datapath control registers */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG 0x1
+
+/* MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE msgrequest */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_OP_LEN 4
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_SUB_OP_OFST 8
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_SUB_OP_LEN 4
+/* Datapath configuration identifier */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_MODE_OFST 12
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_MODE_LEN 4
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_PASSTHROUGH 0x0 /* enum */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_SNAKE 0x1 /* enum */
+
+/* MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG msgrequest */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_LEN 24
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_OP_LEN 4
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_SUB_OP_OFST 8
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_SUB_OP_LEN 4
+/* Value to write into control register 1 */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_CONTROL1_OFST 12
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_CONTROL1_LEN 4
+/* Value to write into control register 2 */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_CONTROL2_OFST 16
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_CONTROL2_LEN 4
+/* Value to write into control register 3 */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_CONTROL3_OFST 20
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_CONTROL3_LEN 4
+
+/* MC_CMD_FC_OUT msgresponse */
+#define MC_CMD_FC_OUT_LEN 0
+
+/* MC_CMD_FC_OUT_NULL msgresponse */
+#define MC_CMD_FC_OUT_NULL_LEN 0
+
+/* MC_CMD_FC_OUT_READ32 msgresponse */
+#define MC_CMD_FC_OUT_READ32_LENMIN 4
+#define MC_CMD_FC_OUT_READ32_LENMAX 252
+#define MC_CMD_FC_OUT_READ32_LEN(num) (0+4*(num))
+#define MC_CMD_FC_OUT_READ32_BUFFER_OFST 0
+#define MC_CMD_FC_OUT_READ32_BUFFER_LEN 4
+#define MC_CMD_FC_OUT_READ32_BUFFER_MINNUM 1
+#define MC_CMD_FC_OUT_READ32_BUFFER_MAXNUM 63
+
+/* MC_CMD_FC_OUT_WRITE32 msgresponse */
+#define MC_CMD_FC_OUT_WRITE32_LEN 0
+
+/* MC_CMD_FC_OUT_TRC_READ msgresponse */
+#define MC_CMD_FC_OUT_TRC_READ_LEN 16
+#define MC_CMD_FC_OUT_TRC_READ_DATA_OFST 0
+#define MC_CMD_FC_OUT_TRC_READ_DATA_LEN 4
+#define MC_CMD_FC_OUT_TRC_READ_DATA_NUM 4
+
+/* MC_CMD_FC_OUT_TRC_WRITE msgresponse */
+#define MC_CMD_FC_OUT_TRC_WRITE_LEN 0
+
+/* MC_CMD_FC_OUT_GET_VERSION msgresponse */
+#define MC_CMD_FC_OUT_GET_VERSION_LEN 12
+#define MC_CMD_FC_OUT_GET_VERSION_FIRMWARE_OFST 0
+#define MC_CMD_FC_OUT_GET_VERSION_FIRMWARE_LEN 4
+#define MC_CMD_FC_OUT_GET_VERSION_VERSION_OFST 4
+#define MC_CMD_FC_OUT_GET_VERSION_VERSION_LEN 8
+#define MC_CMD_FC_OUT_GET_VERSION_VERSION_LO_OFST 4
+#define MC_CMD_FC_OUT_GET_VERSION_VERSION_HI_OFST 8
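MC_CMD_FC_OUT_READ32 above is the variable-length pattern used throughout this header: LENMIN/LENMAX bound the response size and LEN(num) gives the byte length for num buffer entries. A small sketch of validating a received length and inverting LEN(num) to recover the entry count; resp_len is assumed to be the byte length reported by the MCDI transport:

    static int
    fc_read32_num_words(size_t resp_len)
    {
            if (resp_len < MC_CMD_FC_OUT_READ32_LENMIN ||
                resp_len > MC_CMD_FC_OUT_READ32_LENMAX ||
                (resp_len % MC_CMD_FC_OUT_READ32_BUFFER_LEN) != 0)
                    return -1; /* malformed response */

            /* MC_CMD_FC_OUT_READ32_LEN(num) is (0 + 4 * (num)), so dividing
             * by the per-entry size recovers num; BUFFER_MAXNUM (63) is
             * implied by LENMAX (252) / BUFFER_LEN (4).
             */
            return (int)(resp_len / MC_CMD_FC_OUT_READ32_BUFFER_LEN);
    }

+
+/* MC_CMD_FC_OUT_TRC_RX_READ msgresponse */
+#define MC_CMD_FC_OUT_TRC_RX_READ_LEN 8
+#define MC_CMD_FC_OUT_TRC_RX_READ_DATA_OFST 0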
+#define MC_CMD_FC_OUT_TRC_RX_READ_DATA_LEN 4 +#define MC_CMD_FC_OUT_TRC_RX_READ_DATA_NUM 2 + +/* MC_CMD_FC_OUT_TRC_RX_WRITE msgresponse */ +#define MC_CMD_FC_OUT_TRC_RX_WRITE_LEN 0 + +/* MC_CMD_FC_OUT_MAC_RECONFIGURE msgresponse */ +#define MC_CMD_FC_OUT_MAC_RECONFIGURE_LEN 0 + +/* MC_CMD_FC_OUT_MAC_SET_LINK msgresponse */ +#define MC_CMD_FC_OUT_MAC_SET_LINK_LEN 0 + +/* MC_CMD_FC_OUT_MAC_READ_STATUS msgresponse */ +#define MC_CMD_FC_OUT_MAC_READ_STATUS_LEN 4 +#define MC_CMD_FC_OUT_MAC_READ_STATUS_STATUS_OFST 0 +#define MC_CMD_FC_OUT_MAC_READ_STATUS_STATUS_LEN 4 + +/* MC_CMD_FC_OUT_MAC_GET_RX_STATS msgresponse */ +#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_LEN ((((0-1+(64*MC_CMD_FC_MAC_RX_NSTATS))+1))>>3) +#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_OFST 0 +#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_LEN 8 +#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_LO_OFST 0 +#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_HI_OFST 4 +#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_NUM MC_CMD_FC_MAC_RX_NSTATS +#define MC_CMD_FC_MAC_RX_STATS_OCTETS 0x0 /* enum */ +#define MC_CMD_FC_MAC_RX_OCTETS_OK 0x1 /* enum */ +#define MC_CMD_FC_MAC_RX_ALIGNMENT_ERRORS 0x2 /* enum */ +#define MC_CMD_FC_MAC_RX_PAUSE_MAC_CTRL_FRAMES 0x3 /* enum */ +#define MC_CMD_FC_MAC_RX_FRAMES_OK 0x4 /* enum */ +#define MC_CMD_FC_MAC_RX_CRC_ERRORS 0x5 /* enum */ +#define MC_CMD_FC_MAC_RX_VLAN_OK 0x6 /* enum */ +#define MC_CMD_FC_MAC_RX_ERRORS 0x7 /* enum */ +#define MC_CMD_FC_MAC_RX_UCAST_PKTS 0x8 /* enum */ +#define MC_CMD_FC_MAC_RX_MULTICAST_PKTS 0x9 /* enum */ +#define MC_CMD_FC_MAC_RX_BROADCAST_PKTS 0xa /* enum */ +#define MC_CMD_FC_MAC_RX_STATS_DROP_EVENTS 0xb /* enum */ +#define MC_CMD_FC_MAC_RX_STATS_PKTS 0xc /* enum */ +#define MC_CMD_FC_MAC_RX_STATS_UNDERSIZE_PKTS 0xd /* enum */ +#define MC_CMD_FC_MAC_RX_STATS_PKTS_64 0xe /* enum */ +#define MC_CMD_FC_MAC_RX_STATS_PKTS_65_127 0xf /* enum */ +#define MC_CMD_FC_MAC_RX_STATS_PKTS_128_255 0x10 /* enum */ +#define MC_CMD_FC_MAC_RX_STATS_PKTS_256_511 0x11 /* enum */ +#define MC_CMD_FC_MAC_RX_STATS_PKTS_512_1023 0x12 /* enum */ +#define MC_CMD_FC_MAC_RX_STATS_PKTS_1024_1518 0x13 /* enum */ +#define MC_CMD_FC_MAC_RX_STATS_PKTS_1519_MAX 0x14 /* enum */ +#define MC_CMD_FC_MAC_RX_STATS_OVERSIZE_PKTS 0x15 /* enum */ +#define MC_CMD_FC_MAC_RX_STATS_JABBERS 0x16 /* enum */ +#define MC_CMD_FC_MAC_RX_STATS_FRAGMENTS 0x17 /* enum */ +#define MC_CMD_FC_MAC_RX_MAC_CONTROL_FRAMES 0x18 /* enum */ +/* enum: (Last entry) */ +#define MC_CMD_FC_MAC_RX_NSTATS 0x19 + +/* MC_CMD_FC_OUT_MAC_GET_TX_STATS msgresponse */ +#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_LEN ((((0-1+(64*MC_CMD_FC_MAC_TX_NSTATS))+1))>>3) +#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_OFST 0 +#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_LEN 8 +#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_LO_OFST 0 +#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_HI_OFST 4 +#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_NUM MC_CMD_FC_MAC_TX_NSTATS +#define MC_CMD_FC_MAC_TX_STATS_OCTETS 0x0 /* enum */ +#define MC_CMD_FC_MAC_TX_OCTETS_OK 0x1 /* enum */ +#define MC_CMD_FC_MAC_TX_ALIGNMENT_ERRORS 0x2 /* enum */ +#define MC_CMD_FC_MAC_TX_PAUSE_MAC_CTRL_FRAMES 0x3 /* enum */ +#define MC_CMD_FC_MAC_TX_FRAMES_OK 0x4 /* enum */ +#define MC_CMD_FC_MAC_TX_CRC_ERRORS 0x5 /* enum */ +#define MC_CMD_FC_MAC_TX_VLAN_OK 0x6 /* enum */ +#define MC_CMD_FC_MAC_TX_ERRORS 0x7 /* enum */ +#define MC_CMD_FC_MAC_TX_UCAST_PKTS 0x8 /* enum */ +#define MC_CMD_FC_MAC_TX_MULTICAST_PKTS 0x9 /* enum */ +#define MC_CMD_FC_MAC_TX_BROADCAST_PKTS 0xa /* 
enum */
+#define MC_CMD_FC_MAC_TX_STATS_DROP_EVENTS 0xb /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_PKTS 0xc /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_UNDERSIZE_PKTS 0xd /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_PKTS_64 0xe /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_PKTS_65_127 0xf /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_PKTS_128_255 0x10 /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_PKTS_256_511 0x11 /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_PKTS_512_1023 0x12 /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_PKTS_1024_1518 0x13 /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_PKTS_1519_TX_MTU 0x14 /* enum */
+#define MC_CMD_FC_MAC_TX_MAC_CONTROL_FRAMES 0x15 /* enum */
+/* enum: (Last entry) */
+#define MC_CMD_FC_MAC_TX_NSTATS 0x16
+
+/* MC_CMD_FC_OUT_MAC_GET_STATS msgresponse */
+#define MC_CMD_FC_OUT_MAC_GET_STATS_LEN ((((0-1+(64*MC_CMD_FC_MAC_NSTATS_PER_BLOCK))+1))>>3)
+/* MAC Statistics */
+#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_OFST 0
+#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_LEN 8
+#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_LO_OFST 0
+#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_HI_OFST 4
+#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_NUM MC_CMD_FC_MAC_NSTATS_PER_BLOCK
+
+/* MC_CMD_FC_OUT_MAC msgresponse */
+#define MC_CMD_FC_OUT_MAC_LEN 0
+
+/* MC_CMD_FC_OUT_SFP msgresponse */
+#define MC_CMD_FC_OUT_SFP_LEN 0
+
+/* MC_CMD_FC_OUT_DDR_TEST_START msgresponse */
+#define MC_CMD_FC_OUT_DDR_TEST_START_LEN 0
+
+/* MC_CMD_FC_OUT_DDR_TEST_POLL msgresponse */
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_LEN 8
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_STATUS_OFST 0
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_STATUS_LEN 4
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_CODE_LBN 0
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_CODE_WIDTH 8
+/* enum: Test not yet initiated */
+#define MC_CMD_FC_OP_DDR_TEST_NONE 0x0
+/* enum: Test is in progress */
+#define MC_CMD_FC_OP_DDR_TEST_INPROGRESS 0x1
+/* enum: Test completed */
+#define MC_CMD_FC_OP_DDR_TEST_SUCCESS 0x2
+/* enum: Test did not complete in specified time */
+#define MC_CMD_FC_OP_DDR_TEST_TIMER_EXPIRED 0x3
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_T0_LBN 11
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_T0_WIDTH 1
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_T1_LBN 10
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_T1_WIDTH 1
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_B0_LBN 9
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_B0_WIDTH 1
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_B1_LBN 8
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_B1_WIDTH 1
+/* Test result from FPGA */
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_RESULT_OFST 4
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_RESULT_LEN 4
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_T0_LBN 31
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_T0_WIDTH 1
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_T1_LBN 30
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_T1_WIDTH 1
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_B0_LBN 29
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_B0_WIDTH 1
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_B1_LBN 28
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_B1_WIDTH 1
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_T0_LBN 15
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_T0_WIDTH 5
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_T1_LBN 10
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_T1_WIDTH 5
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_B0_LBN 5
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_B0_WIDTH 5
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_B1_LBN 0
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_B1_WIDTH 5
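The POLL response packs an 8-bit completion code and per-bank presence bits into its STATUS dword. An illustrative sketch of decoding it with the LBN/WIDTH pairs above, where status is assumed to be the little-endian dword read from MC_CMD_FC_OUT_DDR_TEST_POLL_STATUS_OFST:

    /* Extract the completion code from its low bit number and width. */
    uint32_t code = (status >> MC_CMD_FC_OUT_DDR_TEST_POLL_CODE_LBN) &
                    ((1u << MC_CMD_FC_OUT_DDR_TEST_POLL_CODE_WIDTH) - 1u);

    if (code == MC_CMD_FC_OP_DDR_TEST_INPROGRESS) {
            /* Not done yet; poll again later. */
    } else if (code == MC_CMD_FC_OP_DDR_TEST_SUCCESS) {
            /* Check which banks were fitted before reading the per-bank
             * RESULT fields that follow.
             */
            uint32_t t0_present =
                (status >> MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_T0_LBN) & 1u;
            (void)t0_present;
    }

+#define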
MC_CMD_FC_OUT_DDR_TEST_POLL_TEST_COMPLETE 0x0 /* enum */ +#define MC_CMD_FC_OUT_DDR_TEST_POLL_TEST_FAIL 0x1 /* enum */ +#define MC_CMD_FC_OUT_DDR_TEST_POLL_TEST_PASS 0x2 /* enum */ +#define MC_CMD_FC_OUT_DDR_TEST_POLL_CAL_FAIL 0x3 /* enum */ +#define MC_CMD_FC_OUT_DDR_TEST_POLL_CAL_SUCCESS 0x4 /* enum */ + +/* MC_CMD_FC_OUT_DDR_TEST msgresponse */ +#define MC_CMD_FC_OUT_DDR_TEST_LEN 0 + +/* MC_CMD_FC_OUT_GET_ASSERT msgresponse */ +#define MC_CMD_FC_OUT_GET_ASSERT_LEN 144 +/* Assertion status flag. */ +#define MC_CMD_FC_OUT_GET_ASSERT_GLOBAL_FLAGS_OFST 0 +#define MC_CMD_FC_OUT_GET_ASSERT_GLOBAL_FLAGS_LEN 4 +#define MC_CMD_FC_OUT_GET_ASSERT_STATE_LBN 8 +#define MC_CMD_FC_OUT_GET_ASSERT_STATE_WIDTH 8 +/* enum: No crash data available */ +#define MC_CMD_FC_GET_ASSERT_FLAGS_STATE_CLEAR 0x0 +/* enum: New crash data available */ +#define MC_CMD_FC_GET_ASSERT_FLAGS_STATE_NEW 0x1 +/* enum: Crash data has been sent */ +#define MC_CMD_FC_GET_ASSERT_FLAGS_STATE_NOTIFIED 0x2 +#define MC_CMD_FC_OUT_GET_ASSERT_TYPE_LBN 0 +#define MC_CMD_FC_OUT_GET_ASSERT_TYPE_WIDTH 8 +/* enum: No crash has been recorded. */ +#define MC_CMD_FC_GET_ASSERT_FLAGS_TYPE_NONE 0x0 +/* enum: Crash due to exception. */ +#define MC_CMD_FC_GET_ASSERT_FLAGS_TYPE_EXCEPTION 0x1 +/* enum: Crash due to assertion. */ +#define MC_CMD_FC_GET_ASSERT_FLAGS_TYPE_ASSERTION 0x2 +/* Failing PC value */ +#define MC_CMD_FC_OUT_GET_ASSERT_SAVED_PC_OFFS_OFST 4 +#define MC_CMD_FC_OUT_GET_ASSERT_SAVED_PC_OFFS_LEN 4 +/* Saved GP regs */ +#define MC_CMD_FC_OUT_GET_ASSERT_GP_REGS_OFFS_OFST 8 +#define MC_CMD_FC_OUT_GET_ASSERT_GP_REGS_OFFS_LEN 4 +#define MC_CMD_FC_OUT_GET_ASSERT_GP_REGS_OFFS_NUM 31 +/* Exception Type */ +#define MC_CMD_FC_OUT_GET_ASSERT_EXCEPTION_TYPE_OFFS_OFST 132 +#define MC_CMD_FC_OUT_GET_ASSERT_EXCEPTION_TYPE_OFFS_LEN 4 +/* Instruction at which exception occurred */ +#define MC_CMD_FC_OUT_GET_ASSERT_EXCEPTION_PC_ADDR_OFFS_OFST 136 +#define MC_CMD_FC_OUT_GET_ASSERT_EXCEPTION_PC_ADDR_OFFS_LEN 4 +/* BAD Address that triggered address-based exception */ +#define MC_CMD_FC_OUT_GET_ASSERT_EXCEPTION_BAD_ADDR_OFFS_OFST 140 +#define MC_CMD_FC_OUT_GET_ASSERT_EXCEPTION_BAD_ADDR_OFFS_LEN 4 + +/* MC_CMD_FC_OUT_FPGA_BUILD msgresponse */ +#define MC_CMD_FC_OUT_FPGA_BUILD_LEN 32 +#define MC_CMD_FC_OUT_FPGA_BUILD_COMPONENT_INFO_OFST 0 +#define MC_CMD_FC_OUT_FPGA_BUILD_COMPONENT_INFO_LEN 4 +#define MC_CMD_FC_OUT_FPGA_BUILD_IS_APPLICATION_LBN 31 +#define MC_CMD_FC_OUT_FPGA_BUILD_IS_APPLICATION_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_IS_LICENSED_LBN 30 +#define MC_CMD_FC_OUT_FPGA_BUILD_IS_LICENSED_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_COMPONENT_ID_LBN 16 +#define MC_CMD_FC_OUT_FPGA_BUILD_COMPONENT_ID_WIDTH 14 +#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_MAJOR_LBN 12 +#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_MAJOR_WIDTH 4 +#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_MINOR_LBN 4 +#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_MINOR_WIDTH 8 +#define MC_CMD_FC_OUT_FPGA_BUILD_BUILD_NUM_LBN 0 +#define MC_CMD_FC_OUT_FPGA_BUILD_BUILD_NUM_WIDTH 4 +/* Build timestamp (seconds since epoch) */ +#define MC_CMD_FC_OUT_FPGA_BUILD_TIMESTAMP_OFST 4 +#define MC_CMD_FC_OUT_FPGA_BUILD_TIMESTAMP_LEN 4 +#define MC_CMD_FC_OUT_FPGA_BUILD_PARAMETERS_OFST 8 +#define MC_CMD_FC_OUT_FPGA_BUILD_PARAMETERS_LEN 4 +#define MC_CMD_FC_OUT_FPGA_BUILD_FPGA_TYPE_LBN 0 +#define MC_CMD_FC_OUT_FPGA_BUILD_FPGA_TYPE_WIDTH 8 +#define MC_CMD_FC_FPGA_TYPE_A7 0xa7 /* enum */ +#define MC_CMD_FC_FPGA_TYPE_A5 0xa5 /* enum */ +#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED1_LBN 8 +#define 
MC_CMD_FC_OUT_FPGA_BUILD_RESERVED1_WIDTH 10 +#define MC_CMD_FC_OUT_FPGA_BUILD_PTP_ENABLED_LBN 18 +#define MC_CMD_FC_OUT_FPGA_BUILD_PTP_ENABLED_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM1_RLDRAM_DEF_LBN 19 +#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM1_RLDRAM_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM2_RLDRAM_DEF_LBN 20 +#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM2_RLDRAM_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM3_RLDRAM_DEF_LBN 21 +#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM3_RLDRAM_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM4_RLDRAM_DEF_LBN 22 +#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM4_RLDRAM_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T0_DDR3_DEF_LBN 23 +#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T0_DDR3_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T1_DDR3_DEF_LBN 24 +#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T1_DDR3_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_B0_DDR3_DEF_LBN 25 +#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_B0_DDR3_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_B1_DDR3_DEF_LBN 26 +#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_B1_DDR3_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_DDR3_ECC_ENABLED_LBN 27 +#define MC_CMD_FC_OUT_FPGA_BUILD_DDR3_ECC_ENABLED_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T1_QDR_DEF_LBN 28 +#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T1_QDR_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED2_LBN 29 +#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED2_WIDTH 2 +#define MC_CMD_FC_OUT_FPGA_BUILD_CRC_APPEND_LBN 31 +#define MC_CMD_FC_OUT_FPGA_BUILD_CRC_APPEND_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_IDENTIFIER_OFST 12 +#define MC_CMD_FC_OUT_FPGA_BUILD_IDENTIFIER_LEN 4 +#define MC_CMD_FC_OUT_FPGA_BUILD_CHANGESET_LBN 0 +#define MC_CMD_FC_OUT_FPGA_BUILD_CHANGESET_WIDTH 16 +#define MC_CMD_FC_OUT_FPGA_BUILD_BUILD_FLAG_LBN 16 +#define MC_CMD_FC_OUT_FPGA_BUILD_BUILD_FLAG_WIDTH 1 +#define MC_CMD_FC_FPGA_BUILD_FLAG_INTERNAL 0x0 /* enum */ +#define MC_CMD_FC_FPGA_BUILD_FLAG_RELEASE 0x1 /* enum */ +#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED3_LBN 17 +#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED3_WIDTH 15 +#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_HI_OFST 16 +#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_HI_LEN 4 +#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MINOR_LBN 0 +#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MINOR_WIDTH 16 +#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MAJOR_LBN 16 +#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MAJOR_WIDTH 16 +#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_LO_OFST 20 +#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_LO_LEN 4 +#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_BUILD_LBN 0 +#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_BUILD_WIDTH 16 +#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MICRO_LBN 16 +#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MICRO_WIDTH 16 +#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED4_OFST 16 +#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED4_LEN 8 +#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED4_LO_OFST 16 +#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED4_HI_OFST 20 +#define MC_CMD_FC_OUT_FPGA_BUILD_REVISION_LO_OFST 24 +#define MC_CMD_FC_OUT_FPGA_BUILD_REVISION_LO_LEN 4 +#define MC_CMD_FC_OUT_FPGA_BUILD_REVISION_HI_OFST 28 +#define MC_CMD_FC_OUT_FPGA_BUILD_REVISION_HI_LEN 4 +#define MC_CMD_FC_OUT_FPGA_BUILD_REVISION_HIGH_LBN 0 +#define MC_CMD_FC_OUT_FPGA_BUILD_REVISION_HIGH_WIDTH 16 + +/* MC_CMD_FC_OUT_FPGA_BUILD_V2 msgresponse */ +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_LEN 32 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_COMPONENT_INFO_OFST 0 +#define 
MC_CMD_FC_OUT_FPGA_BUILD_V2_COMPONENT_INFO_LEN 4 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IS_APPLICATION_LBN 31 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IS_APPLICATION_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IS_LICENSED_LBN 30 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IS_LICENSED_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_COMPONENT_ID_LBN 16 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_COMPONENT_ID_WIDTH 14 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_MAJOR_LBN 12 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_MAJOR_WIDTH 4 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_MINOR_LBN 4 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_MINOR_WIDTH 8 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_BUILD_NUM_LBN 0 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_BUILD_NUM_WIDTH 4 +/* Build timestamp (seconds since epoch) */ +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_TIMESTAMP_OFST 4 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_TIMESTAMP_LEN 4 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_PARAMETERS_OFST 8 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_PARAMETERS_LEN 4 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_PMA_PASSTHROUGH_LBN 31 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_PMA_PASSTHROUGH_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_QDR_DEF_LBN 29 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_QDR_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_QDR_DEF_LBN 28 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_QDR_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DDR3_ECC_ENABLED_LBN 27 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DDR3_ECC_ENABLED_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE2_DDR3_DEF_LBN 26 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE2_DDR3_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE1_DDR3_DEF_LBN 25 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE1_DDR3_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_TO_DDR3_DEF_LBN 24 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_TO_DDR3_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_T0_DDR3_DEF_LBN 23 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_T0_DDR3_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE2_RLDRAM_DEF_LBN 22 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE2_RLDRAM_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE1_RLDRAM_DEF_LBN 21 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE1_RLDRAM_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_RLDRAM_DEF_LBN 20 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_RLDRAM_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_RLDRAM_DEF_LBN 19 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_RLDRAM_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_3_SPEED_LBN 18 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_3_SPEED_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_3_SPEED_10G 0x0 /* enum */ +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_3_SPEED_40G 0x1 /* enum */ +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_7_SPEED_LBN 17 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_7_SPEED_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_7_SPEED_10G 0x0 /* enum */ +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_7_SPEED_40G 0x1 /* enum */ +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_3_SPEED_LBN 16 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_3_SPEED_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_3_SPEED_10G 0x0 /* enum */ +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_3_SPEED_40G 0x1 /* enum */ +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP7_DEF_LBN 15 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP7_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP6_DEF_LBN 14 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP6_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP5_DEF_LBN 13 +#define 
MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP5_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_DEF_LBN 12 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP3_DEF_LBN 11 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP3_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP2_DEF_LBN 10 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP2_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP1_DEF_LBN 9 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP1_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_DEF_LBN 8 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC3_DEF_LBN 7 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC3_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC2_DEF_LBN 6 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC2_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC1_DEF_LBN 5 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC1_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_DEF_LBN 4 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_FPGA_TYPE_LBN 0 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_FPGA_TYPE_WIDTH 4 +#define MC_CMD_FC_FPGA_V2_TYPE_A3 0x0 /* enum */ +#define MC_CMD_FC_FPGA_V2_TYPE_A4 0x1 /* enum */ +#define MC_CMD_FC_FPGA_V2_TYPE_A5 0x2 /* enum */ +#define MC_CMD_FC_FPGA_V2_TYPE_A7 0x3 /* enum */ +#define MC_CMD_FC_FPGA_V2_TYPE_D3 0x8 /* enum */ +#define MC_CMD_FC_FPGA_V2_TYPE_D4 0x9 /* enum */ +#define MC_CMD_FC_FPGA_V2_TYPE_D5 0xa /* enum */ +#define MC_CMD_FC_FPGA_V2_TYPE_D7 0xb /* enum */ +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IDENTIFIER_OFST 12 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IDENTIFIER_LEN 4 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_CHANGESET_LBN 0 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_CHANGESET_WIDTH 16 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_BUILD_FLAG_LBN 16 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_BUILD_FLAG_WIDTH 1 +/* MC_CMD_FC_FPGA_BUILD_FLAG_INTERNAL 0x0 */ +/* MC_CMD_FC_FPGA_BUILD_FLAG_RELEASE 0x1 */ +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_HI_OFST 16 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_HI_LEN 4 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MINOR_LBN 0 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MINOR_WIDTH 16 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MAJOR_LBN 16 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MAJOR_WIDTH 16 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_LO_OFST 20 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_LO_LEN 4 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_BUILD_LBN 0 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_BUILD_WIDTH 16 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MICRO_LBN 16 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MICRO_WIDTH 16 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_REVISION_LO_OFST 24 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_REVISION_LO_LEN 4 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_REVISION_HI_OFST 28 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_REVISION_HI_LEN 4 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_REVISION_HIGH_LBN 0 +#define MC_CMD_FC_OUT_FPGA_BUILD_V2_REVISION_HIGH_WIDTH 16 + +/* MC_CMD_FC_OUT_FPGA_SERVICES msgresponse */ +#define MC_CMD_FC_OUT_FPGA_SERVICES_LEN 32 +#define MC_CMD_FC_OUT_FPGA_SERVICES_COMPONENT_INFO_OFST 0 +#define MC_CMD_FC_OUT_FPGA_SERVICES_COMPONENT_INFO_LEN 4 +#define MC_CMD_FC_OUT_FPGA_SERVICES_IS_APPLICATION_LBN 31 +#define MC_CMD_FC_OUT_FPGA_SERVICES_IS_APPLICATION_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_SERVICES_IS_LICENSED_LBN 30 +#define MC_CMD_FC_OUT_FPGA_SERVICES_IS_LICENSED_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_SERVICES_COMPONENT_ID_LBN 16 
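(Editorial aside, not part of the upstream patch.) Throughout these headers each field is located by an _OFST/_LEN pair, which selects a 32-bit word in the little-endian MCDI message payload, plus an _LBN (lowest bit number) and _WIDTH (field width in bits) pair, which locates the field inside that word. The helper below is a minimal, hypothetical sketch of how a consumer might decode such a field; the function names are illustrative, and it assumes a little-endian host and `payload` pointing at the start of the response payload.

#include <stdint.h>
#include <string.h>

/*
 * Illustrative helper: read one bitfield out of an MCDI response
 * payload. 'ofst' is the byte offset of the 32-bit word (_OFST),
 * 'lbn' the lowest bit number (_LBN) and 'width' the field width
 * in bits (_WIDTH). Assumes a little-endian host.
 */
static uint32_t
mcdi_field(const uint8_t *payload, unsigned int ofst,
	   unsigned int lbn, unsigned int width)
{
	uint32_t dword;
	uint32_t mask;

	memcpy(&dword, payload + ofst, sizeof(dword));
	mask = (width == 32) ? 0xffffffffu : ((1u << width) - 1u);
	return ((dword >> lbn) & mask);
}

/* Example: extract the 14-bit COMPONENT_ID from an FPGA_SERVICES reply. */
static uint32_t
fc_services_component_id(const uint8_t *payload)
{
	return (mcdi_field(payload,
	    MC_CMD_FC_OUT_FPGA_SERVICES_COMPONENT_INFO_OFST,
	    MC_CMD_FC_OUT_FPGA_SERVICES_COMPONENT_ID_LBN,
	    MC_CMD_FC_OUT_FPGA_SERVICES_COMPONENT_ID_WIDTH));
}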
+#define MC_CMD_FC_OUT_FPGA_SERVICES_COMPONENT_ID_WIDTH 14 +#define MC_CMD_FC_OUT_FPGA_SERVICES_VERSION_MAJOR_LBN 12 +#define MC_CMD_FC_OUT_FPGA_SERVICES_VERSION_MAJOR_WIDTH 4 +#define MC_CMD_FC_OUT_FPGA_SERVICES_VERSION_MINOR_LBN 4 +#define MC_CMD_FC_OUT_FPGA_SERVICES_VERSION_MINOR_WIDTH 8 +#define MC_CMD_FC_OUT_FPGA_SERVICES_BUILD_NUM_LBN 0 +#define MC_CMD_FC_OUT_FPGA_SERVICES_BUILD_NUM_WIDTH 4 +/* Build timestamp (seconds since epoch) */ +#define MC_CMD_FC_OUT_FPGA_SERVICES_TIMESTAMP_OFST 4 +#define MC_CMD_FC_OUT_FPGA_SERVICES_TIMESTAMP_LEN 4 +#define MC_CMD_FC_OUT_FPGA_SERVICES_PARAMETERS_OFST 8 +#define MC_CMD_FC_OUT_FPGA_SERVICES_PARAMETERS_LEN 4 +#define MC_CMD_FC_OUT_FPGA_SERVICES_FC_FLASH_BOOTED_LBN 8 +#define MC_CMD_FC_OUT_FPGA_SERVICES_FC_FLASH_BOOTED_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_SERVICES_NIC0_DEF_LBN 27 +#define MC_CMD_FC_OUT_FPGA_SERVICES_NIC0_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_SERVICES_NIC1_DEF_LBN 28 +#define MC_CMD_FC_OUT_FPGA_SERVICES_NIC1_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_SERVICES_SFP0_DEF_LBN 29 +#define MC_CMD_FC_OUT_FPGA_SERVICES_SFP0_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_SERVICES_SFP1_DEF_LBN 30 +#define MC_CMD_FC_OUT_FPGA_SERVICES_SFP1_DEF_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_SERVICES_RESERVED_LBN 31 +#define MC_CMD_FC_OUT_FPGA_SERVICES_RESERVED_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_SERVICES_IDENTIFIER_OFST 12 +#define MC_CMD_FC_OUT_FPGA_SERVICES_IDENTIFIER_LEN 4 +#define MC_CMD_FC_OUT_FPGA_SERVICES_CHANGESET_LBN 0 +#define MC_CMD_FC_OUT_FPGA_SERVICES_CHANGESET_WIDTH 16 +#define MC_CMD_FC_OUT_FPGA_SERVICES_BUILD_FLAG_LBN 16 +#define MC_CMD_FC_OUT_FPGA_SERVICES_BUILD_FLAG_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_OFST 16 +#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_LEN 4 +#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_WIDTH_LBN 0 +#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_WIDTH_WIDTH 16 +#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_COUNT_LBN 16 +#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_COUNT_WIDTH 16 +#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_OFST 20 +#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_LEN 4 +#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_WIDTH_LBN 0 +#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_WIDTH_WIDTH 16 +#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_COUNT_LBN 16 +#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_COUNT_WIDTH 16 +#define MC_CMD_FC_OUT_FPGA_SERVICES_REVISION_LO_OFST 24 +#define MC_CMD_FC_OUT_FPGA_SERVICES_REVISION_LO_LEN 4 +#define MC_CMD_FC_OUT_FPGA_SERVICES_REVISION_HI_OFST 28 +#define MC_CMD_FC_OUT_FPGA_SERVICES_REVISION_HI_LEN 4 +#define MC_CMD_FC_OUT_FPGA_SERVICES_REVISION_HIGH_LBN 0 +#define MC_CMD_FC_OUT_FPGA_SERVICES_REVISION_HIGH_WIDTH 16 + +/* MC_CMD_FC_OUT_FPGA_SERVICES_V2 msgresponse */ +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_LEN 32 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_COMPONENT_INFO_OFST 0 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_COMPONENT_INFO_LEN 4 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IS_APPLICATION_LBN 31 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IS_APPLICATION_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IS_LICENSED_LBN 30 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IS_LICENSED_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_COMPONENT_ID_LBN 16 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_COMPONENT_ID_WIDTH 14 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_VERSION_MAJOR_LBN 12 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_VERSION_MAJOR_WIDTH 4 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_VERSION_MINOR_LBN 4 +#define 
MC_CMD_FC_OUT_FPGA_SERVICES_V2_VERSION_MINOR_WIDTH 8 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_BUILD_NUM_LBN 0 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_BUILD_NUM_WIDTH 4 +/* Build timestamp (seconds since epoch) */ +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_TIMESTAMP_OFST 4 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_TIMESTAMP_LEN 4 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_PARAMETERS_OFST 8 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_PARAMETERS_LEN 4 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_PTP_ENABLED_LBN 0 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_PTP_ENABLED_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_FC_FLASH_BOOTED_LBN 8 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_FC_FLASH_BOOTED_WIDTH 1 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IDENTIFIER_OFST 12 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IDENTIFIER_LEN 4 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_CHANGESET_LBN 0 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_CHANGESET_WIDTH 16 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_BUILD_FLAG_LBN 16 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_BUILD_FLAG_WIDTH 1 +/* MC_CMD_FC_FPGA_BUILD_FLAG_INTERNAL 0x0 */ +/* MC_CMD_FC_FPGA_BUILD_FLAG_RELEASE 0x1 */ +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_REVISION_LO_OFST 24 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_REVISION_LO_LEN 4 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_REVISION_HI_OFST 28 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_REVISION_HI_LEN 4 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_REVISION_HIGH_LBN 0 +#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_REVISION_HIGH_WIDTH 16 + +/* MC_CMD_FC_OUT_BSP_VERSION msgresponse */ +#define MC_CMD_FC_OUT_BSP_VERSION_LEN 4 +/* Qsys system ID */ +#define MC_CMD_FC_OUT_BSP_VERSION_SYSID_OFST 0 +#define MC_CMD_FC_OUT_BSP_VERSION_SYSID_LEN 4 +#define MC_CMD_FC_OUT_BSP_VERSION_VERSION_MAJOR_LBN 12 +#define MC_CMD_FC_OUT_BSP_VERSION_VERSION_MAJOR_WIDTH 4 +#define MC_CMD_FC_OUT_BSP_VERSION_VERSION_MINOR_LBN 4 +#define MC_CMD_FC_OUT_BSP_VERSION_VERSION_MINOR_WIDTH 8 +#define MC_CMD_FC_OUT_BSP_VERSION_BUILD_NUM_LBN 0 +#define MC_CMD_FC_OUT_BSP_VERSION_BUILD_NUM_WIDTH 4 + +/* MC_CMD_FC_OUT_READ_MAP_COUNT msgresponse */ +#define MC_CMD_FC_OUT_READ_MAP_COUNT_LEN 4 +/* Number of maps */ +#define MC_CMD_FC_OUT_READ_MAP_COUNT_NUM_MAPS_OFST 0 +#define MC_CMD_FC_OUT_READ_MAP_COUNT_NUM_MAPS_LEN 4 + +/* MC_CMD_FC_OUT_READ_MAP_INDEX msgresponse */ +#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN 164 +/* Index of the map */ +#define MC_CMD_FC_OUT_READ_MAP_INDEX_INDEX_OFST 0 +#define MC_CMD_FC_OUT_READ_MAP_INDEX_INDEX_LEN 4 +/* Options for the map */ +#define MC_CMD_FC_OUT_READ_MAP_INDEX_OPTIONS_OFST 4 +#define MC_CMD_FC_OUT_READ_MAP_INDEX_OPTIONS_LEN 4 +#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_8 0x0 /* enum */ +#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_16 0x1 /* enum */ +#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_32 0x2 /* enum */ +#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_64 0x3 /* enum */ +#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_MASK 0x3 /* enum */ +#define MC_CMD_FC_OUT_READ_MAP_INDEX_PATH_FC 0x4 /* enum */ +#define MC_CMD_FC_OUT_READ_MAP_INDEX_PATH_MEM 0x8 /* enum */ +#define MC_CMD_FC_OUT_READ_MAP_INDEX_PERM_READ 0x10 /* enum */ +#define MC_CMD_FC_OUT_READ_MAP_INDEX_PERM_WRITE 0x20 /* enum */ +#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_FREE 0x0 /* enum */ +#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_LICENSED 0x40 /* enum */ +/* Address of start of map */ +#define MC_CMD_FC_OUT_READ_MAP_INDEX_ADDRESS_OFST 8 +#define MC_CMD_FC_OUT_READ_MAP_INDEX_ADDRESS_LEN 8 +#define MC_CMD_FC_OUT_READ_MAP_INDEX_ADDRESS_LO_OFST 8 +#define MC_CMD_FC_OUT_READ_MAP_INDEX_ADDRESS_HI_OFST 12 +/* Length of 
address map */ +#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN_OFST 16 +#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN_LEN 8 +#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN_LO_OFST 16 +#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN_HI_OFST 20 +/* Component information field */ +#define MC_CMD_FC_OUT_READ_MAP_INDEX_COMP_INFO_OFST 24 +#define MC_CMD_FC_OUT_READ_MAP_INDEX_COMP_INFO_LEN 4 +/* License expiry data for map */ +#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_DATE_OFST 28 +#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_DATE_LEN 8 +#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_DATE_LO_OFST 28 +#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_DATE_HI_OFST 32 +/* Name of the component */ +#define MC_CMD_FC_OUT_READ_MAP_INDEX_NAME_OFST 36 +#define MC_CMD_FC_OUT_READ_MAP_INDEX_NAME_LEN 1 +#define MC_CMD_FC_OUT_READ_MAP_INDEX_NAME_NUM 128 + +/* MC_CMD_FC_OUT_READ_MAP msgresponse */ +#define MC_CMD_FC_OUT_READ_MAP_LEN 0 + +/* MC_CMD_FC_OUT_CAPABILITIES msgresponse */ +#define MC_CMD_FC_OUT_CAPABILITIES_LEN 8 +/* Number of internal ports */ +#define MC_CMD_FC_OUT_CAPABILITIES_INTERNAL_OFST 0 +#define MC_CMD_FC_OUT_CAPABILITIES_INTERNAL_LEN 4 +/* Number of external ports */ +#define MC_CMD_FC_OUT_CAPABILITIES_EXTERNAL_OFST 4 +#define MC_CMD_FC_OUT_CAPABILITIES_EXTERNAL_LEN 4 + +/* MC_CMD_FC_OUT_GLOBAL_FLAGS msgresponse */ +#define MC_CMD_FC_OUT_GLOBAL_FLAGS_LEN 4 +#define MC_CMD_FC_OUT_GLOBAL_FLAGS_FLAGS_OFST 0 +#define MC_CMD_FC_OUT_GLOBAL_FLAGS_FLAGS_LEN 4 + +/* MC_CMD_FC_OUT_IO_REL msgresponse */ +#define MC_CMD_FC_OUT_IO_REL_LEN 0 + +/* MC_CMD_FC_OUT_IO_REL_GET_ADDR msgresponse */ +#define MC_CMD_FC_OUT_IO_REL_GET_ADDR_LEN 8 +#define MC_CMD_FC_OUT_IO_REL_GET_ADDR_ADDR_HI_OFST 0 +#define MC_CMD_FC_OUT_IO_REL_GET_ADDR_ADDR_HI_LEN 4 +#define MC_CMD_FC_OUT_IO_REL_GET_ADDR_ADDR_LO_OFST 4 +#define MC_CMD_FC_OUT_IO_REL_GET_ADDR_ADDR_LO_LEN 4 + +/* MC_CMD_FC_OUT_IO_REL_READ32 msgresponse */ +#define MC_CMD_FC_OUT_IO_REL_READ32_LENMIN 4 +#define MC_CMD_FC_OUT_IO_REL_READ32_LENMAX 252 +#define MC_CMD_FC_OUT_IO_REL_READ32_LEN(num) (0+4*(num)) +#define MC_CMD_FC_OUT_IO_REL_READ32_BUFFER_OFST 0 +#define MC_CMD_FC_OUT_IO_REL_READ32_BUFFER_LEN 4 +#define MC_CMD_FC_OUT_IO_REL_READ32_BUFFER_MINNUM 1 +#define MC_CMD_FC_OUT_IO_REL_READ32_BUFFER_MAXNUM 63 + +/* MC_CMD_FC_OUT_IO_REL_WRITE32 msgresponse */ +#define MC_CMD_FC_OUT_IO_REL_WRITE32_LEN 0 + +/* MC_CMD_FC_OUT_UHLINK_PHY msgresponse */ +#define MC_CMD_FC_OUT_UHLINK_PHY_LEN 48 +#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_SETTINGS_0_OFST 0 +#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_SETTINGS_0_LEN 4 +#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_VOD_LBN 0 +#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_VOD_WIDTH 16 +#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_1STPOSTTAP_LBN 16 +#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_1STPOSTTAP_WIDTH 16 +/* Transceiver Transmit settings */ +#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_SETTINGS_1_OFST 4 +#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_SETTINGS_1_LEN 4 +#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_PRETAP_LBN 0 +#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_PRETAP_WIDTH 16 +#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_2NDPOSTTAP_LBN 16 +#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_2NDPOSTTAP_WIDTH 16 +/* Transceiver Receive settings */ +#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_SETTINGS_OFST 8 +#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_SETTINGS_LEN 4 +#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_DC_GAIN_LBN 0 +#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_DC_GAIN_WIDTH 16 +#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_EQ_CONTROL_LBN 16 +#define 
MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_EQ_CONTROL_WIDTH 16
+/* Rx eye opening */
+#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_OFST 12
+#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_LEN 4
+#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_WIDTH_LBN 0
+#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_WIDTH_WIDTH 16
+#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_HEIGHT_LBN 16
+#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_HEIGHT_WIDTH 16
+/* PCS status word */
+#define MC_CMD_FC_OUT_UHLINK_PHY_PCS_STATUS_OFST 16
+#define MC_CMD_FC_OUT_UHLINK_PHY_PCS_STATUS_LEN 4
+/* Link status word */
+#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_STATE_WORD_OFST 20
+#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_STATE_WORD_LEN 4
+#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_STATE_LBN 0
+#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_STATE_WIDTH 1
+#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_CONFIGURED_LBN 1
+#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_CONFIGURED_WIDTH 1
+/* Current SFP parameters applied */
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_PARAMS_OFST 24
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_PARAMS_LEN 20
+/* Link speed in Mb/s: 100, 1000 or 10000 */
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_SPEED_OFST 24
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_SPEED_LEN 4
+/* Length of copper cable - zero when not relevant */
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_COPPER_LEN_OFST 28
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_COPPER_LEN_LEN 4
+/* True if a dual-speed SFP+ module */
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_DUAL_SPEED_OFST 32
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_DUAL_SPEED_LEN 4
+/* True if an SFP module is present (other fields valid when true) */
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_PRESENT_OFST 36
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_PRESENT_LEN 4
+/* The type of the SFP+ module */
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_TYPE_OFST 40
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_TYPE_LEN 4
+/* PHY config flags */
+#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_OFST 44
+#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_LEN 4
+#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_DFE_LBN 0
+#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_DFE_WIDTH 1
+#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_AEQ_LBN 1
+#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_AEQ_WIDTH 1
+#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_RX_TUNING_LBN 2
+#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_RX_TUNING_WIDTH 1
+
+/* MC_CMD_FC_OUT_UHLINK_MAC msgresponse */
+#define MC_CMD_FC_OUT_UHLINK_MAC_LEN 20
+/* MAC configuration applied */
+#define MC_CMD_FC_OUT_UHLINK_MAC_CONFIG_OFST 0
+#define MC_CMD_FC_OUT_UHLINK_MAC_CONFIG_LEN 4
+/* MTU size */
+#define MC_CMD_FC_OUT_UHLINK_MAC_MTU_OFST 4
+#define MC_CMD_FC_OUT_UHLINK_MAC_MTU_LEN 4
+/* IF Mode status */
+#define MC_CMD_FC_OUT_UHLINK_MAC_IF_STATUS_OFST 8
+#define MC_CMD_FC_OUT_UHLINK_MAC_IF_STATUS_LEN 4
+/* MAC address configured */
+#define MC_CMD_FC_OUT_UHLINK_MAC_ADDR_OFST 12
+#define MC_CMD_FC_OUT_UHLINK_MAC_ADDR_LEN 8
+#define MC_CMD_FC_OUT_UHLINK_MAC_ADDR_LO_OFST 12
+#define MC_CMD_FC_OUT_UHLINK_MAC_ADDR_HI_OFST 16
+
+/* MC_CMD_FC_OUT_UHLINK_RX_EYE msgresponse */
+#define MC_CMD_FC_OUT_UHLINK_RX_EYE_LEN ((((0-1+(32*MC_CMD_FC_UHLINK_RX_EYE_PER_BLOCK))+1))>>3)
+/* Rx Eye measurements */
+#define MC_CMD_FC_OUT_UHLINK_RX_EYE_RX_EYE_OFST 0
+#define MC_CMD_FC_OUT_UHLINK_RX_EYE_RX_EYE_LEN 4
+#define MC_CMD_FC_OUT_UHLINK_RX_EYE_RX_EYE_NUM MC_CMD_FC_UHLINK_RX_EYE_PER_BLOCK
+
+/* MC_CMD_FC_OUT_UHLINK_DUMP_RX_EYE_PLOT msgresponse */
+#define MC_CMD_FC_OUT_UHLINK_DUMP_RX_EYE_PLOT_LEN 0
+
+/* MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT msgresponse */
+#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_LEN ((((32-1+(64*MC_CMD_FC_UHLINK_RX_EYE_PLOT_ROWS_PER_BLOCK))+1))>>3)
+/*
Has the eye plot dump completed and data returned is valid? */ +#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_VALID_OFST 0 +#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_VALID_LEN 4 +/* Rx Eye binary plot */ +#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_OFST 4 +#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_LEN 8 +#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_LO_OFST 4 +#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_HI_OFST 8 +#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_NUM MC_CMD_FC_UHLINK_RX_EYE_PLOT_ROWS_PER_BLOCK + +/* MC_CMD_FC_OUT_UHLINK_RX_TUNE msgresponse */ +#define MC_CMD_FC_OUT_UHLINK_RX_TUNE_LEN 0 + +/* MC_CMD_FC_OUT_UHLINK_LOOPBACK_SET msgresponse */ +#define MC_CMD_FC_OUT_UHLINK_LOOPBACK_SET_LEN 0 + +/* MC_CMD_FC_OUT_UHLINK_LOOPBACK_GET msgresponse */ +#define MC_CMD_FC_OUT_UHLINK_LOOPBACK_GET_LEN 4 +#define MC_CMD_FC_OUT_UHLINK_LOOPBACK_GET_STATE_OFST 0 +#define MC_CMD_FC_OUT_UHLINK_LOOPBACK_GET_STATE_LEN 4 + +/* MC_CMD_FC_OUT_UHLINK msgresponse */ +#define MC_CMD_FC_OUT_UHLINK_LEN 0 + +/* MC_CMD_FC_OUT_SET_LINK msgresponse */ +#define MC_CMD_FC_OUT_SET_LINK_LEN 0 + +/* MC_CMD_FC_OUT_LICENSE msgresponse */ +#define MC_CMD_FC_OUT_LICENSE_LEN 12 +/* Count of valid keys */ +#define MC_CMD_FC_OUT_LICENSE_VALID_KEYS_OFST 0 +#define MC_CMD_FC_OUT_LICENSE_VALID_KEYS_LEN 4 +/* Count of invalid keys */ +#define MC_CMD_FC_OUT_LICENSE_INVALID_KEYS_OFST 4 +#define MC_CMD_FC_OUT_LICENSE_INVALID_KEYS_LEN 4 +/* Count of blacklisted keys */ +#define MC_CMD_FC_OUT_LICENSE_BLACKLISTED_KEYS_OFST 8 +#define MC_CMD_FC_OUT_LICENSE_BLACKLISTED_KEYS_LEN 4 + +/* MC_CMD_FC_OUT_STARTUP msgresponse */ +#define MC_CMD_FC_OUT_STARTUP_LEN 4 +/* Capabilities of the FPGA/FC */ +#define MC_CMD_FC_OUT_STARTUP_CAPABILITIES_OFST 0 +#define MC_CMD_FC_OUT_STARTUP_CAPABILITIES_LEN 4 +#define MC_CMD_FC_OUT_STARTUP_CAN_ACCESS_FLASH_LBN 0 +#define MC_CMD_FC_OUT_STARTUP_CAN_ACCESS_FLASH_WIDTH 1 + +/* MC_CMD_FC_OUT_DMA_READ msgresponse */ +#define MC_CMD_FC_OUT_DMA_READ_LENMIN 1 +#define MC_CMD_FC_OUT_DMA_READ_LENMAX 252 +#define MC_CMD_FC_OUT_DMA_READ_LEN(num) (0+1*(num)) +/* The data read */ +#define MC_CMD_FC_OUT_DMA_READ_DATA_OFST 0 +#define MC_CMD_FC_OUT_DMA_READ_DATA_LEN 1 +#define MC_CMD_FC_OUT_DMA_READ_DATA_MINNUM 1 +#define MC_CMD_FC_OUT_DMA_READ_DATA_MAXNUM 252 + +/* MC_CMD_FC_OUT_TIMED_READ_SET msgresponse */ +#define MC_CMD_FC_OUT_TIMED_READ_SET_LEN 4 +/* Timer handle */ +#define MC_CMD_FC_OUT_TIMED_READ_SET_FC_HANDLE_OFST 0 +#define MC_CMD_FC_OUT_TIMED_READ_SET_FC_HANDLE_LEN 4 + +/* MC_CMD_FC_OUT_TIMED_READ_GET msgresponse */ +#define MC_CMD_FC_OUT_TIMED_READ_GET_LEN 52 +/* Host supplied handle (unique) */ +#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_HANDLE_OFST 0 +#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_HANDLE_LEN 4 +/* Address into which to transfer data in host */ +#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_DMA_ADDRESS_OFST 4 +#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_DMA_ADDRESS_LEN 8 +#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_DMA_ADDRESS_LO_OFST 4 +#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_DMA_ADDRESS_HI_OFST 8 +/* AOE address from which to transfer data */ +#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_ADDRESS_OFST 12 +#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_ADDRESS_LEN 8 +#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_ADDRESS_LO_OFST 12 +#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_ADDRESS_HI_OFST 16 +/* Length of AOE transfer (total) */ +#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_LENGTH_OFST 20 +#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_LENGTH_LEN 4 +/* Length of host transfer (total) */ 
+#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_LENGTH_OFST 24 +#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_LENGTH_LEN 4 +/* See FLAGS entry for MC_CMD_FC_IN_TIMED_READ_SET */ +#define MC_CMD_FC_OUT_TIMED_READ_GET_FLAGS_OFST 28 +#define MC_CMD_FC_OUT_TIMED_READ_GET_FLAGS_LEN 4 +#define MC_CMD_FC_OUT_TIMED_READ_GET_PERIOD_OFST 32 +#define MC_CMD_FC_OUT_TIMED_READ_GET_PERIOD_LEN 4 +/* When active, start read time */ +#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_START_OFST 36 +#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_START_LEN 8 +#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_START_LO_OFST 36 +#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_START_HI_OFST 40 +/* When active, end read time */ +#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_END_OFST 44 +#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_END_LEN 8 +#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_END_LO_OFST 44 +#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_END_HI_OFST 48 + +/* MC_CMD_FC_OUT_LOG_ADDR_RANGE msgresponse */ +#define MC_CMD_FC_OUT_LOG_ADDR_RANGE_LEN 0 + +/* MC_CMD_FC_OUT_LOG msgresponse */ +#define MC_CMD_FC_OUT_LOG_LEN 0 + +/* MC_CMD_FC_OUT_CLOCK_GET_TIME msgresponse */ +#define MC_CMD_FC_OUT_CLOCK_GET_TIME_LEN 24 +#define MC_CMD_FC_OUT_CLOCK_GET_TIME_CLOCK_ID_OFST 0 +#define MC_CMD_FC_OUT_CLOCK_GET_TIME_CLOCK_ID_LEN 4 +#define MC_CMD_FC_OUT_CLOCK_GET_TIME_SECONDS_OFST 4 +#define MC_CMD_FC_OUT_CLOCK_GET_TIME_SECONDS_LEN 8 +#define MC_CMD_FC_OUT_CLOCK_GET_TIME_SECONDS_LO_OFST 4 +#define MC_CMD_FC_OUT_CLOCK_GET_TIME_SECONDS_HI_OFST 8 +#define MC_CMD_FC_OUT_CLOCK_GET_TIME_NANOSECONDS_OFST 12 +#define MC_CMD_FC_OUT_CLOCK_GET_TIME_NANOSECONDS_LEN 4 +#define MC_CMD_FC_OUT_CLOCK_GET_TIME_RANGE_OFST 16 +#define MC_CMD_FC_OUT_CLOCK_GET_TIME_RANGE_LEN 4 +#define MC_CMD_FC_OUT_CLOCK_GET_TIME_PRECISION_OFST 20 +#define MC_CMD_FC_OUT_CLOCK_GET_TIME_PRECISION_LEN 4 + +/* MC_CMD_FC_OUT_CLOCK_SET_TIME msgresponse */ +#define MC_CMD_FC_OUT_CLOCK_SET_TIME_LEN 0 + +/* MC_CMD_FC_OUT_DDR_SET_SPD msgresponse */ +#define MC_CMD_FC_OUT_DDR_SET_SPD_LEN 0 + +/* MC_CMD_FC_OUT_DDR_SET_INFO msgresponse */ +#define MC_CMD_FC_OUT_DDR_SET_INFO_LEN 0 + +/* MC_CMD_FC_OUT_DDR_GET_STATUS msgresponse */ +#define MC_CMD_FC_OUT_DDR_GET_STATUS_LEN 4 +#define MC_CMD_FC_OUT_DDR_GET_STATUS_FLAGS_OFST 0 +#define MC_CMD_FC_OUT_DDR_GET_STATUS_FLAGS_LEN 4 +#define MC_CMD_FC_OUT_DDR_GET_STATUS_READY_LBN 0 +#define MC_CMD_FC_OUT_DDR_GET_STATUS_READY_WIDTH 1 +#define MC_CMD_FC_OUT_DDR_GET_STATUS_CALIBRATED_LBN 1 +#define MC_CMD_FC_OUT_DDR_GET_STATUS_CALIBRATED_WIDTH 1 + +/* MC_CMD_FC_OUT_TIMESTAMP_READ_TRANSMIT msgresponse */ +#define MC_CMD_FC_OUT_TIMESTAMP_READ_TRANSMIT_LEN 8 +#define MC_CMD_FC_OUT_TIMESTAMP_READ_TRANSMIT_SECONDS_OFST 0 +#define MC_CMD_FC_OUT_TIMESTAMP_READ_TRANSMIT_SECONDS_LEN 4 +#define MC_CMD_FC_OUT_TIMESTAMP_READ_TRANSMIT_NANOSECONDS_OFST 4 +#define MC_CMD_FC_OUT_TIMESTAMP_READ_TRANSMIT_NANOSECONDS_LEN 4 + +/* MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT msgresponse */ +#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_LENMIN 8 +#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_LENMAX 248 +#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_LEN(num) (0+8*(num)) +#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_SECONDS_OFST 0 +#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_SECONDS_LEN 4 +#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_NANOSECONDS_OFST 4 +#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_NANOSECONDS_LEN 4 +#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_OFST 0 +#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_LEN 8 +#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_LO_OFST 
0 +#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_HI_OFST 4 +#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_MINNUM 0 +#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_MAXNUM 31 + +/* MC_CMD_FC_OUT_SPI_READ msgresponse */ +#define MC_CMD_FC_OUT_SPI_READ_LENMIN 4 +#define MC_CMD_FC_OUT_SPI_READ_LENMAX 252 +#define MC_CMD_FC_OUT_SPI_READ_LEN(num) (0+4*(num)) +#define MC_CMD_FC_OUT_SPI_READ_BUFFER_OFST 0 +#define MC_CMD_FC_OUT_SPI_READ_BUFFER_LEN 4 +#define MC_CMD_FC_OUT_SPI_READ_BUFFER_MINNUM 1 +#define MC_CMD_FC_OUT_SPI_READ_BUFFER_MAXNUM 63 + +/* MC_CMD_FC_OUT_SPI_WRITE msgresponse */ +#define MC_CMD_FC_OUT_SPI_WRITE_LEN 0 + +/* MC_CMD_FC_OUT_SPI_ERASE msgresponse */ +#define MC_CMD_FC_OUT_SPI_ERASE_LEN 0 + +/* MC_CMD_FC_OUT_DIAG_POWER_NOISE_READ_CONFIG msgresponse */ +#define MC_CMD_FC_OUT_DIAG_POWER_NOISE_READ_CONFIG_LEN 8 +/* The 32-bit value read from the toggle count register */ +#define MC_CMD_FC_OUT_DIAG_POWER_NOISE_READ_CONFIG_TOGGLE_COUNT_OFST 0 +#define MC_CMD_FC_OUT_DIAG_POWER_NOISE_READ_CONFIG_TOGGLE_COUNT_LEN 4 +/* The 32-bit value read from the clock enable count register */ +#define MC_CMD_FC_OUT_DIAG_POWER_NOISE_READ_CONFIG_CLKEN_COUNT_OFST 4 +#define MC_CMD_FC_OUT_DIAG_POWER_NOISE_READ_CONFIG_CLKEN_COUNT_LEN 4 + +/* MC_CMD_FC_OUT_DIAG_POWER_NOISE_WRITE_CONFIG msgresponse */ +#define MC_CMD_FC_OUT_DIAG_POWER_NOISE_WRITE_CONFIG_LEN 0 + +/* MC_CMD_FC_OUT_DIAG_DDR_SOAK_START msgresponse */ +#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_START_LEN 0 + +/* MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT msgresponse */ +#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_LEN 8 +/* DDR soak test status word; bits [4:0] are relevant. */ +#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_STATUS_OFST 0 +#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_STATUS_LEN 4 +#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_PASSED_LBN 0 +#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_PASSED_WIDTH 1 +#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_FAILED_LBN 1 +#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_FAILED_WIDTH 1 +#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_COMPLETED_LBN 2 +#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_COMPLETED_WIDTH 1 +#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_TIMEOUT_LBN 3 +#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_TIMEOUT_WIDTH 1 +#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_PNF_LBN 4 +#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_PNF_WIDTH 1 +/* DDR soak test error count */ +#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_ERR_COUNT_OFST 4 +#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_ERR_COUNT_LEN 4 + +/* MC_CMD_FC_OUT_DIAG_DDR_SOAK_STOP msgresponse */ +#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_STOP_LEN 0 + +/* MC_CMD_FC_OUT_DIAG_DDR_SOAK_ERROR msgresponse */ +#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_ERROR_LEN 0 + +/* MC_CMD_FC_OUT_DIAG_DATAPATH_CTRL_SET_MODE msgresponse */ +#define MC_CMD_FC_OUT_DIAG_DATAPATH_CTRL_SET_MODE_LEN 0 + +/* MC_CMD_FC_OUT_DIAG_DATAPATH_CTRL_RAW_CONFIG msgresponse */ +#define MC_CMD_FC_OUT_DIAG_DATAPATH_CTRL_RAW_CONFIG_LEN 0 + + +/***********************************/ +/* MC_CMD_AOE + * AOE operations on MC + */ +#define MC_CMD_AOE 0xa + +/* MC_CMD_AOE_IN msgrequest */ +#define MC_CMD_AOE_IN_LEN 4 +#define MC_CMD_AOE_IN_OP_HDR_OFST 0 +#define MC_CMD_AOE_IN_OP_HDR_LEN 4 +#define MC_CMD_AOE_IN_OP_LBN 0 +#define MC_CMD_AOE_IN_OP_WIDTH 8 +/* enum: FPGA and CPLD information */ +#define MC_CMD_AOE_OP_INFO 0x1 +/* enum: Currents and voltages read from MCP3424s; DEBUG */ +#define MC_CMD_AOE_OP_CURRENTS 0x2 +/* enum: Temperatures at locations around the PCB; DEBUG */ +#define MC_CMD_AOE_OP_TEMPERATURES 0x3 +/* 
enum: Set CPLD to idle */ +#define MC_CMD_AOE_OP_CPLD_IDLE 0x4 +/* enum: Read from CPLD register */ +#define MC_CMD_AOE_OP_CPLD_READ 0x5 +/* enum: Write to CPLD register */ +#define MC_CMD_AOE_OP_CPLD_WRITE 0x6 +/* enum: Execute CPLD instruction */ +#define MC_CMD_AOE_OP_CPLD_INSTRUCTION 0x7 +/* enum: Reprogram the CPLD on the AOE device */ +#define MC_CMD_AOE_OP_CPLD_REPROGRAM 0x8 +/* enum: AOE power control */ +#define MC_CMD_AOE_OP_POWER 0x9 +/* enum: AOE image loading */ +#define MC_CMD_AOE_OP_LOAD 0xa +/* enum: Fan monitoring */ +#define MC_CMD_AOE_OP_FAN_CONTROL 0xb +/* enum: Fan failures since last reset */ +#define MC_CMD_AOE_OP_FAN_FAILURES 0xc +/* enum: Get generic AOE MAC statistics */ +#define MC_CMD_AOE_OP_MAC_STATS 0xd +/* enum: Retrieve PHY specific information */ +#define MC_CMD_AOE_OP_GET_PHY_MEDIA_INFO 0xe +/* enum: Write a number of JTAG primitive commands, return will give data */ +#define MC_CMD_AOE_OP_JTAG_WRITE 0xf +/* enum: Control access to the FPGA via the Siena JTAG Chain */ +#define MC_CMD_AOE_OP_FPGA_ACCESS 0x10 +/* enum: Set the MTU offset between Siena and AOE MACs */ +#define MC_CMD_AOE_OP_SET_MTU_OFFSET 0x11 +/* enum: How link state is handled */ +#define MC_CMD_AOE_OP_LINK_STATE 0x12 +/* enum: How Siena MAC statistics are reported (deprecated - use + * MC_CMD_AOE_OP_ASIC_STATS) + */ +#define MC_CMD_AOE_OP_SIENA_STATS 0x13 +/* enum: How native ASIC MAC statistics are reported - replaces the deprecated + * command MC_CMD_AOE_OP_SIENA_STATS + */ +#define MC_CMD_AOE_OP_ASIC_STATS 0x13 +/* enum: DDR memory information */ +#define MC_CMD_AOE_OP_DDR 0x14 +/* enum: FC control */ +#define MC_CMD_AOE_OP_FC 0x15 +/* enum: DDR ECC status reads */ +#define MC_CMD_AOE_OP_DDR_ECC_STATUS 0x16 +/* enum: Commands for MC-SPI Master emulation */ +#define MC_CMD_AOE_OP_MC_SPI_MASTER 0x17 +/* enum: Commands for FC boot control */ +#define MC_CMD_AOE_OP_FC_BOOT 0x18 +/* enum: Get number of internal ports */ +#define MC_CMD_AOE_OP_GET_ASIC_PORTS 0x19 +/* enum: Get FC assert information and register dump */ +#define MC_CMD_AOE_OP_GET_FC_ASSERT_INFO 0x1a + +/* MC_CMD_AOE_OUT msgresponse */ +#define MC_CMD_AOE_OUT_LEN 0 + +/* MC_CMD_AOE_IN_INFO msgrequest */ +#define MC_CMD_AOE_IN_INFO_LEN 4 +#define MC_CMD_AOE_IN_CMD_OFST 0 +#define MC_CMD_AOE_IN_CMD_LEN 4 + +/* MC_CMD_AOE_IN_CURRENTS msgrequest */ +#define MC_CMD_AOE_IN_CURRENTS_LEN 4 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ + +/* MC_CMD_AOE_IN_TEMPERATURES msgrequest */ +#define MC_CMD_AOE_IN_TEMPERATURES_LEN 4 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ + +/* MC_CMD_AOE_IN_CPLD_IDLE msgrequest */ +#define MC_CMD_AOE_IN_CPLD_IDLE_LEN 4 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ + +/* MC_CMD_AOE_IN_CPLD_READ msgrequest */ +#define MC_CMD_AOE_IN_CPLD_READ_LEN 12 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ +#define MC_CMD_AOE_IN_CPLD_READ_REGISTER_OFST 4 +#define MC_CMD_AOE_IN_CPLD_READ_REGISTER_LEN 4 +#define MC_CMD_AOE_IN_CPLD_READ_WIDTH_OFST 8 +#define MC_CMD_AOE_IN_CPLD_READ_WIDTH_LEN 4 + +/* MC_CMD_AOE_IN_CPLD_WRITE msgrequest */ +#define MC_CMD_AOE_IN_CPLD_WRITE_LEN 16 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ +#define MC_CMD_AOE_IN_CPLD_WRITE_REGISTER_OFST 4 +#define MC_CMD_AOE_IN_CPLD_WRITE_REGISTER_LEN 4 +#define MC_CMD_AOE_IN_CPLD_WRITE_WIDTH_OFST 8 +#define MC_CMD_AOE_IN_CPLD_WRITE_WIDTH_LEN 4 +#define MC_CMD_AOE_IN_CPLD_WRITE_VALUE_OFST 12 +#define MC_CMD_AOE_IN_CPLD_WRITE_VALUE_LEN 4 + +/* MC_CMD_AOE_IN_CPLD_INSTRUCTION 
msgrequest */
+#define MC_CMD_AOE_IN_CPLD_INSTRUCTION_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+#define MC_CMD_AOE_IN_CPLD_INSTRUCTION_INSTRUCTION_OFST 4
+#define MC_CMD_AOE_IN_CPLD_INSTRUCTION_INSTRUCTION_LEN 4
+
+/* MC_CMD_AOE_IN_CPLD_REPROGRAM msgrequest */
+#define MC_CMD_AOE_IN_CPLD_REPROGRAM_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+#define MC_CMD_AOE_IN_CPLD_REPROGRAM_OP_OFST 4
+#define MC_CMD_AOE_IN_CPLD_REPROGRAM_OP_LEN 4
+/* enum: Reprogram CPLD, poll for completion */
+#define MC_CMD_AOE_IN_CPLD_REPROGRAM_REPROGRAM 0x1
+/* enum: Reprogram CPLD, send event on completion */
+#define MC_CMD_AOE_IN_CPLD_REPROGRAM_REPROGRAM_EVENT 0x3
+/* enum: Get status of reprogramming operation */
+#define MC_CMD_AOE_IN_CPLD_REPROGRAM_STATUS 0x4
+
+/* MC_CMD_AOE_IN_POWER msgrequest */
+#define MC_CMD_AOE_IN_POWER_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+/* Turn on or off AOE power */
+#define MC_CMD_AOE_IN_POWER_OP_OFST 4
+#define MC_CMD_AOE_IN_POWER_OP_LEN 4
+/* enum: Turn off FPGA power */
+#define MC_CMD_AOE_IN_POWER_OFF 0x0
+/* enum: Turn on FPGA power */
+#define MC_CMD_AOE_IN_POWER_ON 0x1
+/* enum: Clear peak power measurement */
+#define MC_CMD_AOE_IN_POWER_CLEAR 0x2
+/* enum: Show current power in sensors output */
+#define MC_CMD_AOE_IN_POWER_SHOW_CURRENT 0x3
+/* enum: Show peak power in sensors output */
+#define MC_CMD_AOE_IN_POWER_SHOW_PEAK 0x4
+/* enum: Show current DDR current */
+#define MC_CMD_AOE_IN_POWER_DDR_LAST 0x5
+/* enum: Show peak DDR current */
+#define MC_CMD_AOE_IN_POWER_DDR_PEAK 0x6
+/* enum: Clear peak DDR current */
+#define MC_CMD_AOE_IN_POWER_DDR_CLEAR 0x7
+
+/* MC_CMD_AOE_IN_LOAD msgrequest */
+#define MC_CMD_AOE_IN_LOAD_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+/* Image to load (0 - main or 1 - diagnostic) in the normal sequence */
+#define MC_CMD_AOE_IN_LOAD_IMAGE_OFST 4
+#define MC_CMD_AOE_IN_LOAD_IMAGE_LEN 4
+
+/* MC_CMD_AOE_IN_FAN_CONTROL msgrequest */
+#define MC_CMD_AOE_IN_FAN_CONTROL_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+/* If non-zero, report measured fan RPM rather than nominal */
+#define MC_CMD_AOE_IN_FAN_CONTROL_REAL_RPM_OFST 4
+#define MC_CMD_AOE_IN_FAN_CONTROL_REAL_RPM_LEN 4
+
+/* MC_CMD_AOE_IN_FAN_FAILURES msgrequest */
+#define MC_CMD_AOE_IN_FAN_FAILURES_LEN 4
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+
+/* MC_CMD_AOE_IN_MAC_STATS msgrequest */
+#define MC_CMD_AOE_IN_MAC_STATS_LEN 24
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+/* AOE port */
+#define MC_CMD_AOE_IN_MAC_STATS_PORT_OFST 4
+#define MC_CMD_AOE_IN_MAC_STATS_PORT_LEN 4
+/* Host memory address for statistics */
+#define MC_CMD_AOE_IN_MAC_STATS_DMA_ADDR_OFST 8
+#define MC_CMD_AOE_IN_MAC_STATS_DMA_ADDR_LEN 8
+#define MC_CMD_AOE_IN_MAC_STATS_DMA_ADDR_LO_OFST 8
+#define MC_CMD_AOE_IN_MAC_STATS_DMA_ADDR_HI_OFST 12
+#define MC_CMD_AOE_IN_MAC_STATS_CMD_OFST 16
+#define MC_CMD_AOE_IN_MAC_STATS_CMD_LEN 4
+#define MC_CMD_AOE_IN_MAC_STATS_DMA_LBN 0
+#define MC_CMD_AOE_IN_MAC_STATS_DMA_WIDTH 1
+#define MC_CMD_AOE_IN_MAC_STATS_CLEAR_LBN 1
+#define MC_CMD_AOE_IN_MAC_STATS_CLEAR_WIDTH 1
+#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_CHANGE_LBN 2
+#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_CHANGE_WIDTH 1
+#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_ENABLE_LBN 3
+#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_ENABLE_WIDTH 1
+#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_CLEAR_LBN 4
+#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_CLEAR_WIDTH
1 +#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_NOEVENT_LBN 5 +#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_NOEVENT_WIDTH 1 +#define MC_CMD_AOE_IN_MAC_STATS_PERIOD_MS_LBN 16 +#define MC_CMD_AOE_IN_MAC_STATS_PERIOD_MS_WIDTH 16 +/* Length of DMA data (optional) */ +#define MC_CMD_AOE_IN_MAC_STATS_DMA_LEN_OFST 20 +#define MC_CMD_AOE_IN_MAC_STATS_DMA_LEN_LEN 4 + +/* MC_CMD_AOE_IN_GET_PHY_MEDIA_INFO msgrequest */ +#define MC_CMD_AOE_IN_GET_PHY_MEDIA_INFO_LEN 12 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ +/* AOE port */ +#define MC_CMD_AOE_IN_GET_PHY_MEDIA_INFO_PORT_OFST 4 +#define MC_CMD_AOE_IN_GET_PHY_MEDIA_INFO_PORT_LEN 4 +#define MC_CMD_AOE_IN_GET_PHY_MEDIA_INFO_PAGE_OFST 8 +#define MC_CMD_AOE_IN_GET_PHY_MEDIA_INFO_PAGE_LEN 4 + +/* MC_CMD_AOE_IN_JTAG_WRITE msgrequest */ +#define MC_CMD_AOE_IN_JTAG_WRITE_LENMIN 12 +#define MC_CMD_AOE_IN_JTAG_WRITE_LENMAX 252 +#define MC_CMD_AOE_IN_JTAG_WRITE_LEN(num) (8+4*(num)) +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ +#define MC_CMD_AOE_IN_JTAG_WRITE_DATALEN_OFST 4 +#define MC_CMD_AOE_IN_JTAG_WRITE_DATALEN_LEN 4 +#define MC_CMD_AOE_IN_JTAG_WRITE_DATA_OFST 8 +#define MC_CMD_AOE_IN_JTAG_WRITE_DATA_LEN 4 +#define MC_CMD_AOE_IN_JTAG_WRITE_DATA_MINNUM 1 +#define MC_CMD_AOE_IN_JTAG_WRITE_DATA_MAXNUM 61 + +/* MC_CMD_AOE_IN_FPGA_ACCESS msgrequest */ +#define MC_CMD_AOE_IN_FPGA_ACCESS_LEN 8 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ +/* Enable or disable access */ +#define MC_CMD_AOE_IN_FPGA_ACCESS_OP_OFST 4 +#define MC_CMD_AOE_IN_FPGA_ACCESS_OP_LEN 4 +/* enum: Enable access */ +#define MC_CMD_AOE_IN_FPGA_ACCESS_ENABLE 0x1 +/* enum: Disable access */ +#define MC_CMD_AOE_IN_FPGA_ACCESS_DISABLE 0x2 + +/* MC_CMD_AOE_IN_SET_MTU_OFFSET msgrequest */ +#define MC_CMD_AOE_IN_SET_MTU_OFFSET_LEN 12 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ +/* AOE port - when not ALL_EXTERNAL or ALL_INTERNAL specifies port number */ +#define MC_CMD_AOE_IN_SET_MTU_OFFSET_PORT_OFST 4 +#define MC_CMD_AOE_IN_SET_MTU_OFFSET_PORT_LEN 4 +/* enum: Apply to all external ports */ +#define MC_CMD_AOE_IN_SET_MTU_OFFSET_ALL_EXTERNAL 0x8000 +/* enum: Apply to all internal ports */ +#define MC_CMD_AOE_IN_SET_MTU_OFFSET_ALL_INTERNAL 0x4000 +/* The MTU offset to be applied to the external ports */ +#define MC_CMD_AOE_IN_SET_MTU_OFFSET_OFFSET_OFST 8 +#define MC_CMD_AOE_IN_SET_MTU_OFFSET_OFFSET_LEN 4 + +/* MC_CMD_AOE_IN_LINK_STATE msgrequest */ +#define MC_CMD_AOE_IN_LINK_STATE_LEN 8 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ +#define MC_CMD_AOE_IN_LINK_STATE_MODE_OFST 4 +#define MC_CMD_AOE_IN_LINK_STATE_MODE_LEN 4 +#define MC_CMD_AOE_IN_LINK_STATE_CONFIG_MODE_LBN 0 +#define MC_CMD_AOE_IN_LINK_STATE_CONFIG_MODE_WIDTH 8 +/* enum: AOE and associated external port */ +#define MC_CMD_AOE_IN_LINK_STATE_SIMPLE_SEPARATE 0x0 +/* enum: AOE and OR of all external ports */ +#define MC_CMD_AOE_IN_LINK_STATE_SIMPLE_COMBINED 0x1 +/* enum: Individual ports */ +#define MC_CMD_AOE_IN_LINK_STATE_DIAGNOSTIC 0x2 +/* enum: Configure link state mode on given AOE port */ +#define MC_CMD_AOE_IN_LINK_STATE_CUSTOM 0x3 +#define MC_CMD_AOE_IN_LINK_STATE_OPERATION_LBN 8 +#define MC_CMD_AOE_IN_LINK_STATE_OPERATION_WIDTH 8 +/* enum: No-op */ +#define MC_CMD_AOE_IN_LINK_STATE_OP_NONE 0x0 +/* enum: logical OR of all SFP ports link status */ +#define MC_CMD_AOE_IN_LINK_STATE_OP_OR 0x1 +/* enum: logical AND of all SFP ports link status */ +#define MC_CMD_AOE_IN_LINK_STATE_OP_AND 0x2 +#define MC_CMD_AOE_IN_LINK_STATE_SFP_MASK_LBN 16 +#define 
MC_CMD_AOE_IN_LINK_STATE_SFP_MASK_WIDTH 16 + +/* MC_CMD_AOE_IN_GET_ASIC_PORTS msgrequest */ +#define MC_CMD_AOE_IN_GET_ASIC_PORTS_LEN 4 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ + +/* MC_CMD_AOE_IN_GET_FC_ASSERT_INFO msgrequest */ +#define MC_CMD_AOE_IN_GET_FC_ASSERT_INFO_LEN 4 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ + +/* MC_CMD_AOE_IN_SIENA_STATS msgrequest */ +#define MC_CMD_AOE_IN_SIENA_STATS_LEN 8 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ +/* How MAC statistics are reported */ +#define MC_CMD_AOE_IN_SIENA_STATS_MODE_OFST 4 +#define MC_CMD_AOE_IN_SIENA_STATS_MODE_LEN 4 +/* enum: Statistics from Siena (default) */ +#define MC_CMD_AOE_IN_SIENA_STATS_STATS_SIENA 0x0 +/* enum: Statistics from AOE external ports */ +#define MC_CMD_AOE_IN_SIENA_STATS_STATS_AOE 0x1 + +/* MC_CMD_AOE_IN_ASIC_STATS msgrequest */ +#define MC_CMD_AOE_IN_ASIC_STATS_LEN 8 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ +/* How MAC statistics are reported */ +#define MC_CMD_AOE_IN_ASIC_STATS_MODE_OFST 4 +#define MC_CMD_AOE_IN_ASIC_STATS_MODE_LEN 4 +/* enum: Statistics from the ASIC (default) */ +#define MC_CMD_AOE_IN_ASIC_STATS_STATS_ASIC 0x0 +/* enum: Statistics from AOE external ports */ +#define MC_CMD_AOE_IN_ASIC_STATS_STATS_AOE 0x1 + +/* MC_CMD_AOE_IN_DDR msgrequest */ +#define MC_CMD_AOE_IN_DDR_LEN 12 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ +#define MC_CMD_AOE_IN_DDR_BANK_OFST 4 +#define MC_CMD_AOE_IN_DDR_BANK_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_FC/MC_CMD_FC_IN_DDR/MC_CMD_FC_IN_DDR_BANK */ +/* Page index of SPD data */ +#define MC_CMD_AOE_IN_DDR_SPD_PAGE_ID_OFST 8 +#define MC_CMD_AOE_IN_DDR_SPD_PAGE_ID_LEN 4 + +/* MC_CMD_AOE_IN_FC msgrequest */ +#define MC_CMD_AOE_IN_FC_LEN 4 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ + +/* MC_CMD_AOE_IN_DDR_ECC_STATUS msgrequest */ +#define MC_CMD_AOE_IN_DDR_ECC_STATUS_LEN 8 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ +#define MC_CMD_AOE_IN_DDR_ECC_STATUS_BANK_OFST 4 +#define MC_CMD_AOE_IN_DDR_ECC_STATUS_BANK_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_FC/MC_CMD_FC_IN_DDR/MC_CMD_FC_IN_DDR_BANK */ + +/* MC_CMD_AOE_IN_MC_SPI_MASTER msgrequest */ +#define MC_CMD_AOE_IN_MC_SPI_MASTER_LEN 8 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ +/* Basic commands for MC SPI Master emulation. 
*/ +#define MC_CMD_AOE_IN_MC_SPI_MASTER_OP_OFST 4 +#define MC_CMD_AOE_IN_MC_SPI_MASTER_OP_LEN 4 +/* enum: MC SPI read */ +#define MC_CMD_AOE_IN_MC_SPI_MASTER_READ 0x0 +/* enum: MC SPI write */ +#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE 0x1 + +/* MC_CMD_AOE_IN_MC_SPI_MASTER_READ msgrequest */ +#define MC_CMD_AOE_IN_MC_SPI_MASTER_READ_LEN 12 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ +#define MC_CMD_AOE_IN_MC_SPI_MASTER_READ_OP_OFST 4 +#define MC_CMD_AOE_IN_MC_SPI_MASTER_READ_OP_LEN 4 +#define MC_CMD_AOE_IN_MC_SPI_MASTER_READ_OFFSET_OFST 8 +#define MC_CMD_AOE_IN_MC_SPI_MASTER_READ_OFFSET_LEN 4 + +/* MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE msgrequest */ +#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_LEN 16 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ +#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_OP_OFST 4 +#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_OP_LEN 4 +#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_OFFSET_OFST 8 +#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_OFFSET_LEN 4 +#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_DATA_OFST 12 +#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_DATA_LEN 4 + +/* MC_CMD_AOE_IN_FC_BOOT msgrequest */ +#define MC_CMD_AOE_IN_FC_BOOT_LEN 8 +/* MC_CMD_AOE_IN_CMD_OFST 0 */ +/* MC_CMD_AOE_IN_CMD_LEN 4 */ +/* FC boot control flags */ +#define MC_CMD_AOE_IN_FC_BOOT_CONTROL_OFST 4 +#define MC_CMD_AOE_IN_FC_BOOT_CONTROL_LEN 4 +#define MC_CMD_AOE_IN_FC_BOOT_CONTROL_BOOT_ENABLE_LBN 0 +#define MC_CMD_AOE_IN_FC_BOOT_CONTROL_BOOT_ENABLE_WIDTH 1 + +/* MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO msgresponse */ +#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_LEN 144 +/* Assertion status flag. */ +#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_GLOBAL_FLAGS_OFST 0 +#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_GLOBAL_FLAGS_LEN 4 +#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_STATE_LBN 8 +#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_STATE_WIDTH 8 +/* enum: No crash data available */ +/* MC_CMD_FC_GET_ASSERT_FLAGS_STATE_CLEAR 0x0 */ +/* enum: New crash data available */ +/* MC_CMD_FC_GET_ASSERT_FLAGS_STATE_NEW 0x1 */ +/* enum: Crash data has been sent */ +/* MC_CMD_FC_GET_ASSERT_FLAGS_STATE_NOTIFIED 0x2 */ +#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_TYPE_LBN 0 +#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_TYPE_WIDTH 8 +/* enum: No crash has been recorded. */ +/* MC_CMD_FC_GET_ASSERT_FLAGS_TYPE_NONE 0x0 */ +/* enum: Crash due to exception. */ +/* MC_CMD_FC_GET_ASSERT_FLAGS_TYPE_EXCEPTION 0x1 */ +/* enum: Crash due to assertion. 
*/
+/* MC_CMD_FC_GET_ASSERT_FLAGS_TYPE_ASSERTION 0x2 */
+/* Failing PC value */
+#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_SAVED_PC_OFFS_OFST 4
+#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_SAVED_PC_OFFS_LEN 4
+/* Saved GP regs */
+#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_GP_REGS_OFFS_OFST 8
+#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_GP_REGS_OFFS_LEN 4
+#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_GP_REGS_OFFS_NUM 31
+/* Exception type */
+#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_EXCEPTION_TYPE_OFFS_OFST 132
+#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_EXCEPTION_TYPE_OFFS_LEN 4
+/* Instruction at which the exception occurred */
+#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_EXCEPTION_PC_ADDR_OFFS_OFST 136
+#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_EXCEPTION_PC_ADDR_OFFS_LEN 4
+/* Bad address that triggered the address-based exception */
+#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_EXCEPTION_BAD_ADDR_OFFS_OFST 140
+#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_EXCEPTION_BAD_ADDR_OFFS_LEN 4
+
+/* MC_CMD_AOE_OUT_INFO msgresponse */
+#define MC_CMD_AOE_OUT_INFO_LEN 44
+/* JTAG IDCODE of CPLD */
+#define MC_CMD_AOE_OUT_INFO_CPLD_IDCODE_OFST 0
+#define MC_CMD_AOE_OUT_INFO_CPLD_IDCODE_LEN 4
+/* Version of CPLD */
+#define MC_CMD_AOE_OUT_INFO_CPLD_VERSION_OFST 4
+#define MC_CMD_AOE_OUT_INFO_CPLD_VERSION_LEN 4
+/* JTAG IDCODE of FPGA */
+#define MC_CMD_AOE_OUT_INFO_FPGA_IDCODE_OFST 8
+#define MC_CMD_AOE_OUT_INFO_FPGA_IDCODE_LEN 4
+/* JTAG USERCODE of FPGA */
+#define MC_CMD_AOE_OUT_INFO_FPGA_VERSION_OFST 12
+#define MC_CMD_AOE_OUT_INFO_FPGA_VERSION_LEN 4
+/* FPGA type - read from CPLD straps */
+#define MC_CMD_AOE_OUT_INFO_FPGA_TYPE_OFST 16
+#define MC_CMD_AOE_OUT_INFO_FPGA_TYPE_LEN 4
+#define MC_CMD_AOE_OUT_INFO_FPGA_TYPE_A5_C2 0x1 /* enum */
+#define MC_CMD_AOE_OUT_INFO_FPGA_TYPE_A7_C2 0x2 /* enum */
+/* FPGA state (debug) */
+#define MC_CMD_AOE_OUT_INFO_FPGA_STATE_OFST 20
+#define MC_CMD_AOE_OUT_INFO_FPGA_STATE_LEN 4
+/* FPGA image - partition from which loaded */
+#define MC_CMD_AOE_OUT_INFO_FPGA_IMAGE_OFST 24
+#define MC_CMD_AOE_OUT_INFO_FPGA_IMAGE_LEN 4
+/* FC state */
+#define MC_CMD_AOE_OUT_INFO_FC_STATE_OFST 28
+#define MC_CMD_AOE_OUT_INFO_FC_STATE_LEN 4
+/* enum: Set if watchdog working */
+#define MC_CMD_AOE_OUT_INFO_WATCHDOG 0x1
+/* enum: Set if MC-FC communications working */
+#define MC_CMD_AOE_OUT_INFO_COMMS 0x2
+/* Random pieces of information */
+#define MC_CMD_AOE_OUT_INFO_FLAGS_OFST 32
+#define MC_CMD_AOE_OUT_INFO_FLAGS_LEN 4
+/* enum: Power to FPGA supplied by PEG connector, not PCIe bus */
+#define MC_CMD_AOE_OUT_INFO_PEG_POWER 0x1
+/* enum: CPLD apparently good */
+#define MC_CMD_AOE_OUT_INFO_CPLD_GOOD 0x2
+/* enum: FPGA working normally */
+#define MC_CMD_AOE_OUT_INFO_FPGA_GOOD 0x4
+/* enum: FPGA is powered */
+#define MC_CMD_AOE_OUT_INFO_FPGA_POWER 0x8
+/* enum: Board has incompatible SODIMMs fitted */
+#define MC_CMD_AOE_OUT_INFO_BAD_SODIMM 0x10
+/* enum: Board has ByteBlaster connected */
+#define MC_CMD_AOE_OUT_INFO_HAS_BYTEBLASTER 0x20
+/* enum: FPGA Boot flash has an invalid header. */
+#define MC_CMD_AOE_OUT_INFO_FPGA_BAD_BOOT_HDR 0x40
+/* enum: FPGA Application flash is accessible. */
+#define MC_CMD_AOE_OUT_INFO_FPGA_APP_FLASH_GOOD 0x80
+/* Revision of Modena and Sorrento boards. Sorrento can be R1_2 or R1_3.
+ */
+#define MC_CMD_AOE_OUT_INFO_BOARD_REVISION_OFST 36
+#define MC_CMD_AOE_OUT_INFO_BOARD_REVISION_LEN 4
+#define MC_CMD_AOE_OUT_INFO_UNKNOWN 0x0 /* enum */
+#define MC_CMD_AOE_OUT_INFO_R1_0 0x10 /* enum */
+#define MC_CMD_AOE_OUT_INFO_R1_1 0x11 /* enum */
+#define MC_CMD_AOE_OUT_INFO_R1_2 0x12 /* enum */
+#define MC_CMD_AOE_OUT_INFO_R1_3 0x13 /* enum */
+/* Result of FC booting - not valid while a ByteBlaster is connected. */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_RESULT_OFST 40
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_RESULT_LEN 4
+/* enum: No error */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_NO_ERROR 0x0
+/* enum: Bad address set in CPLD */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_ADDRESS 0x1
+/* enum: Bad header */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_MAGIC 0x2
+/* enum: Bad text section details */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_TEXT 0x3
+/* enum: Bad checksum */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_CHECKSUM 0x4
+/* enum: Bad BSP */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_BSP 0x5
+/* enum: Flash mode is invalid */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_INVALID_FLASH_MODE 0x6
+/* enum: FC application loaded and execution attempted */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_APP_EXECUTE 0x80
+/* enum: FC application started */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_APP_STARTED 0x81
+/* enum: No bootrom in FPGA */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_NO_BOOTROM 0xff
+
+/* MC_CMD_AOE_OUT_CURRENTS msgresponse */
+#define MC_CMD_AOE_OUT_CURRENTS_LEN 68
+/* Set of currents and voltages (mA or mV as appropriate) */
+#define MC_CMD_AOE_OUT_CURRENTS_VALUES_OFST 0
+#define MC_CMD_AOE_OUT_CURRENTS_VALUES_LEN 4
+#define MC_CMD_AOE_OUT_CURRENTS_VALUES_NUM 17
+#define MC_CMD_AOE_OUT_CURRENTS_I_2V5 0x0 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_1V8 0x1 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_GXB 0x2 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_PGM 0x3 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_XCVR 0x4 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_1V5 0x5 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_V_3V3 0x6 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_V_1V5 0x7 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_IN 0x8 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_OUT 0x9 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_V_IN 0xa /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_OUT_DDR1 0xb /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_V_OUT_DDR1 0xc /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_OUT_DDR2 0xd /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_V_OUT_DDR2 0xe /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_OUT_DDR3 0xf /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_V_OUT_DDR3 0x10 /* enum */
+
+/* MC_CMD_AOE_OUT_TEMPERATURES msgresponse */
+#define MC_CMD_AOE_OUT_TEMPERATURES_LEN 40
+/* Set of temperatures */
+#define MC_CMD_AOE_OUT_TEMPERATURES_VALUES_OFST 0
+#define MC_CMD_AOE_OUT_TEMPERATURES_VALUES_LEN 4
+#define MC_CMD_AOE_OUT_TEMPERATURES_VALUES_NUM 10
+/* enum: The first set of enum values are for Modena code.
*/ +#define MC_CMD_AOE_OUT_TEMPERATURES_MAIN_0 0x0 +#define MC_CMD_AOE_OUT_TEMPERATURES_MAIN_1 0x1 /* enum */ +#define MC_CMD_AOE_OUT_TEMPERATURES_IND_0 0x2 /* enum */ +#define MC_CMD_AOE_OUT_TEMPERATURES_IND_1 0x3 /* enum */ +#define MC_CMD_AOE_OUT_TEMPERATURES_VCCIO1 0x4 /* enum */ +#define MC_CMD_AOE_OUT_TEMPERATURES_VCCIO2 0x5 /* enum */ +#define MC_CMD_AOE_OUT_TEMPERATURES_VCCIO3 0x6 /* enum */ +#define MC_CMD_AOE_OUT_TEMPERATURES_PSU 0x7 /* enum */ +#define MC_CMD_AOE_OUT_TEMPERATURES_FPGA 0x8 /* enum */ +#define MC_CMD_AOE_OUT_TEMPERATURES_SIENA 0x9 /* enum */ +/* enum: The second set of enum values are for Sorrento code. */ +#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_MAIN_0 0x0 +#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_MAIN_1 0x1 /* enum */ +#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_IND_0 0x2 /* enum */ +#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_IND_1 0x3 /* enum */ +#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_SODIMM_0 0x4 /* enum */ +#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_SODIMM_1 0x5 /* enum */ +#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_FPGA 0x6 /* enum */ +#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_PHY0 0x7 /* enum */ +#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_PHY1 0x8 /* enum */ + +/* MC_CMD_AOE_OUT_CPLD_READ msgresponse */ +#define MC_CMD_AOE_OUT_CPLD_READ_LEN 4 +/* The value read from the CPLD */ +#define MC_CMD_AOE_OUT_CPLD_READ_VALUE_OFST 0 +#define MC_CMD_AOE_OUT_CPLD_READ_VALUE_LEN 4 + +/* MC_CMD_AOE_OUT_FAN_FAILURES msgresponse */ +#define MC_CMD_AOE_OUT_FAN_FAILURES_LENMIN 4 +#define MC_CMD_AOE_OUT_FAN_FAILURES_LENMAX 252 +#define MC_CMD_AOE_OUT_FAN_FAILURES_LEN(num) (0+4*(num)) +/* Failure counts for each fan */ +#define MC_CMD_AOE_OUT_FAN_FAILURES_COUNT_OFST 0 +#define MC_CMD_AOE_OUT_FAN_FAILURES_COUNT_LEN 4 +#define MC_CMD_AOE_OUT_FAN_FAILURES_COUNT_MINNUM 1 +#define MC_CMD_AOE_OUT_FAN_FAILURES_COUNT_MAXNUM 63 + +/* MC_CMD_AOE_OUT_CPLD_REPROGRAM msgresponse */ +#define MC_CMD_AOE_OUT_CPLD_REPROGRAM_LEN 4 +/* Results of status command (only) */ +#define MC_CMD_AOE_OUT_CPLD_REPROGRAM_STATUS_OFST 0 +#define MC_CMD_AOE_OUT_CPLD_REPROGRAM_STATUS_LEN 4 + +/* MC_CMD_AOE_OUT_POWER_OFF msgresponse */ +#define MC_CMD_AOE_OUT_POWER_OFF_LEN 0 + +/* MC_CMD_AOE_OUT_POWER_ON msgresponse */ +#define MC_CMD_AOE_OUT_POWER_ON_LEN 0 + +/* MC_CMD_AOE_OUT_LOAD msgresponse */ +#define MC_CMD_AOE_OUT_LOAD_LEN 0 + +/* MC_CMD_AOE_OUT_MAC_STATS_DMA msgresponse */ +#define MC_CMD_AOE_OUT_MAC_STATS_DMA_LEN 0 + +/* MC_CMD_AOE_OUT_MAC_STATS_NO_DMA msgresponse: See MC_CMD_MAC_STATS_OUT_NO_DMA + * for details + */ +#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_LEN (((MC_CMD_MAC_NSTATS*64))>>3) +#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_OFST 0 +#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_LEN 8 +#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_LO_OFST 0 +#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_HI_OFST 4 +#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS + +/* MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO msgresponse */ +#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_LENMIN 5 +#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_LENMAX 252 +#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_LEN(num) (4+1*(num)) +/* in bytes */ +#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATALEN_OFST 0 +#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATALEN_LEN 4 +#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATA_OFST 4 +#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATA_LEN 1 +#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATA_MINNUM 1 +#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATA_MAXNUM 248 + +/* 
MC_CMD_AOE_OUT_JTAG_WRITE msgresponse */ +#define MC_CMD_AOE_OUT_JTAG_WRITE_LENMIN 12 +#define MC_CMD_AOE_OUT_JTAG_WRITE_LENMAX 252 +#define MC_CMD_AOE_OUT_JTAG_WRITE_LEN(num) (8+4*(num)) +/* Used to align the in and out data blocks so the MC can re-use the cmd */ +#define MC_CMD_AOE_OUT_JTAG_WRITE_DATALEN_OFST 0 +#define MC_CMD_AOE_OUT_JTAG_WRITE_DATALEN_LEN 4 +/* out bytes */ +#define MC_CMD_AOE_OUT_JTAG_WRITE_PAD_OFST 4 +#define MC_CMD_AOE_OUT_JTAG_WRITE_PAD_LEN 4 +#define MC_CMD_AOE_OUT_JTAG_WRITE_DATA_OFST 8 +#define MC_CMD_AOE_OUT_JTAG_WRITE_DATA_LEN 4 +#define MC_CMD_AOE_OUT_JTAG_WRITE_DATA_MINNUM 1 +#define MC_CMD_AOE_OUT_JTAG_WRITE_DATA_MAXNUM 61 + +/* MC_CMD_AOE_OUT_FPGA_ACCESS msgresponse */ +#define MC_CMD_AOE_OUT_FPGA_ACCESS_LEN 0 + +/* MC_CMD_AOE_OUT_DDR msgresponse */ +#define MC_CMD_AOE_OUT_DDR_LENMIN 17 +#define MC_CMD_AOE_OUT_DDR_LENMAX 252 +#define MC_CMD_AOE_OUT_DDR_LEN(num) (16+1*(num)) +/* Information on the module. */ +#define MC_CMD_AOE_OUT_DDR_FLAGS_OFST 0 +#define MC_CMD_AOE_OUT_DDR_FLAGS_LEN 4 +#define MC_CMD_AOE_OUT_DDR_PRESENT_LBN 0 +#define MC_CMD_AOE_OUT_DDR_PRESENT_WIDTH 1 +#define MC_CMD_AOE_OUT_DDR_POWERED_LBN 1 +#define MC_CMD_AOE_OUT_DDR_POWERED_WIDTH 1 +#define MC_CMD_AOE_OUT_DDR_OPERATIONAL_LBN 2 +#define MC_CMD_AOE_OUT_DDR_OPERATIONAL_WIDTH 1 +#define MC_CMD_AOE_OUT_DDR_NOT_REACHABLE_LBN 3 +#define MC_CMD_AOE_OUT_DDR_NOT_REACHABLE_WIDTH 1 +/* Memory size, in MB. */ +#define MC_CMD_AOE_OUT_DDR_CAPACITY_OFST 4 +#define MC_CMD_AOE_OUT_DDR_CAPACITY_LEN 4 +/* The memory type, as reported from SPD information */ +#define MC_CMD_AOE_OUT_DDR_TYPE_OFST 8 +#define MC_CMD_AOE_OUT_DDR_TYPE_LEN 4 +/* Nominal voltage of the module (as applied) */ +#define MC_CMD_AOE_OUT_DDR_VOLTAGE_OFST 12 +#define MC_CMD_AOE_OUT_DDR_VOLTAGE_LEN 4 +/* SPD data read from the module */ +#define MC_CMD_AOE_OUT_DDR_SPD_OFST 16 +#define MC_CMD_AOE_OUT_DDR_SPD_LEN 1 +#define MC_CMD_AOE_OUT_DDR_SPD_MINNUM 1 +#define MC_CMD_AOE_OUT_DDR_SPD_MAXNUM 236 + +/* MC_CMD_AOE_OUT_SET_MTU_OFFSET msgresponse */ +#define MC_CMD_AOE_OUT_SET_MTU_OFFSET_LEN 0 + +/* MC_CMD_AOE_OUT_LINK_STATE msgresponse */ +#define MC_CMD_AOE_OUT_LINK_STATE_LEN 0 + +/* MC_CMD_AOE_OUT_SIENA_STATS msgresponse */ +#define MC_CMD_AOE_OUT_SIENA_STATS_LEN 0 + +/* MC_CMD_AOE_OUT_ASIC_STATS msgresponse */ +#define MC_CMD_AOE_OUT_ASIC_STATS_LEN 0 + +/* MC_CMD_AOE_OUT_FC msgresponse */ +#define MC_CMD_AOE_OUT_FC_LEN 0 + +/* MC_CMD_AOE_OUT_GET_ASIC_PORTS msgresponse */ +#define MC_CMD_AOE_OUT_GET_ASIC_PORTS_LEN 4 +/* get the number of internal ports */ +#define MC_CMD_AOE_OUT_GET_ASIC_PORTS_COUNT_PORTS_OFST 0 +#define MC_CMD_AOE_OUT_GET_ASIC_PORTS_COUNT_PORTS_LEN 4 + +/* MC_CMD_AOE_OUT_DDR_ECC_STATUS msgresponse */ +#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_LEN 8 +/* Flags describing status info on the module. */ +#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_FLAGS_OFST 0 +#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_FLAGS_LEN 4 +#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_VALID_LBN 0 +#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_VALID_WIDTH 1 +/* DDR ECC status on the module. 
*/ +#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_STATUS_OFST 4 +#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_STATUS_LEN 4 +#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_SBE_LBN 0 +#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_SBE_WIDTH 1 +#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_DBE_LBN 1 +#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_DBE_WIDTH 1 +#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_CORDROP_LBN 2 +#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_CORDROP_WIDTH 1 +#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_SBE_COUNT_LBN 8 +#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_SBE_COUNT_WIDTH 8 +#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_DBE_COUNT_LBN 16 +#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_DBE_COUNT_WIDTH 8 +#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_CORDROP_COUNT_LBN 24 +#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_CORDROP_COUNT_WIDTH 8 + +/* MC_CMD_AOE_OUT_MC_SPI_MASTER_READ msgresponse */ +#define MC_CMD_AOE_OUT_MC_SPI_MASTER_READ_LEN 4 +#define MC_CMD_AOE_OUT_MC_SPI_MASTER_READ_DATA_OFST 0 +#define MC_CMD_AOE_OUT_MC_SPI_MASTER_READ_DATA_LEN 4 + +/* MC_CMD_AOE_OUT_MC_SPI_MASTER_WRITE msgresponse */ +#define MC_CMD_AOE_OUT_MC_SPI_MASTER_WRITE_LEN 0 + +/* MC_CMD_AOE_OUT_MC_SPI_MASTER msgresponse */ +#define MC_CMD_AOE_OUT_MC_SPI_MASTER_LEN 0 + +/* MC_CMD_AOE_OUT_FC_BOOT msgresponse */ +#define MC_CMD_AOE_OUT_FC_BOOT_LEN 0 + +#endif /* _SIENA_MC_DRIVER_PCOL_AOE_H */ +/*! \cidoxg_end */ diff --git a/drivers/net/sfc/base/efx_rx.c b/drivers/net/sfc/base/efx_rx.c index c0dcb752..4fd73bab 100644 --- a/drivers/net/sfc/base/efx_rx.c +++ b/drivers/net/sfc/base/efx_rx.c @@ -107,7 +107,7 @@ siena_rx_qcreate( __in unsigned int index, __in unsigned int label, __in efx_rxq_type_t type, - __in uint32_t type_data, + __in const efx_rxq_type_data_t *type_data, __in efsys_mem_t *esmp, __in size_t ndescs, __in uint32_t id, @@ -151,7 +151,7 @@ static const efx_rx_ops_t __efx_rx_siena_ops = { }; #endif /* EFSYS_OPT_SIENA */ -#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD +#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 static const efx_rx_ops_t __efx_rx_ef10_ops = { ef10_rx_init, /* erxo_init */ ef10_rx_fini, /* erxo_fini */ @@ -178,7 +178,7 @@ static const efx_rx_ops_t __efx_rx_ef10_ops = { ef10_rx_qcreate, /* erxo_qcreate */ ef10_rx_qdestroy, /* erxo_qdestroy */ }; -#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ +#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */ __checkReturn efx_rc_t @@ -220,6 +220,12 @@ efx_rx_init( break; #endif /* EFSYS_OPT_MEDFORD */ +#if EFSYS_OPT_MEDFORD2 + case EFX_FAMILY_MEDFORD2: + erxop = &__efx_rx_ef10_ops; + break; +#endif /* EFSYS_OPT_MEDFORD2 */ + default: EFSYS_ASSERT(0); rc = ENOTSUP; @@ -288,6 +294,94 @@ fail1: #endif /* EFSYS_OPT_RX_SCATTER */ #if EFSYS_OPT_RX_SCALE + __checkReturn efx_rc_t +efx_rx_scale_hash_flags_get( + __in efx_nic_t *enp, + __in efx_rx_hash_alg_t hash_alg, + __inout_ecount(EFX_RX_HASH_NFLAGS) unsigned int *flags, + __out unsigned int *nflagsp) +{ + efx_nic_cfg_t *encp = &enp->en_nic_cfg; + boolean_t l4; + boolean_t additional_modes; + unsigned int *entryp = flags; + efx_rc_t rc; + + if (flags == NULL || nflagsp == NULL) { + rc = EINVAL; + goto fail1; + } + + l4 = encp->enc_rx_scale_l4_hash_supported; + additional_modes = encp->enc_rx_scale_additional_modes_supported; + +#define LIST_FLAGS(_entryp, _class, _l4_hashing, _additional_modes) \ + do { \ + if (_l4_hashing) { \ + *(_entryp++) = EFX_RX_HASH(_class, 4TUPLE); \ + \ + if (_additional_modes) { \ + *(_entryp++) = \ + EFX_RX_HASH(_class, 2TUPLE_DST); \ + *(_entryp++) = \ + EFX_RX_HASH(_class, 2TUPLE_SRC); \ + } \ + } \ + \ + 
*(_entryp++) = EFX_RX_HASH(_class, 2TUPLE); \ + \ + if (_additional_modes) { \ + *(_entryp++) = EFX_RX_HASH(_class, 1TUPLE_DST); \ + *(_entryp++) = EFX_RX_HASH(_class, 1TUPLE_SRC); \ + } \ + \ + *(_entryp++) = EFX_RX_HASH(_class, DISABLE); \ + \ + _NOTE(CONSTANTCONDITION) \ + } while (B_FALSE) + + switch (hash_alg) { + case EFX_RX_HASHALG_PACKED_STREAM: + if ((encp->enc_rx_scale_hash_alg_mask & (1U << hash_alg)) == 0) + break; + /* FALLTHRU */ + case EFX_RX_HASHALG_TOEPLITZ: + if ((encp->enc_rx_scale_hash_alg_mask & (1U << hash_alg)) == 0) + break; + + LIST_FLAGS(entryp, IPV4_TCP, l4, additional_modes); + LIST_FLAGS(entryp, IPV6_TCP, l4, additional_modes); + + if (additional_modes) { + LIST_FLAGS(entryp, IPV4_UDP, l4, additional_modes); + LIST_FLAGS(entryp, IPV6_UDP, l4, additional_modes); + } + + LIST_FLAGS(entryp, IPV4, B_FALSE, additional_modes); + LIST_FLAGS(entryp, IPV6, B_FALSE, additional_modes); + break; + + default: + rc = EINVAL; + goto fail2; + } + +#undef LIST_FLAGS + + *nflagsp = (unsigned int)(entryp - flags); + EFSYS_ASSERT3U(*nflagsp, <=, EFX_RX_HASH_NFLAGS); + + return (0); + +fail2: + EFSYS_PROBE(fail2); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + __checkReturn efx_rc_t efx_rx_hash_default_support_get( __in efx_nic_t *enp, @@ -419,19 +513,82 @@ efx_rx_scale_mode_set( __in boolean_t insert) { const efx_rx_ops_t *erxop = enp->en_erxop; + unsigned int type_flags[EFX_RX_HASH_NFLAGS]; + unsigned int type_nflags; + efx_rx_hash_type_t type_check; + unsigned int i; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX); + /* + * Legacy flags and modern bits cannot be + * used at the same time in the hash type. + */ + if ((type & EFX_RX_HASH_LEGACY_MASK) && + (type & ~EFX_RX_HASH_LEGACY_MASK)) { + rc = EINVAL; + goto fail1; + } + + /* + * Translate legacy flags to the new representation + * so that chip-specific handlers will consider the + * new flags only. + */ + if (type & EFX_RX_HASH_IPV4) { + type |= EFX_RX_HASH(IPV4, 2TUPLE); + type |= EFX_RX_HASH(IPV4_TCP, 2TUPLE); + type |= EFX_RX_HASH(IPV4_UDP, 2TUPLE); + } + + if (type & EFX_RX_HASH_TCPIPV4) + type |= EFX_RX_HASH(IPV4_TCP, 4TUPLE); + + if (type & EFX_RX_HASH_IPV6) { + type |= EFX_RX_HASH(IPV6, 2TUPLE); + type |= EFX_RX_HASH(IPV6_TCP, 2TUPLE); + type |= EFX_RX_HASH(IPV6_UDP, 2TUPLE); + } + + if (type & EFX_RX_HASH_TCPIPV6) + type |= EFX_RX_HASH(IPV6_TCP, 4TUPLE); + + type &= ~EFX_RX_HASH_LEGACY_MASK; + type_check = type; + + /* + * Get the list of supported hash flags and sanitise the input. 
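
The sanitisation mentioned here clears every supported (class, tuple) flag combination out of the requested mask in the loop just below; anything left over is unsupported and rejected with EINVAL. The same subtract-out validation in isolation, with toy stand-in flag values (the real EFX_RX_HASH encoding differs):

    #include <stdio.h>

    /* Toy stand-ins for the efx hash flags (values are illustrative only). */
    #define HASH_IPV4_2TUPLE     0x01u
    #define HASH_IPV4_TCP_4TUPLE 0x02u
    #define HASH_IPV6_2TUPLE     0x04u

    int main(void)
    {
        unsigned int supported[] = { HASH_IPV4_2TUPLE, HASH_IPV4_TCP_4TUPLE };
        unsigned int requested = HASH_IPV4_2TUPLE | HASH_IPV6_2TUPLE;
        unsigned int check = requested;

        for (size_t i = 0; i < sizeof(supported) / sizeof(supported[0]); i++) {
            if ((check & supported[i]) == supported[i])
                check &= ~supported[i];
        }

        /* A non-zero remainder means an unsupported mode was requested. */
        printf(check != 0 ? "reject (EINVAL)\n" : "accept\n");
        return 0;
    }
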
+ */ + rc = efx_rx_scale_hash_flags_get(enp, alg, type_flags, &type_nflags); + if (rc != 0) + goto fail2; + + for (i = 0; i < type_nflags; ++i) { + if ((type_check & type_flags[i]) == type_flags[i]) + type_check &= ~(type_flags[i]); + } + + if (type_check != 0) { + rc = EINVAL; + goto fail3; + } + if (erxop->erxo_scale_mode_set != NULL) { if ((rc = erxop->erxo_scale_mode_set(enp, rss_context, alg, type, insert)) != 0) - goto fail1; + goto fail4; } return (0); +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); @@ -594,7 +751,7 @@ efx_rx_qcreate_internal( __in unsigned int index, __in unsigned int label, __in efx_rxq_type_t type, - __in uint32_t type_data, + __in const efx_rxq_type_data_t *type_data, __in efsys_mem_t *esmp, __in size_t ndescs, __in uint32_t id, @@ -655,8 +812,8 @@ efx_rx_qcreate( __in efx_evq_t *eep, __deref_out efx_rxq_t **erpp) { - return efx_rx_qcreate_internal(enp, index, label, type, 0, esmp, ndescs, - id, flags, eep, erpp); + return efx_rx_qcreate_internal(enp, index, label, type, NULL, + esmp, ndescs, id, flags, eep, erpp); } #if EFSYS_OPT_RX_PACKED_STREAM @@ -672,13 +829,71 @@ efx_rx_qcreate_packed_stream( __in efx_evq_t *eep, __deref_out efx_rxq_t **erpp) { + efx_rxq_type_data_t type_data; + + memset(&type_data, 0, sizeof(type_data)); + + type_data.ertd_packed_stream.eps_buf_size = ps_buf_size; + return efx_rx_qcreate_internal(enp, index, label, - EFX_RXQ_TYPE_PACKED_STREAM, ps_buf_size, esmp, ndescs, + EFX_RXQ_TYPE_PACKED_STREAM, &type_data, esmp, ndescs, 0 /* id unused on EF10 */, EFX_RXQ_FLAG_NONE, eep, erpp); } #endif +#if EFSYS_OPT_RX_ES_SUPER_BUFFER + + __checkReturn efx_rc_t +efx_rx_qcreate_es_super_buffer( + __in efx_nic_t *enp, + __in unsigned int index, + __in unsigned int label, + __in uint32_t n_bufs_per_desc, + __in uint32_t max_dma_len, + __in uint32_t buf_stride, + __in uint32_t hol_block_timeout, + __in efsys_mem_t *esmp, + __in size_t ndescs, + __in unsigned int flags, + __in efx_evq_t *eep, + __deref_out efx_rxq_t **erpp) +{ + efx_rc_t rc; + efx_rxq_type_data_t type_data; + + if (hol_block_timeout > EFX_RXQ_ES_SUPER_BUFFER_HOL_BLOCK_MAX) { + rc = EINVAL; + goto fail1; + } + + memset(&type_data, 0, sizeof(type_data)); + + type_data.ertd_es_super_buffer.eessb_bufs_per_desc = n_bufs_per_desc; + type_data.ertd_es_super_buffer.eessb_max_dma_len = max_dma_len; + type_data.ertd_es_super_buffer.eessb_buf_stride = buf_stride; + type_data.ertd_es_super_buffer.eessb_hol_block_timeout = + hol_block_timeout; + + rc = efx_rx_qcreate_internal(enp, index, label, + EFX_RXQ_TYPE_ES_SUPER_BUFFER, &type_data, esmp, ndescs, + 0 /* id unused on EF10 */, flags, eep, erpp); + if (rc != 0) + goto fail2; + + return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +#endif + + void efx_rx_qdestroy( __in efx_rxq_t *erp) @@ -875,6 +1090,10 @@ siena_rx_scale_mode_set( __in efx_rx_hash_type_t type, __in boolean_t insert) { + efx_rx_hash_type_t type_ipv4 = EFX_RX_HASH(IPV4, 2TUPLE); + efx_rx_hash_type_t type_ipv4_tcp = EFX_RX_HASH(IPV4_TCP, 4TUPLE); + efx_rx_hash_type_t type_ipv6 = EFX_RX_HASH(IPV6, 2TUPLE); + efx_rx_hash_type_t type_ipv6_tcp = EFX_RX_HASH(IPV6_TCP, 4TUPLE); efx_rc_t rc; if (rss_context != EFX_RSS_CONTEXT_DEFAULT) { @@ -889,12 +1108,12 @@ siena_rx_scale_mode_set( case EFX_RX_HASHALG_TOEPLITZ: EFX_RX_TOEPLITZ_IPV4_HASH(enp, insert, - type & EFX_RX_HASH_IPV4, - type & EFX_RX_HASH_TCPIPV4); + (type & type_ipv4) == 
type_ipv4, + (type & type_ipv4_tcp) == type_ipv4_tcp); EFX_RX_TOEPLITZ_IPV6_HASH(enp, - type & EFX_RX_HASH_IPV6, - type & EFX_RX_HASH_TCPIPV6, + (type & type_ipv6) == type_ipv6, + (type & type_ipv6_tcp) == type_ipv6_tcp, rc); if (rc != 0) goto fail2; @@ -1320,7 +1539,7 @@ siena_rx_qcreate( __in unsigned int index, __in unsigned int label, __in efx_rxq_type_t type, - __in uint32_t type_data, + __in const efx_rxq_type_data_t *type_data, __in efsys_mem_t *esmp, __in size_t ndescs, __in uint32_t id, diff --git a/drivers/net/sfc/base/efx_sram.c b/drivers/net/sfc/base/efx_sram.c index 1f0ba0a9..7851ff13 100644 --- a/drivers/net/sfc/base/efx_sram.c +++ b/drivers/net/sfc/base/efx_sram.c @@ -25,9 +25,10 @@ efx_sram_buf_tbl_set( EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC); -#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD +#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 if (enp->en_family == EFX_FAMILY_HUNTINGTON || - enp->en_family == EFX_FAMILY_MEDFORD) { + enp->en_family == EFX_FAMILY_MEDFORD || + enp->en_family == EFX_FAMILY_MEDFORD2) { /* * FIXME: the efx_sram_buf_tbl_*() functionality needs to be * pulled inside the Falcon/Siena queue create/destroy code, @@ -39,7 +40,7 @@ efx_sram_buf_tbl_set( return (0); } -#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ +#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */ if (stop >= EFX_BUF_TBL_SIZE) { rc = EFBIG; @@ -147,9 +148,10 @@ efx_sram_buf_tbl_clear( EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC); -#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD +#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 if (enp->en_family == EFX_FAMILY_HUNTINGTON || - enp->en_family == EFX_FAMILY_MEDFORD) { + enp->en_family == EFX_FAMILY_MEDFORD || + enp->en_family == EFX_FAMILY_MEDFORD2) { /* * FIXME: the efx_sram_buf_tbl_*() functionality needs to be * pulled inside the Falcon/Siena queue create/destroy code, @@ -161,7 +163,7 @@ efx_sram_buf_tbl_clear( return; } -#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ +#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */ EFSYS_ASSERT3U(stop, <, EFX_BUF_TBL_SIZE); diff --git a/drivers/net/sfc/base/efx_tunnel.c b/drivers/net/sfc/base/efx_tunnel.c index 25fa976f..399fd540 100644 --- a/drivers/net/sfc/base/efx_tunnel.c +++ b/drivers/net/sfc/base/efx_tunnel.c @@ -17,20 +17,20 @@ static const efx_tunnel_ops_t __efx_tunnel_dummy_ops = { }; #endif /* EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON */ -#if EFSYS_OPT_MEDFORD +#if EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 static __checkReturn boolean_t -medford_udp_encap_supported( +ef10_udp_encap_supported( __in efx_nic_t *enp); static __checkReturn efx_rc_t -medford_tunnel_reconfigure( +ef10_tunnel_reconfigure( __in efx_nic_t *enp); -static const efx_tunnel_ops_t __efx_tunnel_medford_ops = { - medford_udp_encap_supported, /* eto_udp_encap_supported */ - medford_tunnel_reconfigure, /* eto_reconfigure */ +static const efx_tunnel_ops_t __efx_tunnel_ef10_ops = { + ef10_udp_encap_supported, /* eto_udp_encap_supported */ + ef10_tunnel_reconfigure, /* eto_reconfigure */ }; -#endif /* EFSYS_OPT_MEDFORD */ +#endif /* EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */ static __checkReturn efx_rc_t efx_mcdi_set_tunnel_encap_udp_ports( @@ -161,10 +161,16 @@ efx_tunnel_init( #if EFSYS_OPT_MEDFORD case EFX_FAMILY_MEDFORD: - etop = &__efx_tunnel_medford_ops; + etop = &__efx_tunnel_ef10_ops; break; #endif /* 
EFSYS_OPT_MEDFORD */ +#if EFSYS_OPT_MEDFORD2 + case EFX_FAMILY_MEDFORD2: + etop = &__efx_tunnel_ef10_ops; + break; +#endif /* EFSYS_OPT_MEDFORD2 */ + default: EFSYS_ASSERT(0); rc = ENOTSUP; @@ -394,9 +400,9 @@ fail1: return (rc); } -#if EFSYS_OPT_MEDFORD +#if EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 static __checkReturn boolean_t -medford_udp_encap_supported( +ef10_udp_encap_supported( __in efx_nic_t *enp) { const efx_nic_cfg_t *encp = &enp->en_nic_cfg; @@ -410,7 +416,7 @@ medford_udp_encap_supported( } static __checkReturn efx_rc_t -medford_tunnel_reconfigure( +ef10_tunnel_reconfigure( __in efx_nic_t *enp) { efx_tunnel_cfg_t *etcp = &enp->en_tunnel_cfg; @@ -423,7 +429,7 @@ medford_tunnel_reconfigure( memcpy(&etc, etcp, sizeof (etc)); EFSYS_UNLOCK(enp->en_eslp, state); - if (medford_udp_encap_supported(enp) == B_FALSE) { + if (ef10_udp_encap_supported(enp) == B_FALSE) { /* * It is OK to apply empty UDP tunnel ports when UDP * tunnel encapsulations are not supported - just nothing @@ -458,6 +464,6 @@ fail1: return (rc); } -#endif /* EFSYS_OPT_MEDFORD */ +#endif /* EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */ #endif /* EFSYS_OPT_TUNNEL */ diff --git a/drivers/net/sfc/base/efx_tx.c b/drivers/net/sfc/base/efx_tx.c index 4e02c869..da37580a 100644 --- a/drivers/net/sfc/base/efx_tx.c +++ b/drivers/net/sfc/base/efx_tx.c @@ -117,6 +117,7 @@ static const efx_tx_ops_t __efx_tx_siena_ops = { NULL, /* etxo_qdesc_tso_create */ NULL, /* etxo_qdesc_tso2_create */ NULL, /* etxo_qdesc_vlantci_create */ + NULL, /* etxo_qdesc_checksum_create */ #if EFSYS_OPT_QSTATS siena_tx_qstats_update, /* etxo_qstats_update */ #endif @@ -143,6 +144,7 @@ static const efx_tx_ops_t __efx_tx_hunt_ops = { ef10_tx_qdesc_tso_create, /* etxo_qdesc_tso_create */ ef10_tx_qdesc_tso2_create, /* etxo_qdesc_tso2_create */ ef10_tx_qdesc_vlantci_create, /* etxo_qdesc_vlantci_create */ + ef10_tx_qdesc_checksum_create, /* etxo_qdesc_checksum_create */ #if EFSYS_OPT_QSTATS ef10_tx_qstats_update, /* etxo_qstats_update */ #endif @@ -169,12 +171,41 @@ static const efx_tx_ops_t __efx_tx_medford_ops = { NULL, /* etxo_qdesc_tso_create */ ef10_tx_qdesc_tso2_create, /* etxo_qdesc_tso2_create */ ef10_tx_qdesc_vlantci_create, /* etxo_qdesc_vlantci_create */ + ef10_tx_qdesc_checksum_create, /* etxo_qdesc_checksum_create */ #if EFSYS_OPT_QSTATS ef10_tx_qstats_update, /* etxo_qstats_update */ #endif }; #endif /* EFSYS_OPT_MEDFORD */ +#if EFSYS_OPT_MEDFORD2 +static const efx_tx_ops_t __efx_tx_medford2_ops = { + ef10_tx_init, /* etxo_init */ + ef10_tx_fini, /* etxo_fini */ + ef10_tx_qcreate, /* etxo_qcreate */ + ef10_tx_qdestroy, /* etxo_qdestroy */ + ef10_tx_qpost, /* etxo_qpost */ + ef10_tx_qpush, /* etxo_qpush */ + ef10_tx_qpace, /* etxo_qpace */ + ef10_tx_qflush, /* etxo_qflush */ + ef10_tx_qenable, /* etxo_qenable */ + ef10_tx_qpio_enable, /* etxo_qpio_enable */ + ef10_tx_qpio_disable, /* etxo_qpio_disable */ + ef10_tx_qpio_write, /* etxo_qpio_write */ + ef10_tx_qpio_post, /* etxo_qpio_post */ + ef10_tx_qdesc_post, /* etxo_qdesc_post */ + ef10_tx_qdesc_dma_create, /* etxo_qdesc_dma_create */ + NULL, /* etxo_qdesc_tso_create */ + ef10_tx_qdesc_tso2_create, /* etxo_qdesc_tso2_create */ + ef10_tx_qdesc_vlantci_create, /* etxo_qdesc_vlantci_create */ + ef10_tx_qdesc_checksum_create, /* etxo_qdesc_checksum_create */ +#if EFSYS_OPT_QSTATS + ef10_tx_qstats_update, /* etxo_qstats_update */ +#endif +}; +#endif /* EFSYS_OPT_MEDFORD2 */ + + __checkReturn efx_rc_t efx_tx_init( __in efx_nic_t *enp) @@ -214,6 +245,12 @@ efx_tx_init( break; #endif /* EFSYS_OPT_MEDFORD 
*/ +#if EFSYS_OPT_MEDFORD2 + case EFX_FAMILY_MEDFORD2: + etxop = &__efx_tx_medford2_ops; + break; +#endif /* EFSYS_OPT_MEDFORD2 */ + default: EFSYS_ASSERT(0); rc = ENOTSUP; @@ -588,6 +625,7 @@ efx_tx_qdesc_tso_create( efx_tx_qdesc_tso2_create( __in efx_txq_t *etp, __in uint16_t ipv4_id, + __in uint16_t outer_ipv4_id, __in uint32_t tcp_seq, __in uint16_t mss, __out_ecount(count) efx_desc_t *edp, @@ -599,7 +637,8 @@ efx_tx_qdesc_tso2_create( EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC); EFSYS_ASSERT(etxop->etxo_qdesc_tso2_create != NULL); - etxop->etxo_qdesc_tso2_create(etp, ipv4_id, tcp_seq, mss, edp, count); + etxop->etxo_qdesc_tso2_create(etp, ipv4_id, outer_ipv4_id, + tcp_seq, mss, edp, count); } void @@ -617,6 +656,21 @@ efx_tx_qdesc_vlantci_create( etxop->etxo_qdesc_vlantci_create(etp, tci, edp); } + void +efx_tx_qdesc_checksum_create( + __in efx_txq_t *etp, + __in uint16_t flags, + __out efx_desc_t *edp) +{ + efx_nic_t *enp = etp->et_enp; + const efx_tx_ops_t *etxop = enp->en_etxop; + + EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC); + EFSYS_ASSERT(etxop->etxo_qdesc_checksum_create != NULL); + + etxop->etxo_qdesc_checksum_create(etp, flags, edp); +} + #if EFSYS_OPT_QSTATS void diff --git a/drivers/net/sfc/base/efx_types.h b/drivers/net/sfc/base/efx_types.h index 0581f67f..65168ab7 100644 --- a/drivers/net/sfc/base/efx_types.h +++ b/drivers/net/sfc/base/efx_types.h @@ -328,6 +328,16 @@ extern int fix_lint; #define FIX_LINT(_x) (_x) #endif +/* + * Saturation arithmetic subtract with minimum equal to zero. + * + * Use saturating arithmetic to ensure a non-negative result. This + * avoids undefined behaviour (and compiler warnings) when used as a + * shift count. + */ +#define EFX_SSUB(_val, _sub) \ + ((_val) > (_sub) ? ((_val) - (_sub)) : 0) + /* * Extract bit field portion [low,high) from the native-endian element * which contains bits [min,max). @@ -347,8 +357,8 @@ extern int fix_lint; ((FIX_LINT(_low > _max) || FIX_LINT(_high < _min)) ? \ 0U : \ ((_low > _min) ? \ - ((_element) >> (_low - _min)) : \ - ((_element) << (_min - _low)))) + ((_element) >> EFX_SSUB(_low, _min)) : \ + ((_element) << EFX_SSUB(_min, _low)))) /* * Extract bit field portion [low,high) from the 64-bit little-endian @@ -537,29 +547,29 @@ extern int fix_lint; (((_low > _max) || (_high < _min)) ? \ 0U : \ ((_low > _min) ? \ - (((uint64_t)(_value)) << (_low - _min)) : \ - (((uint64_t)(_value)) >> (_min - _low)))) + (((uint64_t)(_value)) << EFX_SSUB(_low, _min)) :\ + (((uint64_t)(_value)) >> EFX_SSUB(_min, _low)))) #define EFX_INSERT_NATIVE32(_min, _max, _low, _high, _value) \ (((_low > _max) || (_high < _min)) ? \ 0U : \ ((_low > _min) ? \ - (((uint32_t)(_value)) << (_low - _min)) : \ - (((uint32_t)(_value)) >> (_min - _low)))) + (((uint32_t)(_value)) << EFX_SSUB(_low, _min)) :\ + (((uint32_t)(_value)) >> EFX_SSUB(_min, _low)))) #define EFX_INSERT_NATIVE16(_min, _max, _low, _high, _value) \ (((_low > _max) || (_high < _min)) ? \ 0U : \ (uint16_t)((_low > _min) ? \ - ((_value) << (_low - _min)) : \ - ((_value) >> (_min - _low)))) + ((_value) << EFX_SSUB(_low, _min)) : \ + ((_value) >> EFX_SSUB(_min, _low)))) #define EFX_INSERT_NATIVE8(_min, _max, _low, _high, _value) \ (((_low > _max) || (_high < _min)) ? \ 0U : \ (uint8_t)((_low > _min) ? 
\ - ((_value) << (_low - _min)) : \ - ((_value) >> (_min - _low)))) + ((_value) << EFX_SSUB(_low, _min)) : \ + ((_value) >> EFX_SSUB(_min, _low)))) /* * Construct bit field portion @@ -1288,22 +1298,22 @@ extern int fix_lint; #define EFX_SHIFT64(_bit, _base) \ (((_bit) >= (_base) && (_bit) < (_base) + 64) ? \ - ((uint64_t)1 << ((_bit) - (_base))) : \ + ((uint64_t)1 << EFX_SSUB((_bit), (_base))) : \ 0U) #define EFX_SHIFT32(_bit, _base) \ (((_bit) >= (_base) && (_bit) < (_base) + 32) ? \ - ((uint32_t)1 << ((_bit) - (_base))) : \ + ((uint32_t)1 << EFX_SSUB((_bit),(_base))) : \ 0U) #define EFX_SHIFT16(_bit, _base) \ (((_bit) >= (_base) && (_bit) < (_base) + 16) ? \ - (uint16_t)(1 << ((_bit) - (_base))) : \ + (uint16_t)(1 << EFX_SSUB((_bit), (_base))) : \ 0U) #define EFX_SHIFT8(_bit, _base) \ (((_bit) >= (_base) && (_bit) < (_base) + 8) ? \ - (uint8_t)(1 << ((_bit) - (_base))) : \ + (uint8_t)(1 << EFX_SSUB((_bit), (_base))) : \ 0U) #define EFX_SET_OWORD_BIT64(_oword, _bit) \ diff --git a/drivers/net/sfc/base/efx_vpd.c b/drivers/net/sfc/base/efx_vpd.c index 7b8138f3..6d783d74 100644 --- a/drivers/net/sfc/base/efx_vpd.c +++ b/drivers/net/sfc/base/efx_vpd.c @@ -44,7 +44,7 @@ static const efx_vpd_ops_t __efx_vpd_siena_ops = { #endif /* EFSYS_OPT_SIENA */ -#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD +#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 static const efx_vpd_ops_t __efx_vpd_ef10_ops = { ef10_vpd_init, /* evpdo_init */ @@ -59,7 +59,7 @@ static const efx_vpd_ops_t __efx_vpd_ef10_ops = { ef10_vpd_fini, /* evpdo_fini */ }; -#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ +#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */ __checkReturn efx_rc_t efx_vpd_init( @@ -91,6 +91,12 @@ efx_vpd_init( break; #endif /* EFSYS_OPT_MEDFORD */ +#if EFSYS_OPT_MEDFORD2 + case EFX_FAMILY_MEDFORD2: + evpdop = &__efx_vpd_ef10_ops; + break; +#endif /* EFSYS_OPT_MEDFORD2 */ + default: EFSYS_ASSERT(0); rc = ENOTSUP; diff --git a/drivers/net/sfc/base/hunt_nic.c b/drivers/net/sfc/base/hunt_nic.c index d03cc138..16ea81d2 100644 --- a/drivers/net/sfc/base/hunt_nic.c +++ b/drivers/net/sfc/base/hunt_nic.c @@ -36,7 +36,7 @@ hunt_nic_get_required_pcie_bandwidth( goto out; } - if (port_modes & (1 << TLV_PORT_MODE_40G_40G)) { + if (port_modes & (1U << TLV_PORT_MODE_40G_40G)) { /* * This needs the full PCIe bandwidth (and could use * more) - roughly 64 Gbit/s for 8 lanes of Gen3. @@ -45,9 +45,9 @@ hunt_nic_get_required_pcie_bandwidth( EFX_PCIE_LINK_SPEED_GEN3, &bandwidth)) != 0) goto fail1; } else { - if (port_modes & (1 << TLV_PORT_MODE_40G)) { + if (port_modes & (1U << TLV_PORT_MODE_40G)) { max_port_mode = TLV_PORT_MODE_40G; - } else if (port_modes & (1 << TLV_PORT_MODE_10G_10G_10G_10G)) { + } else if (port_modes & (1U << TLV_PORT_MODE_10G_10G_10G_10G)) { max_port_mode = TLV_PORT_MODE_10G_10G_10G_10G; } else { /* Assume two 10G ports */ @@ -76,90 +76,13 @@ fail1: hunt_board_cfg( __in efx_nic_t *enp) { - efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); efx_nic_cfg_t *encp = &(enp->en_nic_cfg); - uint8_t mac_addr[6] = { 0 }; - uint32_t board_type = 0; - ef10_link_state_t els; efx_port_t *epp = &(enp->en_port); - uint32_t port; - uint32_t pf; - uint32_t vf; - uint32_t mask; uint32_t flags; uint32_t sysclk, dpcpu_clk; - uint32_t base, nvec; uint32_t bandwidth; efx_rc_t rc; - if ((rc = efx_mcdi_get_port_assignment(enp, &port)) != 0) - goto fail1; - - /* - * NOTE: The MCDI protocol numbers ports from zero. - * The common code MCDI interface numbers ports from one. 
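
Two of the changes above share one motive: keeping shifts well defined. `1 << TLV_PORT_MODE_*` becomes `1U << ...` so a mode number of 31 cannot overflow a signed int, and EFX_SSUB clamps the subtraction used as a shift count to zero, so the arithmetically dead branch of each ternary never forms a negative shift (compilers constant-fold both branches and warn, and in constant expressions the out-of-range shift is undefined behaviour). The clamped-shift pattern in isolation, with illustrative field positions:

    #include <stdio.h>

    /* Saturating subtract: never negative, so always a valid shift count. */
    #define SSUB(a, b) ((a) > (b) ? (a) - (b) : 0u)

    /* Extract bits [low, high] of a 32-bit element covering bits [min, max]. */
    #define EXTRACT32(elem, min, max, low, high)              \
        ((low) > (min) ? (elem) >> SSUB(low, min)             \
                       : (elem) << SSUB(min, low))

    int main(void)
    {
        unsigned int elem = 0xabcd0000u;
        /* Both SSUB arms stay in range even though only one is selected. */
        printf("0x%x\n", EXTRACT32(elem, 0, 31, 16, 31)); /* prints 0xabcd */
        return 0;
    }
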
- */ - emip->emi_port = port + 1; - - if ((rc = ef10_external_port_mapping(enp, port, - &encp->enc_external_port)) != 0) - goto fail2; - - /* - * Get PCIe function number from firmware (used for - * per-function privilege and dynamic config info). - * - PCIe PF: pf = PF number, vf = 0xffff. - * - PCIe VF: pf = parent PF, vf = VF number. - */ - if ((rc = efx_mcdi_get_function_info(enp, &pf, &vf)) != 0) - goto fail3; - - encp->enc_pf = pf; - encp->enc_vf = vf; - - /* MAC address for this function */ - if (EFX_PCI_FUNCTION_IS_PF(encp)) { - rc = efx_mcdi_get_mac_address_pf(enp, mac_addr); - if ((rc == 0) && (mac_addr[0] & 0x02)) { - /* - * If the static config does not include a global MAC - * address pool then the board may return a locally - * administered MAC address (this should only happen on - * incorrectly programmed boards). - */ - rc = EINVAL; - } - } else { - rc = efx_mcdi_get_mac_address_vf(enp, mac_addr); - } - if (rc != 0) - goto fail4; - - EFX_MAC_ADDR_COPY(encp->enc_mac_addr, mac_addr); - - /* Board configuration */ - rc = efx_mcdi_get_board_cfg(enp, &board_type, NULL, NULL); - if (rc != 0) { - /* Unprivileged functions may not be able to read board cfg */ - if (rc == EACCES) - board_type = 0; - else - goto fail5; - } - - encp->enc_board_type = board_type; - encp->enc_clk_mult = 1; /* not used for Huntington */ - - /* Fill out fields in enp->en_port and enp->en_nic_cfg from MCDI */ - if ((rc = efx_mcdi_get_phy_cfg(enp)) != 0) - goto fail6; - - /* Obtain the default PHY advertised capabilities */ - if ((rc = ef10_phy_get_link(enp, &els)) != 0) - goto fail7; - epp->ep_default_adv_cap_mask = els.els_adv_cap_mask; - epp->ep_adv_cap_mask = els.els_adv_cap_mask; - /* * Enable firmware workarounds for hardware errata. * Expected responses are: @@ -187,7 +110,7 @@ hunt_board_cfg( else if ((rc == ENOTSUP) || (rc == ENOENT)) encp->enc_bug35388_workaround = B_FALSE; else - goto fail8; + goto fail1; /* * If the bug41750 workaround is enabled, then do not test interrupts, @@ -206,7 +129,7 @@ hunt_board_cfg( } else if ((rc == ENOTSUP) || (rc == ENOENT)) { encp->enc_bug41750_workaround = B_FALSE; } else { - goto fail9; + goto fail2; } if (EFX_PCI_FUNCTION_IS_VF(encp)) { /* Interrupt testing does not work for VFs. See bug50084. */ @@ -244,12 +167,12 @@ hunt_board_cfg( } else if ((rc == ENOTSUP) || (rc == ENOENT)) { encp->enc_bug26807_workaround = B_FALSE; } else { - goto fail10; + goto fail3; } /* Get clock frequencies (in MHz). */ if ((rc = efx_mcdi_get_clock(enp, &sysclk, &dpcpu_clk)) != 0) - goto fail11; + goto fail4; /* * The Huntington timer quantum is 1536 sysclk cycles, documented for @@ -266,80 +189,23 @@ hunt_board_cfg( encp->enc_bug61265_workaround = B_FALSE; /* Medford only */ - /* Check capabilities of running datapath firmware */ - if ((rc = ef10_get_datapath_caps(enp)) != 0) - goto fail12; - /* Alignment for receive packet DMA buffers */ encp->enc_rx_buf_align_start = 1; encp->enc_rx_buf_align_end = 64; /* RX DMA end padding */ - /* Alignment for WPTR updates */ - encp->enc_rx_push_align = EF10_RX_WPTR_ALIGN; - - /* - * Maximum number of exclusive RSS contexts which can be allocated. The - * hardware supports 64, but 6 are reserved for shared contexts. They - * are a global resource so not all may be available. - */ - encp->enc_rx_scale_max_exclusive_contexts = 58; - - encp->enc_tx_dma_desc_size_max = EFX_MASK32(ESF_DZ_RX_KER_BYTE_CNT); - /* No boundary crossing limits */ - encp->enc_tx_dma_desc_boundary = 0; - - /* - * Set resource limits for MC_CMD_ALLOC_VIS. 
Note that we cannot use - * MC_CMD_GET_RESOURCE_LIMITS here as that reports the available - * resources (allocated to this PCIe function), which is zero until - * after we have allocated VIs. - */ - encp->enc_evq_limit = 1024; - encp->enc_rxq_limit = EFX_RXQ_LIMIT_TARGET; - encp->enc_txq_limit = EFX_TXQ_LIMIT_TARGET; - /* * The workaround for bug35388 uses the top bit of transmit queue * descriptor writes, preventing the use of 4096 descriptor TXQs. */ encp->enc_txq_max_ndescs = encp->enc_bug35388_workaround ? 2048 : 4096; - encp->enc_buftbl_limit = 0xFFFFFFFF; - + EFX_STATIC_ASSERT(HUNT_PIOBUF_NBUFS <= EF10_MAX_PIOBUF_NBUFS); encp->enc_piobuf_limit = HUNT_PIOBUF_NBUFS; encp->enc_piobuf_size = HUNT_PIOBUF_SIZE; encp->enc_piobuf_min_alloc_size = HUNT_MIN_PIO_ALLOC_SIZE; - /* - * Get the current privilege mask. Note that this may be modified - * dynamically, so this value is informational only. DO NOT use - * the privilege mask to check for sufficient privileges, as that - * can result in time-of-check/time-of-use bugs. - */ - if ((rc = ef10_get_privilege_mask(enp, &mask)) != 0) - goto fail13; - encp->enc_privilege_mask = mask; - - /* Get interrupt vector limits */ - if ((rc = efx_mcdi_get_vector_cfg(enp, &base, &nvec, NULL)) != 0) { - if (EFX_PCI_FUNCTION_IS_PF(encp)) - goto fail14; - - /* Ignore error (cannot query vector limits from a VF). */ - base = 0; - nvec = 1024; - } - encp->enc_intr_vec_base = base; - encp->enc_intr_limit = nvec; - - /* - * Maximum number of bytes into the frame the TCP header can start for - * firmware assisted TSO to work. - */ - encp->enc_tx_tso_tcp_header_offset_limit = EF10_TCP_HEADER_OFFSET_LIMIT; - if ((rc = hunt_nic_get_required_pcie_bandwidth(enp, &bandwidth)) != 0) - goto fail15; + goto fail5; encp->enc_required_pcie_bandwidth_mbps = bandwidth; /* All Huntington devices have a PCIe Gen3, 8 lane connector */ @@ -347,26 +213,6 @@ hunt_board_cfg( return (0); -fail15: - EFSYS_PROBE(fail15); -fail14: - EFSYS_PROBE(fail14); -fail13: - EFSYS_PROBE(fail13); -fail12: - EFSYS_PROBE(fail12); -fail11: - EFSYS_PROBE(fail11); -fail10: - EFSYS_PROBE(fail10); -fail9: - EFSYS_PROBE(fail9); -fail8: - EFSYS_PROBE(fail8); -fail7: - EFSYS_PROBE(fail7); -fail6: - EFSYS_PROBE(fail6); fail5: EFSYS_PROBE(fail5); fail4: diff --git a/drivers/net/sfc/base/mcdi_mon.c b/drivers/net/sfc/base/mcdi_mon.c index e4de0dab..940bd026 100644 --- a/drivers/net/sfc/base/mcdi_mon.c +++ b/drivers/net/sfc/base/mcdi_mon.c @@ -135,6 +135,10 @@ static const struct mcdi_sensor_map_s { STAT(Px, BOARD_BACK_TEMP), /* 0x50 BOARD_BACK_TEMP */ STAT(Px, I1V8), /* 0x51 IN_I1V8 */ STAT(Px, I2V5), /* 0x52 IN_I2V5 */ + STAT(Px, I3V3), /* 0x53 IN_I3V3 */ + STAT(Px, I12V0), /* 0x54 IN_I12V0 */ + STAT(Px, 1_3V), /* 0x55 IN_1V3 */ + STAT(Px, I1V3), /* 0x56 IN_I1V3 */ }; #define MCDI_STATIC_SENSOR_ASSERT(_field) \ @@ -476,6 +480,11 @@ mcdi_mon_cfg_build( case EFX_FAMILY_MEDFORD: encp->enc_mon_type = EFX_MON_SFC92X0; break; +#endif +#if EFSYS_OPT_MEDFORD2 + case EFX_FAMILY_MEDFORD2: + encp->enc_mon_type = EFX_MON_SFC92X0; + break; #endif default: rc = EINVAL; diff --git a/drivers/net/sfc/base/medford2_impl.h b/drivers/net/sfc/base/medford2_impl.h new file mode 100644 index 00000000..6259a700 --- /dev/null +++ b/drivers/net/sfc/base/medford2_impl.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright (c) 2015-2018 Solarflare Communications Inc. + * All rights reserved. 
+ */ + +#ifndef _SYS_MEDFORD2_IMPL_H +#define _SYS_MEDFORD2_IMPL_H + +#ifdef __cplusplus +extern "C" { +#endif + + +#ifndef ER_EZ_TX_PIOBUF_SIZE +#define ER_EZ_TX_PIOBUF_SIZE 4096 +#endif + + +#define MEDFORD2_PIOBUF_NBUFS (16) +#define MEDFORD2_PIOBUF_SIZE (ER_EZ_TX_PIOBUF_SIZE) + +#define MEDFORD2_MIN_PIO_ALLOC_SIZE (MEDFORD2_PIOBUF_SIZE / 32) + + +extern __checkReturn efx_rc_t +medford2_board_cfg( + __in efx_nic_t *enp); + + +#ifdef __cplusplus +} +#endif + +#endif /* _SYS_MEDFORD2_IMPL_H */ diff --git a/drivers/net/sfc/base/medford2_nic.c b/drivers/net/sfc/base/medford2_nic.c new file mode 100644 index 00000000..7f5ad175 --- /dev/null +++ b/drivers/net/sfc/base/medford2_nic.c @@ -0,0 +1,162 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright (c) 2015-2018 Solarflare Communications Inc. + * All rights reserved. + */ + +#include "efx.h" +#include "efx_impl.h" + + +#if EFSYS_OPT_MEDFORD2 + +static __checkReturn efx_rc_t +medford2_nic_get_required_pcie_bandwidth( + __in efx_nic_t *enp, + __out uint32_t *bandwidth_mbpsp) +{ + uint32_t port_modes; + uint32_t current_mode; + uint32_t bandwidth; + efx_rc_t rc; + + /* FIXME: support new Medford2 dynamic port modes */ + + if ((rc = efx_mcdi_get_port_modes(enp, &port_modes, + ¤t_mode)) != 0) { + /* No port mode info available. */ + bandwidth = 0; + goto out; + } + + if ((rc = ef10_nic_get_port_mode_bandwidth(current_mode, + &bandwidth)) != 0) + goto fail1; + +out: + *bandwidth_mbpsp = bandwidth; + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + + __checkReturn efx_rc_t +medford2_board_cfg( + __in efx_nic_t *enp) +{ + efx_nic_cfg_t *encp = &(enp->en_nic_cfg); + uint32_t sysclk, dpcpu_clk; + uint32_t end_padding; + uint32_t bandwidth; + efx_rc_t rc; + + /* + * Enable firmware workarounds for hardware errata. + * Expected responses are: + * - 0 (zero): + * Success: workaround enabled or disabled as requested. + * - MC_CMD_ERR_ENOSYS (reported as ENOTSUP): + * Firmware does not support the MC_CMD_WORKAROUND request. + * (assume that the workaround is not supported). + * - MC_CMD_ERR_ENOENT (reported as ENOENT): + * Firmware does not support the requested workaround. + * - MC_CMD_ERR_EPERM (reported as EACCES): + * Unprivileged function cannot enable/disable workarounds. + * + * See efx_mcdi_request_errcode() for MCDI error translations. + */ + + + if (EFX_PCI_FUNCTION_IS_VF(encp)) { + /* + * Interrupt testing does not work for VFs on Medford2. + * See bug50084 and bug71432 comment 21. + */ + encp->enc_bug41750_workaround = B_TRUE; + } + + /* Chained multicast is always enabled on Medford2 */ + encp->enc_bug26807_workaround = B_TRUE; + + /* + * If the bug61265 workaround is enabled, then interrupt holdoff timers + * cannot be controlled by timer table writes, so MCDI must be used + * (timer table writes can still be used for wakeup timers). + */ + rc = efx_mcdi_set_workaround(enp, MC_CMD_WORKAROUND_BUG61265, B_TRUE, + NULL); + if ((rc == 0) || (rc == EACCES)) + encp->enc_bug61265_workaround = B_TRUE; + else if ((rc == ENOTSUP) || (rc == ENOENT)) + encp->enc_bug61265_workaround = B_FALSE; + else + goto fail1; + + /* Get clock frequencies (in MHz). */ + if ((rc = efx_mcdi_get_clock(enp, &sysclk, &dpcpu_clk)) != 0) + goto fail2; + + /* + * The Medford2 timer quantum is 1536 dpcpu_clk cycles, documented for + * the EV_TMR_VAL field of EV_TIMER_TBL. Scale for MHz and ns units. 
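
The quantum/maximum computation that follows is worth unpacking: 1536 clock cycles at a dpcpu_clk of f MHz last 1536 * 1000 / f ns, and the maximum timer period is that quantum scaled by the widest value the timer-table field can hold. A worked sketch (the 14-bit field width here is an assumption for illustration; the real value comes from FRF_CZ_TC_TIMER_VAL_WIDTH):

    #include <stdio.h>

    int main(void)
    {
        unsigned long dpcpu_clk = 1000;           /* MHz, e.g. from MCDI */
        unsigned long quantum_ns = 1536000UL / dpcpu_clk;  /* 1536 cycles */
        unsigned int timer_val_width = 14;        /* assumed field width */
        unsigned long max_us = (quantum_ns << timer_val_width) / 1000;

        /* 1000 MHz -> quantum 1536 ns, max timer ~25165 us */
        printf("quantum %lu ns, max timer %lu us\n", quantum_ns, max_us);
        return 0;
    }
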
+ */ + encp->enc_evq_timer_quantum_ns = 1536000UL / dpcpu_clk; /* 1536 cycles */ + encp->enc_evq_timer_max_us = (encp->enc_evq_timer_quantum_ns << + FRF_CZ_TC_TIMER_VAL_WIDTH) / 1000; + + /* Alignment for receive packet DMA buffers */ + encp->enc_rx_buf_align_start = 1; + + /* Get the RX DMA end padding alignment configuration */ + if ((rc = efx_mcdi_get_rxdp_config(enp, &end_padding)) != 0) { + if (rc != EACCES) + goto fail3; + + /* Assume largest tail padding size supported by hardware */ + end_padding = 256; + } + encp->enc_rx_buf_align_end = end_padding; + + /* + * The maximum supported transmit queue size is 2048. TXQs with 4096 + * descriptors are not supported as the top bit is used for vfifo + * stuffing. + */ + encp->enc_txq_max_ndescs = 2048; + + EFX_STATIC_ASSERT(MEDFORD2_PIOBUF_NBUFS <= EF10_MAX_PIOBUF_NBUFS); + encp->enc_piobuf_limit = MEDFORD2_PIOBUF_NBUFS; + encp->enc_piobuf_size = MEDFORD2_PIOBUF_SIZE; + encp->enc_piobuf_min_alloc_size = MEDFORD2_MIN_PIO_ALLOC_SIZE; + + /* + * Medford2 stores a single global copy of VPD, not per-PF as on + * Huntington. + */ + encp->enc_vpd_is_global = B_TRUE; + + rc = medford2_nic_get_required_pcie_bandwidth(enp, &bandwidth); + if (rc != 0) + goto fail4; + encp->enc_required_pcie_bandwidth_mbps = bandwidth; + encp->enc_max_pcie_link_gen = EFX_PCIE_LINK_SPEED_GEN3; + + return (0); + +fail4: + EFSYS_PROBE(fail4); +fail3: + EFSYS_PROBE(fail3); +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + +#endif /* EFSYS_OPT_MEDFORD2 */ diff --git a/drivers/net/sfc/base/medford_nic.c b/drivers/net/sfc/base/medford_nic.c index 1365e9e3..6dc895f5 100644 --- a/drivers/net/sfc/base/medford_nic.c +++ b/drivers/net/sfc/base/medford_nic.c @@ -10,64 +10,6 @@ #if EFSYS_OPT_MEDFORD -static __checkReturn efx_rc_t -efx_mcdi_get_rxdp_config( - __in efx_nic_t *enp, - __out uint32_t *end_paddingp) -{ - efx_mcdi_req_t req; - uint8_t payload[MAX(MC_CMD_GET_RXDP_CONFIG_IN_LEN, - MC_CMD_GET_RXDP_CONFIG_OUT_LEN)]; - uint32_t end_padding; - efx_rc_t rc; - - memset(payload, 0, sizeof (payload)); - req.emr_cmd = MC_CMD_GET_RXDP_CONFIG; - req.emr_in_buf = payload; - req.emr_in_length = MC_CMD_GET_RXDP_CONFIG_IN_LEN; - req.emr_out_buf = payload; - req.emr_out_length = MC_CMD_GET_RXDP_CONFIG_OUT_LEN; - - efx_mcdi_execute(enp, &req); - if (req.emr_rc != 0) { - rc = req.emr_rc; - goto fail1; - } - - if (MCDI_OUT_DWORD_FIELD(req, GET_RXDP_CONFIG_OUT_DATA, - GET_RXDP_CONFIG_OUT_PAD_HOST_DMA) == 0) { - /* RX DMA end padding is disabled */ - end_padding = 0; - } else { - switch (MCDI_OUT_DWORD_FIELD(req, GET_RXDP_CONFIG_OUT_DATA, - GET_RXDP_CONFIG_OUT_PAD_HOST_LEN)) { - case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_64: - end_padding = 64; - break; - case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_128: - end_padding = 128; - break; - case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_256: - end_padding = 256; - break; - default: - rc = ENOTSUP; - goto fail2; - } - } - - *end_paddingp = end_padding; - - return (0); - -fail2: - EFSYS_PROBE(fail2); -fail1: - EFSYS_PROBE1(fail1, efx_rc_t, rc); - - return (rc); -} - static __checkReturn efx_rc_t medford_nic_get_required_pcie_bandwidth( __in efx_nic_t *enp, @@ -104,103 +46,12 @@ fail1: medford_board_cfg( __in efx_nic_t *enp) { - efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); efx_nic_cfg_t *encp = &(enp->en_nic_cfg); - uint8_t mac_addr[6] = { 0 }; - uint32_t board_type = 0; - ef10_link_state_t els; - efx_port_t *epp = &(enp->en_port); - uint32_t port; - uint32_t pf; - uint32_t vf; - uint32_t mask; uint32_t sysclk, 
dpcpu_clk; - uint32_t base, nvec; uint32_t end_padding; uint32_t bandwidth; efx_rc_t rc; - /* - * FIXME: Likely to be incomplete and incorrect. - * Parts of this should be shared with Huntington. - */ - - if ((rc = efx_mcdi_get_port_assignment(enp, &port)) != 0) - goto fail1; - - /* - * NOTE: The MCDI protocol numbers ports from zero. - * The common code MCDI interface numbers ports from one. - */ - emip->emi_port = port + 1; - - if ((rc = ef10_external_port_mapping(enp, port, - &encp->enc_external_port)) != 0) - goto fail2; - - /* - * Get PCIe function number from firmware (used for - * per-function privilege and dynamic config info). - * - PCIe PF: pf = PF number, vf = 0xffff. - * - PCIe VF: pf = parent PF, vf = VF number. - */ - if ((rc = efx_mcdi_get_function_info(enp, &pf, &vf)) != 0) - goto fail3; - - encp->enc_pf = pf; - encp->enc_vf = vf; - - /* MAC address for this function */ - if (EFX_PCI_FUNCTION_IS_PF(encp)) { - rc = efx_mcdi_get_mac_address_pf(enp, mac_addr); -#if EFSYS_OPT_ALLOW_UNCONFIGURED_NIC - /* - * Disable static config checking for Medford NICs, ONLY - * for manufacturing test and setup at the factory, to - * allow the static config to be installed. - */ -#else /* EFSYS_OPT_ALLOW_UNCONFIGURED_NIC */ - if ((rc == 0) && (mac_addr[0] & 0x02)) { - /* - * If the static config does not include a global MAC - * address pool then the board may return a locally - * administered MAC address (this should only happen on - * incorrectly programmed boards). - */ - rc = EINVAL; - } -#endif /* EFSYS_OPT_ALLOW_UNCONFIGURED_NIC */ - } else { - rc = efx_mcdi_get_mac_address_vf(enp, mac_addr); - } - if (rc != 0) - goto fail4; - - EFX_MAC_ADDR_COPY(encp->enc_mac_addr, mac_addr); - - /* Board configuration */ - rc = efx_mcdi_get_board_cfg(enp, &board_type, NULL, NULL); - if (rc != 0) { - /* Unprivileged functions may not be able to read board cfg */ - if (rc == EACCES) - board_type = 0; - else - goto fail5; - } - - encp->enc_board_type = board_type; - encp->enc_clk_mult = 1; /* not used for Medford */ - - /* Fill out fields in enp->en_port and enp->en_nic_cfg from MCDI */ - if ((rc = efx_mcdi_get_phy_cfg(enp)) != 0) - goto fail6; - - /* Obtain the default PHY advertised capabilities */ - if ((rc = ef10_phy_get_link(enp, &els)) != 0) - goto fail7; - epp->ep_default_adv_cap_mask = els.els_adv_cap_mask; - epp->ep_adv_cap_mask = els.els_adv_cap_mask; - /* * Enable firmware workarounds for hardware errata. * Expected responses are: @@ -220,8 +71,8 @@ medford_board_cfg( if (EFX_PCI_FUNCTION_IS_VF(encp)) { /* - * Interrupt testing does not work for VFs. See bug50084. - * FIXME: Does this still apply to Medford? + * Interrupt testing does not work for VFs. See bug50084 and + * bug71432 comment 21. */ encp->enc_bug41750_workaround = B_TRUE; } @@ -241,11 +92,11 @@ medford_board_cfg( else if ((rc == ENOTSUP) || (rc == ENOENT)) encp->enc_bug61265_workaround = B_FALSE; else - goto fail8; + goto fail1; /* Get clock frequencies (in MHz). 
*/ if ((rc = efx_mcdi_get_clock(enp, &sysclk, &dpcpu_clk)) != 0) - goto fail9; + goto fail2; /* * The Medford timer quantum is 1536 dpcpu_clk cycles, documented for @@ -255,47 +106,19 @@ medford_board_cfg( encp->enc_evq_timer_max_us = (encp->enc_evq_timer_quantum_ns << FRF_CZ_TC_TIMER_VAL_WIDTH) / 1000; - /* Check capabilities of running datapath firmware */ - if ((rc = ef10_get_datapath_caps(enp)) != 0) - goto fail10; - /* Alignment for receive packet DMA buffers */ encp->enc_rx_buf_align_start = 1; /* Get the RX DMA end padding alignment configuration */ if ((rc = efx_mcdi_get_rxdp_config(enp, &end_padding)) != 0) { if (rc != EACCES) - goto fail11; + goto fail3; /* Assume largest tail padding size supported by hardware */ end_padding = 256; } encp->enc_rx_buf_align_end = end_padding; - /* Alignment for WPTR updates */ - encp->enc_rx_push_align = EF10_RX_WPTR_ALIGN; - - /* - * Maximum number of exclusive RSS contexts which can be allocated. The - * hardware supports 64, but 6 are reserved for shared contexts. They - * are a global resource so not all may be available. - */ - encp->enc_rx_scale_max_exclusive_contexts = 58; - - encp->enc_tx_dma_desc_size_max = EFX_MASK32(ESF_DZ_RX_KER_BYTE_CNT); - /* No boundary crossing limits */ - encp->enc_tx_dma_desc_boundary = 0; - - /* - * Set resource limits for MC_CMD_ALLOC_VIS. Note that we cannot use - * MC_CMD_GET_RESOURCE_LIMITS here as that reports the available - * resources (allocated to this PCIe function), which is zero until - * after we have allocated VIs. - */ - encp->enc_evq_limit = 1024; - encp->enc_rxq_limit = EFX_RXQ_LIMIT_TARGET; - encp->enc_txq_limit = EFX_TXQ_LIMIT_TARGET; - /* * The maximum supported transmit queue size is 2048. TXQs with 4096 * descriptors are not supported as the top bit is used for vfifo @@ -303,40 +126,11 @@ medford_board_cfg( */ encp->enc_txq_max_ndescs = 2048; - encp->enc_buftbl_limit = 0xFFFFFFFF; - + EFX_STATIC_ASSERT(MEDFORD_PIOBUF_NBUFS <= EF10_MAX_PIOBUF_NBUFS); encp->enc_piobuf_limit = MEDFORD_PIOBUF_NBUFS; encp->enc_piobuf_size = MEDFORD_PIOBUF_SIZE; encp->enc_piobuf_min_alloc_size = MEDFORD_MIN_PIO_ALLOC_SIZE; - /* - * Get the current privilege mask. Note that this may be modified - * dynamically, so this value is informational only. DO NOT use - * the privilege mask to check for sufficient privileges, as that - * can result in time-of-check/time-of-use bugs. - */ - if ((rc = ef10_get_privilege_mask(enp, &mask)) != 0) - goto fail12; - encp->enc_privilege_mask = mask; - - /* Get interrupt vector limits */ - if ((rc = efx_mcdi_get_vector_cfg(enp, &base, &nvec, NULL)) != 0) { - if (EFX_PCI_FUNCTION_IS_PF(encp)) - goto fail13; - - /* Ignore error (cannot query vector limits from a VF). */ - base = 0; - nvec = 1024; - } - encp->enc_intr_vec_base = base; - encp->enc_intr_limit = nvec; - - /* - * Maximum number of bytes into the frame the TCP header can start for - * firmware assisted TSO to work. - */ - encp->enc_tx_tso_tcp_header_offset_limit = EF10_TCP_HEADER_OFFSET_LIMIT; - /* * Medford stores a single global copy of VPD, not per-PF as on * Huntington. 
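
When MC_CMD_GET_RXDP_CONFIG above is refused with EACCES (an unprivileged function), the code falls back to 256 bytes, the largest end padding the hardware can impose, which is always safe for buffer sizing even if pessimistic; any other error is still fatal. The query-or-assume-worst shape in isolation (query_end_padding is a hypothetical stand-in for the MCDI call):

    #include <errno.h>
    #include <stdio.h>

    /* Hypothetical stand-in for the MC_CMD_GET_RXDP_CONFIG query. */
    static int query_end_padding(unsigned int *paddingp)
    {
        (void)paddingp;
        return EACCES;          /* simulate an unprivileged function */
    }

    int main(void)
    {
        unsigned int padding;
        int rc = query_end_padding(&padding);

        if (rc == EACCES) {
            /* Assume the largest padding the hardware may apply. */
            padding = 256;
        } else if (rc != 0) {
            return rc;          /* other errors remain fatal */
        }
        printf("end padding %u\n", padding);
        return 0;
    }
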
@@ -345,32 +139,12 @@ medford_board_cfg( rc = medford_nic_get_required_pcie_bandwidth(enp, &bandwidth); if (rc != 0) - goto fail14; + goto fail4; encp->enc_required_pcie_bandwidth_mbps = bandwidth; encp->enc_max_pcie_link_gen = EFX_PCIE_LINK_SPEED_GEN3; return (0); -fail14: - EFSYS_PROBE(fail14); -fail13: - EFSYS_PROBE(fail13); -fail12: - EFSYS_PROBE(fail12); -fail11: - EFSYS_PROBE(fail11); -fail10: - EFSYS_PROBE(fail10); -fail9: - EFSYS_PROBE(fail9); -fail8: - EFSYS_PROBE(fail8); -fail7: - EFSYS_PROBE(fail7); -fail6: - EFSYS_PROBE(fail6); -fail5: - EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); fail3: diff --git a/drivers/net/sfc/base/meson.build b/drivers/net/sfc/base/meson.build index f1e49735..da2bf44d 100644 --- a/drivers/net/sfc/base/meson.build +++ b/drivers/net/sfc/base/meson.build @@ -34,6 +34,7 @@ sources = [ 'siena_vpd.c', 'ef10_ev.c', 'ef10_filter.c', + 'ef10_image.c', 'ef10_intr.c', 'ef10_mac.c', 'ef10_mcdi.c', @@ -44,7 +45,8 @@ sources = [ 'ef10_tx.c', 'ef10_vpd.c', 'hunt_nic.c', - 'medford_nic.c' + 'medford_nic.c', + 'medford2_nic.c' ] extra_flags = [ diff --git a/drivers/net/sfc/base/siena_flash.h b/drivers/net/sfc/base/siena_flash.h index 91a9fe05..74bb9496 100644 --- a/drivers/net/sfc/base/siena_flash.h +++ b/drivers/net/sfc/base/siena_flash.h @@ -103,7 +103,14 @@ typedef struct siena_mc_boot_hdr_s { /* GENERATED BY scripts/genfwdef */ /* the key, or 0xffff if unsigned. (Otherwise set to 0) */ efx_byte_t mumfw_subtype; /* MUM & SUC images: subtype. (Otherwise set to 0) */ efx_byte_t reserved_b[3]; /* (set to 0) */ - efx_dword_t reserved_c[6]; /* (set to 0) */ + efx_dword_t security_level; /* This number increases every time a serious security flaw */ + /* is fixed. A secure NIC may not downgrade to any image */ + /* with a lower security level than the current image. */ + /* Note: The number in this header should only be used for */ + /* determining the level of new images, not to determine */ + /* the level of the current image as this header is not */ + /* protected by a CMAC. 
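
Note that the boot header change below swaps `reserved_c[6]` for one `security_level` dword plus `reserved_c[5]`, so the structure's size and the position of every other field are unchanged and old and new firmware still agree on the layout. A standalone size check, using a simplified stand-in for efx_dword_t:

    #include <assert.h>
    #include <stdint.h>

    typedef struct { uint32_t dw; } dword_t;    /* stand-in for efx_dword_t */

    struct hdr_old { dword_t reserved_c[6]; };
    struct hdr_new { dword_t security_level; dword_t reserved_c[5]; };

    static_assert(sizeof(struct hdr_old) == sizeof(struct hdr_new),
                  "boot header size must not change");

    int main(void) { return 0; }
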
*/ + efx_dword_t reserved_c[5]; /* (set to 0) */ } siena_mc_boot_hdr_t; #define SIENA_MC_BOOT_HDR_PADDING \ diff --git a/drivers/net/sfc/base/siena_mac.c b/drivers/net/sfc/base/siena_mac.c index 904e03ed..f8857cdd 100644 --- a/drivers/net/sfc/base/siena_mac.c +++ b/drivers/net/sfc/base/siena_mac.c @@ -245,16 +245,28 @@ siena_mac_stats_update( __inout_ecount(EFX_MAC_NSTATS) efsys_stat_t *stat, __inout_opt uint32_t *generationp) { - efx_qword_t value; + const efx_nic_cfg_t *encp = &enp->en_nic_cfg; efx_qword_t generation_start; efx_qword_t generation_end; + efx_qword_t value; + efx_rc_t rc; - _NOTE(ARGUNUSED(enp)) + if (encp->enc_mac_stats_nstats < MC_CMD_MAC_NSTATS) { + /* MAC stats count too small */ + rc = ENOSPC; + goto fail1; + } + if (EFSYS_MEM_SIZE(esmp) < + (encp->enc_mac_stats_nstats * sizeof (efx_qword_t))) { + /* DMA buffer too small */ + rc = ENOSPC; + goto fail2; + } /* Read END first so we don't race with the MC */ - EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, EFX_MAC_STATS_SIZE); - SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_GENERATION_END, - &generation_end); + EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, EFSYS_MEM_SIZE(esmp)); + SIENA_MAC_STAT_READ(esmp, (encp->enc_mac_stats_nstats - 1), + &generation_end); EFSYS_MEM_READ_BARRIER(); /* TX */ @@ -422,7 +434,7 @@ siena_mac_stats_update( SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_NODESC_DROPS, &value); EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_NODESC_DROP_CNT]), &value); - EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, EFX_MAC_STATS_SIZE); + EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, EFSYS_MEM_SIZE(esmp)); EFSYS_MEM_READ_BARRIER(); SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_GENERATION_START, &generation_start); @@ -437,6 +449,13 @@ siena_mac_stats_update( *generationp = EFX_QWORD_FIELD(generation_start, EFX_DWORD_0); return (0); + +fail2: + EFSYS_PROBE(fail2); +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); } #endif /* EFSYS_OPT_MAC_STATS */ diff --git a/drivers/net/sfc/base/siena_mcdi.c b/drivers/net/sfc/base/siena_mcdi.c index ef844591..d727c187 100644 --- a/drivers/net/sfc/base/siena_mcdi.c +++ b/drivers/net/sfc/base/siena_mcdi.c @@ -124,17 +124,21 @@ siena_mcdi_read_response( { efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); unsigned int pdur; - unsigned int pos; + unsigned int pos = 0; efx_dword_t data; + size_t remaining = length; EFSYS_ASSERT(emip->emi_port == 1 || emip->emi_port == 2); pdur = SIENA_MCDI_PDU(emip); - for (pos = 0; pos < length; pos += sizeof (efx_dword_t)) { + while (remaining > 0) { + size_t chunk = MIN(remaining, sizeof (data)); + EFX_BAR_TBL_READD(enp, FR_CZ_MC_TREG_SMEM, pdur + ((offset + pos) >> 2), &data, B_FALSE); - memcpy((uint8_t *)bufferp + pos, &data, - MIN(sizeof (data), length - pos)); + memcpy((uint8_t *)bufferp + pos, &data, chunk); + pos += chunk; + remaining -= chunk; } } diff --git a/drivers/net/sfc/base/siena_nic.c b/drivers/net/sfc/base/siena_nic.c index f223c9be..31eef80b 100644 --- a/drivers/net/sfc/base/siena_nic.c +++ b/drivers/net/sfc/base/siena_nic.c @@ -66,6 +66,10 @@ siena_board_cfg( uint32_t nevq, nrxq, ntxq; efx_rc_t rc; + /* Siena has a fixed 8Kbyte VI window size */ + EFX_STATIC_ASSERT(1U << EFX_VI_WINDOW_SHIFT_8K == 8192); + encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_8K; + /* External port identifier using one-based port numbering */ encp->enc_external_port = (uint8_t)enp->en_mcdi.em_emip.emi_port; @@ -114,6 +118,18 @@ siena_board_cfg( /* There is one RSS context per function */ encp->enc_rx_scale_max_exclusive_contexts = 1; + encp->enc_rx_scale_hash_alg_mask |= (1U << EFX_RX_HASHALG_LFSR); + 
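
The siena_mcdi_read_response rewrite above replaces a dword-indexed for loop with an explicit remaining/chunk walk: each BAR access still reads a whole 32-bit register, but only MIN(remaining, 4) bytes of it are copied out, so a response length that is not a multiple of four never overruns bufferp. The copy discipline in isolation (a plain memcpy stands in for the BAR read):

    #include <stdio.h>
    #include <string.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
        const unsigned char src[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
        unsigned char dst[6];
        size_t pos = 0, remaining = sizeof(dst);  /* not dword-aligned */

        while (remaining > 0) {
            unsigned int dword;                   /* one 32-bit register read */
            size_t chunk = MIN(remaining, sizeof(dword));

            memcpy(&dword, src + pos, sizeof(dword)); /* stand-in BAR read */
            memcpy(dst + pos, &dword, chunk);     /* copy only what fits */
            pos += chunk;
            remaining -= chunk;
        }
        printf("%u\n", dst[5]);   /* last byte came from a partial dword */
        return 0;
    }
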
encp->enc_rx_scale_hash_alg_mask |= (1U << EFX_RX_HASHALG_TOEPLITZ); + + /* + * It is always possible to use port numbers + * as the input data for hash computation. + */ + encp->enc_rx_scale_l4_hash_supported = B_TRUE; + + /* There is no support for additional RSS modes */ + encp->enc_rx_scale_additional_modes_supported = B_FALSE; + encp->enc_tx_dma_desc_size_max = EFX_MASK32(FSF_AZ_TX_KER_BYTE_COUNT); /* Fragments must not span 4k boundaries. */ encp->enc_tx_dma_desc_boundary = 4096; @@ -145,6 +161,8 @@ siena_board_cfg( encp->enc_allow_set_mac_with_installed_filters = B_TRUE; encp->enc_rx_packed_stream_supported = B_FALSE; encp->enc_rx_var_packed_stream_supported = B_FALSE; + encp->enc_rx_es_super_buffer_supported = B_FALSE; + encp->enc_fw_subvariant_no_tx_csum_supported = B_FALSE; /* Siena supports two 10G ports, and 8 lanes of PCIe Gen2 */ encp->enc_required_pcie_bandwidth_mbps = 2 * 10000; @@ -152,6 +170,12 @@ siena_board_cfg( encp->enc_nvram_update_verify_result_supported = B_FALSE; + encp->enc_mac_stats_nstats = MC_CMD_MAC_NSTATS; + + encp->enc_filter_action_flag_supported = B_FALSE; + encp->enc_filter_action_mark_supported = B_FALSE; + encp->enc_filter_action_mark_max = 0; + return (0); fail2: diff --git a/drivers/net/sfc/base/siena_nvram.c b/drivers/net/sfc/base/siena_nvram.c index e72bba0b..8cdd2df7 100644 --- a/drivers/net/sfc/base/siena_nvram.c +++ b/drivers/net/sfc/base/siena_nvram.c @@ -304,15 +304,20 @@ siena_nvram_get_dynamic_cfg( if ((rc = siena_nvram_partn_size(enp, partn, &size)) != 0) goto fail1; + if (size < SIENA_NVRAM_CHUNK) { + rc = EINVAL; + goto fail2; + } + EFSYS_KMEM_ALLOC(enp->en_esip, size, dcfg); if (dcfg == NULL) { rc = ENOMEM; - goto fail2; + goto fail3; } if ((rc = siena_nvram_partn_read(enp, partn, 0, (caddr_t)dcfg, SIENA_NVRAM_CHUNK)) != 0) - goto fail3; + goto fail4; /* Verify the magic */ if (EFX_DWORD_FIELD(dcfg->magic, EFX_DWORD_0) @@ -347,7 +352,7 @@ siena_nvram_get_dynamic_cfg( if ((rc = siena_nvram_partn_read(enp, partn, SIENA_NVRAM_CHUNK, (caddr_t)dcfg + SIENA_NVRAM_CHUNK, region - SIENA_NVRAM_CHUNK)) != 0) - goto fail4; + goto fail5; } /* Verify checksum */ @@ -389,13 +394,15 @@ done: return (0); +fail5: + EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); -fail3: - EFSYS_PROBE(fail3); EFSYS_KMEM_FREE(enp->en_esip, size, dcfg); +fail3: + EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: diff --git a/drivers/net/sfc/base/siena_phy.c b/drivers/net/sfc/base/siena_phy.c index d638646b..4b2190d3 100644 --- a/drivers/net/sfc/base/siena_phy.c +++ b/drivers/net/sfc/base/siena_phy.c @@ -534,6 +534,11 @@ siena_phy_stats_update( MC_CMD_PHY_STATS_OUT_DMA_LEN)]; efx_rc_t rc; + if ((esmp == NULL) || (EFSYS_MEM_SIZE(esmp) < EFX_PHY_STATS_SIZE)) { + rc = EINVAL; + goto fail1; + } + (void) memset(payload, 0, sizeof (payload)); req.emr_cmd = MC_CMD_PHY_STATS; req.emr_in_buf = payload; @@ -550,7 +555,7 @@ siena_phy_stats_update( if (req.emr_rc != 0) { rc = req.emr_rc; - goto fail1; + goto fail2; } EFSYS_ASSERT3U(req.emr_out_length, ==, MC_CMD_PHY_STATS_OUT_DMA_LEN); @@ -559,6 +564,8 @@ siena_phy_stats_update( return (0); +fail2: + EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); diff --git a/drivers/net/sfc/base/siena_vpd.c b/drivers/net/sfc/base/siena_vpd.c index f188eb58..ebb12abf 100644 --- a/drivers/net/sfc/base/siena_vpd.c +++ b/drivers/net/sfc/base/siena_vpd.c @@ -36,21 +36,26 @@ siena_vpd_get_static( if ((rc = siena_nvram_partn_size(enp, partn, &size)) != 0) goto fail1; + if (size < SIENA_NVRAM_CHUNK) { + rc = EINVAL; + goto fail2; + } + 
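
The renumbered fail labels in these hunks follow the base driver's error-unwind convention: every failure point gets its own sequential label, the labels appear in reverse order before the return, and each fires an EFSYS_PROBE so traces show exactly which step failed; inserting a new early check therefore shifts every later label up by one. The shape of the idiom, with a stubbed probe:

    #include <stdio.h>

    #define PROBE(label) printf("failed at %s\n", #label)

    static int do_op(int step_ok1, int step_ok2)
    {
        int rc;

        if (!step_ok1) {
            rc = 1;
            goto fail1;
        }
        if (!step_ok2) {
            rc = 2;
            goto fail2;       /* later steps use higher numbers */
        }
        return 0;

    fail2:                    /* unwind labels in reverse order */
        PROBE(fail2);
    fail1:
        PROBE(fail1);
        return rc;
    }

    int main(void)
    {
        return do_op(1, 0);   /* prints "failed at fail2", "failed at fail1" */
    }
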
EFSYS_KMEM_ALLOC(enp->en_esip, size, scfg); if (scfg == NULL) { rc = ENOMEM; - goto fail2; + goto fail3; } if ((rc = siena_nvram_partn_read(enp, partn, 0, (caddr_t)scfg, SIENA_NVRAM_CHUNK)) != 0) - goto fail3; + goto fail4; /* Verify the magic number */ if (EFX_DWORD_FIELD(scfg->magic, EFX_DWORD_0) != SIENA_MC_STATIC_CONFIG_MAGIC) { rc = EINVAL; - goto fail4; + goto fail5; } /* All future versions of the structure must be backwards compatible */ @@ -64,7 +69,7 @@ siena_vpd_get_static( if (hdr_length > size || vpd_offset > size || vpd_length > size || vpd_length + vpd_offset > size) { rc = EINVAL; - goto fail5; + goto fail6; } /* Read the remainder of scfg + static vpd */ @@ -73,7 +78,7 @@ siena_vpd_get_static( if ((rc = siena_nvram_partn_read(enp, partn, SIENA_NVRAM_CHUNK, (caddr_t)scfg + SIENA_NVRAM_CHUNK, region - SIENA_NVRAM_CHUNK)) != 0) - goto fail6; + goto fail7; } /* Verify checksum */ @@ -82,7 +87,7 @@ siena_vpd_get_static( cksum += ((uint8_t *)scfg)[pos]; if (cksum != 0) { rc = EINVAL; - goto fail7; + goto fail8; } if (vpd_length == 0) @@ -92,7 +97,7 @@ siena_vpd_get_static( EFSYS_KMEM_ALLOC(enp->en_esip, vpd_length, svpd); if (svpd == NULL) { rc = ENOMEM; - goto fail8; + goto fail9; } memcpy(svpd, (caddr_t)scfg + vpd_offset, vpd_length); } @@ -104,6 +109,8 @@ siena_vpd_get_static( return (0); +fail9: + EFSYS_PROBE(fail9); fail8: EFSYS_PROBE(fail8); fail7: @@ -114,11 +121,11 @@ fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); -fail3: - EFSYS_PROBE(fail3); EFSYS_KMEM_FREE(enp->en_esip, size, scfg); +fail3: + EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: diff --git a/drivers/net/sfc/efsys.h b/drivers/net/sfc/efsys.h index c7a54c3b..b9d2df58 100644 --- a/drivers/net/sfc/efsys.h +++ b/drivers/net/sfc/efsys.h @@ -26,6 +26,7 @@ #include #include "sfc_debug.h" +#include "sfc_log.h" #ifdef __cplusplus extern "C" { @@ -119,6 +120,8 @@ prefetch_read_once(const volatile void *addr) #define __out_ecount_opt(_n) #define __out_bcount(_n) #define __out_bcount_opt(_n) +#define __out_bcount_part(_n, _l) +#define __out_bcount_part_opt(_n, _l) #define __deref_out @@ -148,6 +151,8 @@ prefetch_read_once(const volatile void *addr) #define EFSYS_OPT_HUNTINGTON 1 /* Enable SFN8xxx support */ #define EFSYS_OPT_MEDFORD 1 +/* Enable SFN2xxx support */ +#define EFSYS_OPT_MEDFORD2 1 #ifdef RTE_LIBRTE_SFC_EFX_DEBUG #define EFSYS_OPT_CHECK_REG 1 #else @@ -161,7 +166,7 @@ prefetch_read_once(const volatile void *addr) #define EFSYS_OPT_MAC_STATS 1 -#define EFSYS_OPT_LOOPBACK 0 +#define EFSYS_OPT_LOOPBACK 1 #define EFSYS_OPT_MON_MCDI 0 #define EFSYS_OPT_MON_STATS 0 @@ -174,6 +179,7 @@ prefetch_read_once(const volatile void *addr) #define EFSYS_OPT_VPD 0 #define EFSYS_OPT_NVRAM 0 #define EFSYS_OPT_BOOTCFG 0 +#define EFSYS_OPT_IMAGE_LAYOUT 0 #define EFSYS_OPT_DIAG 0 #define EFSYS_OPT_RX_SCALE 1 @@ -192,8 +198,12 @@ prefetch_read_once(const volatile void *addr) #define EFSYS_OPT_RX_PACKED_STREAM 0 +#define EFSYS_OPT_RX_ES_SUPER_BUFFER 1 + #define EFSYS_OPT_TUNNEL 1 +#define EFSYS_OPT_FW_SUBVARIANT_AWARE 1 + /* ID */ typedef struct __efsys_identifier_s efsys_identifier_t; @@ -370,6 +380,9 @@ typedef struct efsys_mem_s { } while (B_FALSE) +#define EFSYS_MEM_SIZE(_esmp) \ + ((_esmp)->esm_mz->len) + #define EFSYS_MEM_ADDR(_esmp) \ ((_esmp)->esm_addr) @@ -721,7 +734,7 @@ typedef uint64_t efsys_stat_t; #define EFSYS_ERR(_esip, _code, _dword0, _dword1) \ do { \ (void)(_esip); \ - RTE_LOG(ERR, PMD, "FATAL ERROR #%u (0x%08x%08x)\n", \ + SFC_GENERIC_LOG(ERR, "FATAL ERROR #%u (0x%08x%08x)", \ (_code), (_dword0), 
(_dword1)); \ _NOTE(CONSTANTCONDITION); \ } while (B_FALSE) diff --git a/drivers/net/sfc/meson.build b/drivers/net/sfc/meson.build index b60a8f58..3aa14c7b 100644 --- a/drivers/net/sfc/meson.build +++ b/drivers/net/sfc/meson.build @@ -30,10 +30,6 @@ extra_flags += [ '-Wbad-function-cast' ] -# Suppress ICC false positive warning on 'bulk' may be used before its -# value is set -extra_flags += '-wd3656' - foreach flag: extra_flags if cc.has_argument(flag) cflags += flag @@ -58,6 +54,7 @@ sources = files( 'sfc_flow.c', 'sfc_dp.c', 'sfc_ef10_rx.c', + 'sfc_ef10_essb_rx.c', 'sfc_ef10_tx.c' ) diff --git a/drivers/net/sfc/sfc.c b/drivers/net/sfc/sfc.c index ac5fdcaa..6690053f 100644 --- a/drivers/net/sfc/sfc.c +++ b/drivers/net/sfc/sfc.c @@ -20,6 +20,8 @@ #include "sfc_ev.h" #include "sfc_rx.h" #include "sfc_tx.h" +#include "sfc_kvargs.h" +#include "sfc_tweak.h" int @@ -81,14 +83,23 @@ sfc_phy_cap_from_link_speeds(uint32_t speeds) phy_caps |= (1 << EFX_PHY_CAP_1000FDX) | (1 << EFX_PHY_CAP_10000FDX) | - (1 << EFX_PHY_CAP_40000FDX); + (1 << EFX_PHY_CAP_25000FDX) | + (1 << EFX_PHY_CAP_40000FDX) | + (1 << EFX_PHY_CAP_50000FDX) | + (1 << EFX_PHY_CAP_100000FDX); } if (speeds & ETH_LINK_SPEED_1G) phy_caps |= (1 << EFX_PHY_CAP_1000FDX); if (speeds & ETH_LINK_SPEED_10G) phy_caps |= (1 << EFX_PHY_CAP_10000FDX); + if (speeds & ETH_LINK_SPEED_25G) + phy_caps |= (1 << EFX_PHY_CAP_25000FDX); if (speeds & ETH_LINK_SPEED_40G) phy_caps |= (1 << EFX_PHY_CAP_40000FDX); + if (speeds & ETH_LINK_SPEED_50G) + phy_caps |= (1 << EFX_PHY_CAP_50000FDX); + if (speeds & ETH_LINK_SPEED_100G) + phy_caps |= (1 << EFX_PHY_CAP_100000FDX); return phy_caps; } @@ -113,10 +124,12 @@ sfc_check_conf(struct sfc_adapter *sa) rc = EINVAL; } +#if !EFSYS_OPT_LOOPBACK if (conf->lpbk_mode != 0) { sfc_err(sa, "Loopback not supported"); rc = EINVAL; } +#endif if (conf->dcb_capability_en != 0) { sfc_err(sa, "Priority-based flow control not supported"); @@ -249,6 +262,58 @@ sfc_set_drv_limits(struct sfc_adapter *sa) return efx_nic_set_drv_limits(sa->nic, &lim); } +static int +sfc_set_fw_subvariant(struct sfc_adapter *sa) +{ + const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic); + uint64_t tx_offloads = sa->eth_dev->data->dev_conf.txmode.offloads; + unsigned int txq_index; + efx_nic_fw_subvariant_t req_fw_subvariant; + efx_nic_fw_subvariant_t cur_fw_subvariant; + int rc; + + if (!encp->enc_fw_subvariant_no_tx_csum_supported) { + sfc_info(sa, "no-Tx-checksum subvariant not supported"); + return 0; + } + + for (txq_index = 0; txq_index < sa->txq_count; ++txq_index) { + struct sfc_txq_info *txq_info = &sa->txq_info[txq_index]; + + if (txq_info->txq != NULL) + tx_offloads |= txq_info->txq->offloads; + } + + if (tx_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) + req_fw_subvariant = EFX_NIC_FW_SUBVARIANT_DEFAULT; + else + req_fw_subvariant = EFX_NIC_FW_SUBVARIANT_NO_TX_CSUM; + + rc = efx_nic_get_fw_subvariant(sa->nic, &cur_fw_subvariant); + if (rc != 0) { + sfc_err(sa, "failed to get FW subvariant: %d", rc); + return rc; + } + sfc_info(sa, "FW subvariant is %u vs required %u", + cur_fw_subvariant, req_fw_subvariant); + + if (cur_fw_subvariant == req_fw_subvariant) + return 0; + + rc = efx_nic_set_fw_subvariant(sa->nic, req_fw_subvariant); + if (rc != 0) { + sfc_err(sa, "failed to set FW subvariant %u: %d", + req_fw_subvariant, rc); + return rc; + } + sfc_info(sa, "FW subvariant set to %u", req_fw_subvariant); + + return 0; +} + static int sfc_try_start(struct 
sfc_adapter *sa) { @@ -260,6 +325,11 @@ sfc_try_start(struct sfc_adapter *sa) SFC_ASSERT(sfc_adapter_is_locked(sa)); SFC_ASSERT(sa->state == SFC_ADAPTER_STARTING); + sfc_log_init(sa, "set FW subvariant"); + rc = sfc_set_fw_subvariant(sa); + if (rc != 0) + goto fail_set_fw_subvariant; + sfc_log_init(sa, "set resource limits"); rc = sfc_set_drv_limits(sa); if (rc != 0) @@ -326,6 +396,7 @@ fail_tunnel_reconfigure: fail_nic_init: fail_set_drv_limits: +fail_set_fw_subvariant: sfc_log_init(sa, "failed %d", rc); return rc; } @@ -344,7 +415,7 @@ sfc_start(struct sfc_adapter *sa) case SFC_ADAPTER_CONFIGURED: break; case SFC_ADAPTER_STARTED: - sfc_info(sa, "already started"); + sfc_notice(sa, "already started"); return 0; default: rc = EINVAL; @@ -383,7 +454,7 @@ sfc_stop(struct sfc_adapter *sa) case SFC_ADAPTER_STARTED: break; case SFC_ADAPTER_CONFIGURED: - sfc_info(sa, "already stopped"); + sfc_notice(sa, "already stopped"); return; default: sfc_err(sa, "stop in unexpected state %u", sa->state); @@ -454,7 +525,7 @@ sfc_schedule_restart(struct sfc_adapter *sa) else if (rc != 0) sfc_err(sa, "cannot arm restart alarm (rc=%d)", rc); else - sfc_info(sa, "restart scheduled"); + sfc_notice(sa, "restart scheduled"); } int @@ -530,27 +601,18 @@ sfc_close(struct sfc_adapter *sa) } static int -sfc_mem_bar_init(struct sfc_adapter *sa) +sfc_mem_bar_init(struct sfc_adapter *sa, unsigned int membar) { struct rte_eth_dev *eth_dev = sa->eth_dev; struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); efsys_bar_t *ebp = &sa->mem_bar; - unsigned int i; - struct rte_mem_resource *res; - - for (i = 0; i < RTE_DIM(pci_dev->mem_resource); i++) { - res = &pci_dev->mem_resource[i]; - if ((res->len != 0) && (res->phys_addr != 0)) { - /* Found first memory BAR */ - SFC_BAR_LOCK_INIT(ebp, eth_dev->data->name); - ebp->esb_rid = i; - ebp->esb_dev = pci_dev; - ebp->esb_base = res->addr; - return 0; - } - } + struct rte_mem_resource *res = &pci_dev->mem_resource[membar]; - return EFAULT; + SFC_BAR_LOCK_INIT(ebp, eth_dev->data->name); + ebp->esb_rid = membar; + ebp->esb_dev = pci_dev; + ebp->esb_base = res->addr; + return 0; } static void @@ -562,7 +624,6 @@ sfc_mem_bar_fini(struct sfc_adapter *sa) memset(ebp, 0, sizeof(*ebp)); } -#if EFSYS_OPT_RX_SCALE /* * A fixed RSS key which has a property of being symmetric * (symmetrical flows are distributed to the same CPU) @@ -576,12 +637,11 @@ static const uint8_t default_rss_key[EFX_RSS_KEY_SIZE] = { 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, }; -#endif -#if EFSYS_OPT_RX_SCALE static int -sfc_set_rss_defaults(struct sfc_adapter *sa) +sfc_rss_attach(struct sfc_adapter *sa) { + struct sfc_rss *rss = &sa->rss; int rc; rc = efx_intr_init(sa->nic, sa->intr.type, NULL); @@ -596,26 +656,31 @@ sfc_set_rss_defaults(struct sfc_adapter *sa) if (rc != 0) goto fail_rx_init; - rc = efx_rx_scale_default_support_get(sa->nic, &sa->rss_support); + rc = efx_rx_scale_default_support_get(sa->nic, &rss->context_type); if (rc != 0) goto fail_scale_support_get; - rc = efx_rx_hash_default_support_get(sa->nic, &sa->hash_support); + rc = efx_rx_hash_default_support_get(sa->nic, &rss->hash_support); if (rc != 0) goto fail_hash_support_get; + rc = sfc_rx_hash_init(sa); + if (rc != 0) + goto fail_rx_hash_init; + efx_rx_fini(sa->nic); efx_ev_fini(sa->nic); efx_intr_fini(sa->nic); - sa->rss_hash_types = sfc_rte_to_efx_hash_type(SFC_RSS_OFFLOADS); - - rte_memcpy(sa->rss_key, default_rss_key, sizeof(sa->rss_key)); + rte_memcpy(rss->key, default_rss_key, 
sizeof(rss->key)); return 0; +fail_rx_hash_init: fail_hash_support_get: fail_scale_support_get: + efx_rx_fini(sa->nic); + fail_rx_init: efx_ev_fini(sa->nic); @@ -625,13 +690,12 @@ fail_ev_init: fail_intr_init: return rc; } -#else -static int -sfc_set_rss_defaults(__rte_unused struct sfc_adapter *sa) + +static void +sfc_rss_detach(struct sfc_adapter *sa) { - return 0; + sfc_rx_hash_fini(sa); } -#endif int sfc_attach(struct sfc_adapter *sa) @@ -690,9 +754,9 @@ sfc_attach(struct sfc_adapter *sa) if (rc != 0) goto fail_port_attach; - rc = sfc_set_rss_defaults(sa); + rc = sfc_rss_attach(sa); if (rc != 0) - goto fail_set_rss_defaults; + goto fail_rss_attach; rc = sfc_filter_attach(sa); if (rc != 0) @@ -709,7 +773,9 @@ sfc_attach(struct sfc_adapter *sa) return 0; fail_filter_attach: -fail_set_rss_defaults: + sfc_rss_detach(sa); + +fail_rss_attach: sfc_port_detach(sa); fail_port_attach: @@ -741,6 +807,7 @@ sfc_detach(struct sfc_adapter *sa) sfc_flow_fini(sa); sfc_filter_detach(sa); + sfc_rss_detach(sa); sfc_port_detach(sa); sfc_ev_detach(sa); sfc_intr_detach(sa); @@ -749,10 +816,169 @@ sfc_detach(struct sfc_adapter *sa) sa->state = SFC_ADAPTER_UNINITIALIZED; } +static int +sfc_kvarg_fv_variant_handler(__rte_unused const char *key, + const char *value_str, void *opaque) +{ + uint32_t *value = opaque; + + if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_DONT_CARE) == 0) + *value = EFX_FW_VARIANT_DONT_CARE; + else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_FULL_FEATURED) == 0) + *value = EFX_FW_VARIANT_FULL_FEATURED; + else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_LOW_LATENCY) == 0) + *value = EFX_FW_VARIANT_LOW_LATENCY; + else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_PACKED_STREAM) == 0) + *value = EFX_FW_VARIANT_PACKED_STREAM; + else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_DPDK) == 0) + *value = EFX_FW_VARIANT_DPDK; + else + return -EINVAL; + + return 0; +} + +static int +sfc_get_fw_variant(struct sfc_adapter *sa, efx_fw_variant_t *efv) +{ + efx_nic_fw_info_t enfi; + int rc; + + rc = efx_nic_get_fw_version(sa->nic, &enfi); + if (rc != 0) + return rc; + else if (!enfi.enfi_dpcpu_fw_ids_valid) + return ENOTSUP; + + /* + * Firmware variant can be uniquely identified by the RxDPCPU + * firmware id + */ + switch (enfi.enfi_rx_dpcpu_fw_id) { + case EFX_RXDP_FULL_FEATURED_FW_ID: + *efv = EFX_FW_VARIANT_FULL_FEATURED; + break; + + case EFX_RXDP_LOW_LATENCY_FW_ID: + *efv = EFX_FW_VARIANT_LOW_LATENCY; + break; + + case EFX_RXDP_PACKED_STREAM_FW_ID: + *efv = EFX_FW_VARIANT_PACKED_STREAM; + break; + + case EFX_RXDP_DPDK_FW_ID: + *efv = EFX_FW_VARIANT_DPDK; + break; + + default: + /* + * Other firmware variants are not considered, since they are + * not supported in the device parameters + */ + *efv = EFX_FW_VARIANT_DONT_CARE; + break; + } + + return 0; +} + +static const char * +sfc_fw_variant2str(efx_fw_variant_t efv) +{ + /* Match on efx_fw_variant_t values, not on RxDPCPU firmware ids */ + switch (efv) { + case EFX_FW_VARIANT_FULL_FEATURED: + return SFC_KVARG_FW_VARIANT_FULL_FEATURED; + case EFX_FW_VARIANT_LOW_LATENCY: + return SFC_KVARG_FW_VARIANT_LOW_LATENCY; + case EFX_FW_VARIANT_PACKED_STREAM: + return SFC_KVARG_FW_VARIANT_PACKED_STREAM; + case EFX_FW_VARIANT_DPDK: + return SFC_KVARG_FW_VARIANT_DPDK; + default: + return "unknown"; + } +} + +static int +sfc_kvarg_rxd_wait_timeout_ns(struct sfc_adapter *sa) +{ + int rc; + long value; + + value = SFC_RXD_WAIT_TIMEOUT_NS_DEF; + + rc = sfc_kvargs_process(sa, SFC_KVARG_RXD_WAIT_TIMEOUT_NS, + sfc_kvarg_long_handler, &value); + if (rc != 0) + return rc; + + if (value < 0 || + (unsigned long)value >
EFX_RXQ_ES_SUPER_BUFFER_HOL_BLOCK_MAX) { + sfc_err(sa, "wrong '" SFC_KVARG_RXD_WAIT_TIMEOUT_NS "' " + "was set (%ld);", value); + sfc_err(sa, "it must not be less than 0 or greater than %u", + EFX_RXQ_ES_SUPER_BUFFER_HOL_BLOCK_MAX); + return EINVAL; + } + + sa->rxd_wait_timeout_ns = value; + return 0; +} + +static int +sfc_nic_probe(struct sfc_adapter *sa) +{ + efx_nic_t *enp = sa->nic; + efx_fw_variant_t preferred_efv; + efx_fw_variant_t efv; + int rc; + + preferred_efv = EFX_FW_VARIANT_DONT_CARE; + rc = sfc_kvargs_process(sa, SFC_KVARG_FW_VARIANT, + sfc_kvarg_fv_variant_handler, + &preferred_efv); + if (rc != 0) { + sfc_err(sa, "invalid %s parameter value", SFC_KVARG_FW_VARIANT); + return rc; + } + + rc = sfc_kvarg_rxd_wait_timeout_ns(sa); + if (rc != 0) + return rc; + + rc = efx_nic_probe(enp, preferred_efv); + if (rc == EACCES) { + /* Unprivileged functions cannot set FW variant */ + rc = efx_nic_probe(enp, EFX_FW_VARIANT_DONT_CARE); + } + if (rc != 0) + return rc; + + rc = sfc_get_fw_variant(sa, &efv); + if (rc == ENOTSUP) { + sfc_warn(sa, "FW variant can not be obtained"); + return 0; + } + if (rc != 0) + return rc; + + /* Check that firmware variant was changed to the requested one */ + if (preferred_efv != EFX_FW_VARIANT_DONT_CARE && preferred_efv != efv) { + sfc_warn(sa, "FW variant has not changed to the requested %s", + sfc_fw_variant2str(preferred_efv)); + } + + sfc_notice(sa, "running FW variant is %s", sfc_fw_variant2str(efv)); + + return 0; +} + int sfc_probe(struct sfc_adapter *sa) { struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(sa->eth_dev); + unsigned int membar; efx_nic_t *enp; int rc; @@ -763,17 +989,17 @@ sfc_probe(struct sfc_adapter *sa) sa->socket_id = rte_socket_id(); rte_atomic32_init(&sa->restart_required); - sfc_log_init(sa, "init mem bar"); - rc = sfc_mem_bar_init(sa); - if (rc != 0) - goto fail_mem_bar_init; - sfc_log_init(sa, "get family"); rc = efx_family(pci_dev->id.vendor_id, pci_dev->id.device_id, - &sa->family); + &sa->family, &membar); if (rc != 0) goto fail_family; - sfc_log_init(sa, "family is %u", sa->family); + sfc_log_init(sa, "family is %u, membar is %u", sa->family, membar); + + sfc_log_init(sa, "init mem bar"); + rc = sfc_mem_bar_init(sa, membar); + if (rc != 0) + goto fail_mem_bar_init; sfc_log_init(sa, "create nic"); rte_spinlock_init(&sa->nic_lock); @@ -788,7 +1014,7 @@ sfc_probe(struct sfc_adapter *sa) goto fail_mcdi_init; sfc_log_init(sa, "probe nic"); - rc = efx_nic_probe(enp); + rc = sfc_nic_probe(sa); if (rc != 0) goto fail_nic_probe; @@ -804,10 +1030,10 @@ fail_mcdi_init: efx_nic_destroy(enp); fail_nic_create: -fail_family: sfc_mem_bar_fini(sa); fail_mem_bar_init: +fail_family: sfc_log_init(sa, "failed %d", rc); return rc; } @@ -843,3 +1069,35 @@ sfc_unprobe(struct sfc_adapter *sa) sfc_flow_fini(sa); sa->state = SFC_ADAPTER_UNINITIALIZED; } + +uint32_t +sfc_register_logtype(struct sfc_adapter *sa, const char *lt_prefix_str, + uint32_t ll_default) +{ + size_t lt_prefix_str_size = strlen(lt_prefix_str); + size_t lt_str_size_max; + char *lt_str = NULL; + int ret; + + if (SIZE_MAX - PCI_PRI_STR_SIZE - 1 > lt_prefix_str_size) { + ++lt_prefix_str_size; /* Reserve space for prefix separator */ + lt_str_size_max = lt_prefix_str_size + PCI_PRI_STR_SIZE + 1; + } else { + return RTE_LOGTYPE_PMD; + } + + lt_str = rte_zmalloc("logtype_str", lt_str_size_max, 0); + if (lt_str == NULL) + return RTE_LOGTYPE_PMD; + + strncpy(lt_str, lt_prefix_str, lt_prefix_str_size); + lt_str[lt_prefix_str_size - 1] = '.'; + rte_pci_device_name(&sa->pci_addr, 
lt_str + lt_prefix_str_size, + lt_str_size_max - lt_prefix_str_size); + lt_str[lt_str_size_max - 1] = '\0'; + + ret = rte_log_register_type_and_pick_level(lt_str, ll_default); + rte_free(lt_str); + + return (ret < 0) ? RTE_LOGTYPE_PMD : ret; +} diff --git a/drivers/net/sfc/sfc.h b/drivers/net/sfc/sfc.h index 75575349..51be4403 100644 --- a/drivers/net/sfc/sfc.h +++ b/drivers/net/sfc/sfc.h @@ -27,11 +27,6 @@ extern "C" { #endif -#if EFSYS_OPT_RX_SCALE -/** RSS hash offloads mask */ -#define SFC_RSS_OFFLOADS (ETH_RSS_IP | ETH_RSS_TCP) -#endif - /* * +---------------+ * | UNINITIALIZED |<-----------+ @@ -104,7 +99,7 @@ struct sfc_mcdi { efsys_mem_t mem; enum sfc_mcdi_state state; efx_mcdi_transport_t transport; - bool logging; + uint32_t logtype; uint32_t proxy_handle; efx_rc_t proxy_result; }; @@ -156,6 +151,24 @@ struct sfc_port { uint32_t mac_stats_mask[EFX_MAC_STATS_MASK_NPAGES]; }; +struct sfc_rss_hf_rte_to_efx { + uint64_t rte; + efx_rx_hash_type_t efx; +}; + +struct sfc_rss { + unsigned int channels; + efx_rx_scale_context_type_t context_type; + efx_rx_hash_support_t hash_support; + efx_rx_hash_alg_t hash_alg; + unsigned int hf_map_nb_entries; + struct sfc_rss_hf_rte_to_efx *hf_map; + + efx_rx_hash_type_t hash_types; + unsigned int tbl[EFX_RSS_TBL_SIZE]; + uint8_t key[EFX_RSS_KEY_SIZE]; +}; + /* Adapter private data */ struct sfc_adapter { /* @@ -170,7 +183,7 @@ struct sfc_adapter { uint16_t port_id; struct rte_eth_dev *eth_dev; struct rte_kvargs *kvargs; - bool debug_init; + uint32_t logtype_main; int socket_id; efsys_bar_t mem_bar; efx_family_t family; @@ -225,15 +238,9 @@ struct sfc_adapter { boolean_t tso; - unsigned int rss_channels; + uint32_t rxd_wait_timeout_ns; -#if EFSYS_OPT_RX_SCALE - efx_rx_scale_context_type_t rss_support; - efx_rx_hash_support_t hash_support; - efx_rx_hash_type_t rss_hash_types; - unsigned int rss_tbl[EFX_RSS_TBL_SIZE]; - uint8_t rss_key[EFX_RSS_KEY_SIZE]; -#endif + struct sfc_rss rss; /* * Shared memory copy of the Rx datapath name to be used by @@ -302,6 +309,10 @@ int sfc_dma_alloc(const struct sfc_adapter *sa, const char *name, uint16_t id, size_t len, int socket_id, efsys_mem_t *esmp); void sfc_dma_free(const struct sfc_adapter *sa, efsys_mem_t *esmp); +uint32_t sfc_register_logtype(struct sfc_adapter *sa, + const char *lt_prefix_str, + uint32_t ll_default); + int sfc_probe(struct sfc_adapter *sa); void sfc_unprobe(struct sfc_adapter *sa); int sfc_attach(struct sfc_adapter *sa); diff --git a/drivers/net/sfc/sfc_dp.c b/drivers/net/sfc/sfc_dp.c index 9a5ca20b..b121dc09 100644 --- a/drivers/net/sfc/sfc_dp.c +++ b/drivers/net/sfc/sfc_dp.c @@ -14,6 +14,7 @@ #include #include "sfc_dp.h" +#include "sfc_log.h" void sfc_dp_queue_init(struct sfc_dp_queue *dpq, uint16_t port_id, uint16_t queue_id, @@ -63,8 +64,8 @@ int sfc_dp_register(struct sfc_dp_list *head, struct sfc_dp *entry) { if (sfc_dp_find_by_name(head, entry->type, entry->name) != NULL) { - rte_log(RTE_LOG_ERR, RTE_LOGTYPE_PMD, - "sfc %s dapapath '%s' already registered\n", + SFC_GENERIC_LOG(ERR, + "sfc %s datapath '%s' already registered", entry->type == SFC_DP_RX ? "Rx" : entry->type == SFC_DP_TX ?
"Tx" : "unknown", diff --git a/drivers/net/sfc/sfc_dp.h b/drivers/net/sfc/sfc_dp.h index b142532d..3da65abe 100644 --- a/drivers/net/sfc/sfc_dp.h +++ b/drivers/net/sfc/sfc_dp.h @@ -15,6 +15,8 @@ #include +#include "sfc_log.h" + #ifdef __cplusplus extern "C" { #endif @@ -58,10 +60,10 @@ void sfc_dp_queue_init(struct sfc_dp_queue *dpq, const struct sfc_dp_queue *_dpq = (dpq); \ const struct rte_pci_addr *_addr = &(_dpq)->pci_addr; \ \ - RTE_LOG(level, PMD, \ + SFC_GENERIC_LOG(level, \ RTE_FMT("%s " PCI_PRI_FMT \ " #%" PRIu16 ".%" PRIu16 ": " \ - RTE_FMT_HEAD(__VA_ARGS__,) "\n", \ + RTE_FMT_HEAD(__VA_ARGS__ ,), \ dp_name, \ _addr->domain, _addr->bus, \ _addr->devid, _addr->function, \ @@ -77,7 +79,8 @@ struct sfc_dp { enum sfc_dp_type type; /* Mask of required hardware/firmware capabilities */ unsigned int hw_fw_caps; -#define SFC_DP_HW_FW_CAP_EF10 0x1 +#define SFC_DP_HW_FW_CAP_EF10 0x1 +#define SFC_DP_HW_FW_CAP_RX_ES_SUPER_BUFFER 0x2 }; /** List of datapath variants */ diff --git a/drivers/net/sfc/sfc_dp_rx.h b/drivers/net/sfc/sfc_dp_rx.h index be725dcb..83faad16 100644 --- a/drivers/net/sfc/sfc_dp_rx.h +++ b/drivers/net/sfc/sfc_dp_rx.h @@ -78,6 +78,8 @@ struct sfc_dp_rx_qcreate_info { * doorbell */ volatile void *mem_bar; + /** VI window size shift */ + unsigned int vi_window_shift; }; /** @@ -87,11 +89,24 @@ struct sfc_dp_rx_qcreate_info { */ typedef void (sfc_dp_rx_get_dev_info_t)(struct rte_eth_dev_info *dev_info); +/** + * Test if an Rx datapath supports specific mempool ops. + * + * @param pool The name of the pool operations to test. + * + * @return Check status. + * @retval 0 Best mempool ops choice. + * @retval 1 Mempool ops are supported. + * @retval -ENOTSUP Mempool ops not supported. + */ +typedef int (sfc_dp_rx_pool_ops_supported_t)(const char *pool); + /** * Get size of receive and event queue rings by the number of Rx - * descriptors. + * descriptors and mempool configuration. * * @param nb_rx_desc Number of Rx descriptors + * @param mb_pool mbuf pool with Rx buffers * @param rxq_entries Location for number of Rx ring entries * @param evq_entries Location for number of event ring entries * @param rxq_max_fill_level Location for maximum Rx ring fill level @@ -99,6 +114,7 @@ typedef void (sfc_dp_rx_get_dev_info_t)(struct rte_eth_dev_info *dev_info); * @return 0 or positive errno. */ typedef int (sfc_dp_rx_qsize_up_rings_t)(uint16_t nb_rx_desc, + struct rte_mempool *mb_pool, unsigned int *rxq_entries, unsigned int *evq_entries, unsigned int *rxq_max_fill_level); @@ -145,6 +161,12 @@ typedef void (sfc_dp_rx_qstop_t)(struct sfc_dp_rxq *dp_rxq, */ typedef bool (sfc_dp_rx_qrx_ev_t)(struct sfc_dp_rxq *dp_rxq, unsigned int id); +/** + * Packed stream receive event handler used during queue flush only. + */ +typedef bool (sfc_dp_rx_qrx_ps_ev_t)(struct sfc_dp_rxq *dp_rxq, + unsigned int id); + /** * Receive queue purge function called after queue flush. 
* @@ -171,13 +193,17 @@ struct sfc_dp_rx { #define SFC_DP_RX_FEAT_SCATTER 0x1 #define SFC_DP_RX_FEAT_MULTI_PROCESS 0x2 #define SFC_DP_RX_FEAT_TUNNELS 0x4 +#define SFC_DP_RX_FEAT_FLOW_FLAG 0x8 +#define SFC_DP_RX_FEAT_FLOW_MARK 0x10 sfc_dp_rx_get_dev_info_t *get_dev_info; + sfc_dp_rx_pool_ops_supported_t *pool_ops_supported; sfc_dp_rx_qsize_up_rings_t *qsize_up_rings; sfc_dp_rx_qcreate_t *qcreate; sfc_dp_rx_qdestroy_t *qdestroy; sfc_dp_rx_qstart_t *qstart; sfc_dp_rx_qstop_t *qstop; sfc_dp_rx_qrx_ev_t *qrx_ev; + sfc_dp_rx_qrx_ps_ev_t *qrx_ps_ev; sfc_dp_rx_qpurge_t *qpurge; sfc_dp_rx_supported_ptypes_get_t *supported_ptypes_get; sfc_dp_rx_qdesc_npending_t *qdesc_npending; @@ -203,6 +229,7 @@ sfc_dp_find_rx_by_caps(struct sfc_dp_list *head, unsigned int avail_caps) extern struct sfc_dp_rx sfc_efx_rx; extern struct sfc_dp_rx sfc_ef10_rx; +extern struct sfc_dp_rx sfc_ef10_essb_rx; #ifdef __cplusplus } diff --git a/drivers/net/sfc/sfc_dp_tx.h b/drivers/net/sfc/sfc_dp_tx.h index 0c1aad90..a075612c 100644 --- a/drivers/net/sfc/sfc_dp_tx.h +++ b/drivers/net/sfc/sfc_dp_tx.h @@ -57,6 +57,8 @@ struct sfc_dp_tx_qcreate_info { unsigned int hw_index; /** Virtual address of the memory-mapped BAR to push Tx doorbell */ volatile void *mem_bar; + /** VI window size shift */ + unsigned int vi_window_shift; }; /** diff --git a/drivers/net/sfc/sfc_ef10.h b/drivers/net/sfc/sfc_ef10.h index ace6a1dd..a73e0bde 100644 --- a/drivers/net/sfc/sfc_ef10.h +++ b/drivers/net/sfc/sfc_ef10.h @@ -79,6 +79,40 @@ sfc_ef10_ev_present(const efx_qword_t ev) ~EFX_QWORD_FIELD(ev, EFX_DWORD_1); } + +/** + * Alignment requirement for value written to RX WPTR: + * the WPTR must be aligned to an 8 descriptor boundary. + */ +#define SFC_EF10_RX_WPTR_ALIGN 8u + +static inline void +sfc_ef10_rx_qpush(volatile void *doorbell, unsigned int added, + unsigned int ptr_mask) +{ + efx_dword_t dword; + + /* Hardware has alignment restriction for WPTR */ + RTE_BUILD_BUG_ON(SFC_RX_REFILL_BULK % SFC_EF10_RX_WPTR_ALIGN != 0); + SFC_ASSERT(RTE_ALIGN(added, SFC_EF10_RX_WPTR_ALIGN) == added); + + EFX_POPULATE_DWORD_1(dword, ERF_DZ_RX_DESC_WPTR, added & ptr_mask); + + /* DMA sync to device is not required */ + + /* + * rte_write32() has rte_io_wmb() which guarantees that the STORE + * operations (i.e. Rx and event descriptor updates) that precede + * the rte_io_wmb() call are visible to NIC before the STORE + * operations that follow it (i.e. doorbell write). + */ + rte_write32(dword.ed_u32[0], doorbell); +} + + +const uint32_t * sfc_ef10_supported_ptypes_get(uint32_t tunnel_encaps); + + #ifdef __cplusplus } #endif diff --git a/drivers/net/sfc/sfc_ef10_essb_rx.c b/drivers/net/sfc/sfc_ef10_essb_rx.c new file mode 100644 index 00000000..5f5af602 --- /dev/null +++ b/drivers/net/sfc/sfc_ef10_essb_rx.c @@ -0,0 +1,700 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright (c) 2017-2018 Solarflare Communications Inc. + * All rights reserved. + * + * This software was jointly developed between OKTET Labs (under contract + * for Solarflare) and Solarflare Communications, Inc. + */ + +/* EF10 equal stride packed stream receive native datapath implementation */ + +#include + +#include +#include +#include +#include + +#include "efx.h" +#include "efx_types.h" +#include "efx_regs.h" +#include "efx_regs_ef10.h" + +#include "sfc_tweak.h" +#include "sfc_dp_rx.h" +#include "sfc_kvargs.h" +#include "sfc_ef10.h" + +/* Tunnels are not supported */ +#define SFC_EF10_RX_EV_ENCAP_SUPPORT 0 +#include "sfc_ef10_rx_ev.h" + +#define sfc_ef10_essb_rx_err(dpq, ...) 
\ + SFC_DP_LOG(SFC_KVARG_DATAPATH_EF10_ESSB, ERR, dpq, __VA_ARGS__) + +#define sfc_ef10_essb_rx_info(dpq, ...) \ + SFC_DP_LOG(SFC_KVARG_DATAPATH_EF10_ESSB, INFO, dpq, __VA_ARGS__) + +/* + * Fake length for RXQ descriptors in equal stride super-buffer mode + * to make hardware happy. + */ +#define SFC_EF10_ESSB_RX_FAKE_BUF_SIZE 32 + +/** + * Minimum number of Rx buffers the datapath allows. + * + * Each HW Rx descriptor covers many Rx buffers. The number of buffers + * in one HW Rx descriptor is equal to the size of the contiguous block + * provided by the Rx buffers memory pool. The contiguous block size + * depends on CONFIG_RTE_DRIVER_MEMPOOL_BUCKET_SIZE_KB and the rte_mbuf + * data size specified at memory pool creation. A typical rte_mbuf + * data size of about 2k gives a bit less than 32 buffers per + * contiguous block with the default bucket size of 64k. + * Since HW Rx descriptors are pushed by 8 (see SFC_EF10_RX_WPTR_ALIGN), + * the required minimum is about 256. The advertised minimum is double + * that to allow for at least 2 refill blocks. + */ +#define SFC_EF10_ESSB_RX_DESCS_MIN 512 + +/** + * Alignment requirement for the number of Rx buffers. + * + * There are no extra alignment requirements since the actual number of + * pushed Rx buffers is a multiple of the contiguous block size, which + * is unknown beforehand. + */ +#define SFC_EF10_ESSB_RX_DESCS_ALIGN 1 + +/** + * Maximum number of descriptors/buffers in the Rx ring. + * It must guarantee that the corresponding event queue never overfills. + */ +#define SFC_EF10_ESSB_RXQ_LIMIT(_nevs) \ + ((_nevs) - 1 /* head must not step on tail */ - \ + (SFC_EF10_EV_PER_CACHE_LINE - 1) /* max unused EvQ entries */ - \ + 1 /* Rx error */ - 1 /* flush */) + +struct sfc_ef10_essb_rx_sw_desc { + struct rte_mbuf *first_mbuf; +}; + +struct sfc_ef10_essb_rxq { + /* Used on data path */ + unsigned int flags; +#define SFC_EF10_ESSB_RXQ_STARTED 0x1 +#define SFC_EF10_ESSB_RXQ_NOT_RUNNING 0x2 +#define SFC_EF10_ESSB_RXQ_EXCEPTION 0x4 + unsigned int rxq_ptr_mask; + unsigned int block_size; + unsigned int buf_stride; + unsigned int bufs_ptr; + unsigned int completed; + unsigned int pending_id; + unsigned int bufs_pending; + unsigned int left_in_completed; + unsigned int left_in_pending; + unsigned int evq_read_ptr; + unsigned int evq_ptr_mask; + efx_qword_t *evq_hw_ring; + struct sfc_ef10_essb_rx_sw_desc *sw_ring; + uint16_t port_id; + + /* Used on refill */ + unsigned int added; + unsigned int max_fill_level; + unsigned int refill_threshold; + struct rte_mempool *refill_mb_pool; + efx_qword_t *rxq_hw_ring; + volatile void *doorbell; + + /* Datapath receive queue anchor */ + struct sfc_dp_rxq dp; +}; + +static inline struct sfc_ef10_essb_rxq * +sfc_ef10_essb_rxq_by_dp_rxq(struct sfc_dp_rxq *dp_rxq) +{ + return container_of(dp_rxq, struct sfc_ef10_essb_rxq, dp); +} + +static struct rte_mbuf * +sfc_ef10_essb_next_mbuf(const struct sfc_ef10_essb_rxq *rxq, + struct rte_mbuf *mbuf) +{ + return (struct rte_mbuf *)((uintptr_t)mbuf + rxq->buf_stride); +} + +static struct rte_mbuf * +sfc_ef10_essb_mbuf_by_index(const struct sfc_ef10_essb_rxq *rxq, + struct rte_mbuf *mbuf, unsigned int idx) +{ + return (struct rte_mbuf *)((uintptr_t)mbuf + idx * rxq->buf_stride); +} + +static struct rte_mbuf * +sfc_ef10_essb_maybe_next_completed(struct sfc_ef10_essb_rxq *rxq) +{ + const struct sfc_ef10_essb_rx_sw_desc *rxd; + + if (rxq->left_in_completed != 0) { + rxd = &rxq->sw_ring[rxq->completed & rxq->rxq_ptr_mask]; + return sfc_ef10_essb_mbuf_by_index(rxq, rxd->first_mbuf,
rxq->block_size - rxq->left_in_completed); + } else { + rxq->completed++; + rxd = &rxq->sw_ring[rxq->completed & rxq->rxq_ptr_mask]; + rxq->left_in_completed = rxq->block_size; + return rxd->first_mbuf; + } +} + +static void +sfc_ef10_essb_rx_qrefill(struct sfc_ef10_essb_rxq *rxq) +{ + const unsigned int rxq_ptr_mask = rxq->rxq_ptr_mask; + unsigned int free_space; + unsigned int bulks; + void *mbuf_blocks[SFC_EF10_RX_WPTR_ALIGN]; + unsigned int added = rxq->added; + + free_space = rxq->max_fill_level - (added - rxq->completed); + + if (free_space < rxq->refill_threshold) + return; + + bulks = free_space / RTE_DIM(mbuf_blocks); + /* refill_threshold guarantees that bulks is positive */ + SFC_ASSERT(bulks > 0); + + do { + unsigned int id; + unsigned int i; + + if (unlikely(rte_mempool_get_contig_blocks(rxq->refill_mb_pool, + mbuf_blocks, RTE_DIM(mbuf_blocks)) < 0)) { + struct rte_eth_dev_data *dev_data = + rte_eth_devices[rxq->port_id].data; + + /* + * It is hardly a safe way to increment counter + * from different contexts, but all PMDs do it. + */ + dev_data->rx_mbuf_alloc_failed += RTE_DIM(mbuf_blocks); + /* Return if we have posted nothing yet */ + if (added == rxq->added) + return; + /* Push posted */ + break; + } + + for (i = 0, id = added & rxq_ptr_mask; + i < RTE_DIM(mbuf_blocks); + ++i, ++id) { + struct rte_mbuf *m = mbuf_blocks[i]; + struct sfc_ef10_essb_rx_sw_desc *rxd; + + SFC_ASSERT((id & ~rxq_ptr_mask) == 0); + rxd = &rxq->sw_ring[id]; + rxd->first_mbuf = m; + + /* RX_KER_BYTE_CNT is ignored by firmware */ + EFX_POPULATE_QWORD_2(rxq->rxq_hw_ring[id], + ESF_DZ_RX_KER_BYTE_CNT, + SFC_EF10_ESSB_RX_FAKE_BUF_SIZE, + ESF_DZ_RX_KER_BUF_ADDR, + rte_mbuf_data_iova_default(m)); + } + + added += RTE_DIM(mbuf_blocks); + + } while (--bulks > 0); + + SFC_ASSERT(rxq->added != added); + rxq->added = added; + sfc_ef10_rx_qpush(rxq->doorbell, added, rxq_ptr_mask); +} + +static bool +sfc_ef10_essb_rx_event_get(struct sfc_ef10_essb_rxq *rxq, efx_qword_t *rx_ev) +{ + *rx_ev = rxq->evq_hw_ring[rxq->evq_read_ptr & rxq->evq_ptr_mask]; + + if (!sfc_ef10_ev_present(*rx_ev)) + return false; + + if (unlikely(EFX_QWORD_FIELD(*rx_ev, FSF_AZ_EV_CODE) != + FSE_AZ_EV_CODE_RX_EV)) { + /* + * Do not move read_ptr to keep the event for exception + * handling + */ + rxq->flags |= SFC_EF10_ESSB_RXQ_EXCEPTION; + sfc_ef10_essb_rx_err(&rxq->dp.dpq, + "RxQ exception at EvQ read ptr %#x", + rxq->evq_read_ptr); + return false; + } + + rxq->evq_read_ptr++; + return true; +} + +static void +sfc_ef10_essb_rx_process_ev(struct sfc_ef10_essb_rxq *rxq, efx_qword_t rx_ev) +{ + unsigned int ready; + + ready = (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_DSC_PTR_LBITS) - + rxq->bufs_ptr) & + EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS); + + rxq->bufs_ptr += ready; + rxq->bufs_pending += ready; + + SFC_ASSERT(ready > 0); + do { + const struct sfc_ef10_essb_rx_sw_desc *rxd; + struct rte_mbuf *m; + unsigned int todo_bufs; + struct rte_mbuf *m0; + + rxd = &rxq->sw_ring[rxq->pending_id]; + m = sfc_ef10_essb_mbuf_by_index(rxq, rxd->first_mbuf, + rxq->block_size - rxq->left_in_pending); + + if (ready < rxq->left_in_pending) { + todo_bufs = ready; + ready = 0; + rxq->left_in_pending -= todo_bufs; + } else { + todo_bufs = rxq->left_in_pending; + ready -= todo_bufs; + rxq->left_in_pending = rxq->block_size; + if (rxq->pending_id != rxq->rxq_ptr_mask) + rxq->pending_id++; + else + rxq->pending_id = 0; + } + + SFC_ASSERT(todo_bufs > 0); + --todo_bufs; + + sfc_ef10_rx_ev_to_offloads(rx_ev, m, ~0ull); + + /* Prefetch pseudo-header */ + rte_prefetch0((uint8_t 
*)m->buf_addr + RTE_PKTMBUF_HEADROOM); + + m0 = m; + while (todo_bufs-- > 0) { + m = sfc_ef10_essb_next_mbuf(rxq, m); + m->ol_flags = m0->ol_flags; + m->packet_type = m0->packet_type; + /* Prefetch pseudo-header */ + rte_prefetch0((uint8_t *)m->buf_addr + + RTE_PKTMBUF_HEADROOM); + } + } while (ready > 0); +} + +static unsigned int +sfc_ef10_essb_rx_get_pending(struct sfc_ef10_essb_rxq *rxq, + struct rte_mbuf **rx_pkts, uint16_t nb_pkts) +{ + unsigned int n_rx_pkts = 0; + unsigned int todo_bufs; + struct rte_mbuf *m; + + while ((todo_bufs = RTE_MIN(nb_pkts - n_rx_pkts, + rxq->bufs_pending)) > 0) { + m = sfc_ef10_essb_maybe_next_completed(rxq); + + todo_bufs = RTE_MIN(todo_bufs, rxq->left_in_completed); + + rxq->bufs_pending -= todo_bufs; + rxq->left_in_completed -= todo_bufs; + + SFC_ASSERT(todo_bufs > 0); + todo_bufs--; + + do { + const efx_qword_t *qwordp; + uint16_t pkt_len; + + rx_pkts[n_rx_pkts++] = m; + + /* Parse pseudo-header */ + qwordp = (const efx_qword_t *) + ((uint8_t *)m->buf_addr + RTE_PKTMBUF_HEADROOM); + pkt_len = + EFX_QWORD_FIELD(*qwordp, + ES_EZ_ESSB_RX_PREFIX_DATA_LEN); + + m->data_off = RTE_PKTMBUF_HEADROOM + + ES_EZ_ESSB_RX_PREFIX_LEN; + m->port = rxq->port_id; + + rte_pktmbuf_pkt_len(m) = pkt_len; + rte_pktmbuf_data_len(m) = pkt_len; + + m->ol_flags |= + (PKT_RX_RSS_HASH * + !!EFX_TEST_QWORD_BIT(*qwordp, + ES_EZ_ESSB_RX_PREFIX_HASH_VALID_LBN)) | + (PKT_RX_FDIR_ID * + !!EFX_TEST_QWORD_BIT(*qwordp, + ES_EZ_ESSB_RX_PREFIX_MARK_VALID_LBN)) | + (PKT_RX_FDIR * + !!EFX_TEST_QWORD_BIT(*qwordp, + ES_EZ_ESSB_RX_PREFIX_MATCH_FLAG_LBN)); + + /* EFX_QWORD_FIELD converts little-endian to CPU */ + m->hash.rss = + EFX_QWORD_FIELD(*qwordp, + ES_EZ_ESSB_RX_PREFIX_HASH); + m->hash.fdir.hi = + EFX_QWORD_FIELD(*qwordp, + ES_EZ_ESSB_RX_PREFIX_MARK); + + m = sfc_ef10_essb_next_mbuf(rxq, m); + } while (todo_bufs-- > 0); + } + + return n_rx_pkts; +} + + +static uint16_t +sfc_ef10_essb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(rx_queue); + const unsigned int evq_old_read_ptr = rxq->evq_read_ptr; + uint16_t n_rx_pkts; + efx_qword_t rx_ev; + + if (unlikely(rxq->flags & (SFC_EF10_ESSB_RXQ_NOT_RUNNING | + SFC_EF10_ESSB_RXQ_EXCEPTION))) + return 0; + + n_rx_pkts = sfc_ef10_essb_rx_get_pending(rxq, rx_pkts, nb_pkts); + + while (n_rx_pkts != nb_pkts && + sfc_ef10_essb_rx_event_get(rxq, &rx_ev)) { + /* + * DROP_EVENT is internal to the NIC; software should + * never see it and, therefore, may ignore it. + */ + + sfc_ef10_essb_rx_process_ev(rxq, rx_ev); + n_rx_pkts += sfc_ef10_essb_rx_get_pending(rxq, + rx_pkts + n_rx_pkts, + nb_pkts - n_rx_pkts); + } + + sfc_ef10_ev_qclear(rxq->evq_hw_ring, rxq->evq_ptr_mask, + evq_old_read_ptr, rxq->evq_read_ptr); + + /* It is not a problem if we refill in the case of exception */ + sfc_ef10_essb_rx_qrefill(rxq); + + return n_rx_pkts; +} + +static sfc_dp_rx_qdesc_npending_t sfc_ef10_essb_rx_qdesc_npending; +static unsigned int +sfc_ef10_essb_rx_qdesc_npending(__rte_unused struct sfc_dp_rxq *dp_rxq) +{ + /* + * A correct implementation requires EvQ polling and event + * processing.
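The pseudo-header parsing in sfc_ef10_essb_rx_get_pending above composes ol_flags without branches: each flag constant is multiplied by a 0/1 predicate produced by !!. A minimal standalone illustration of the idiom follows; the EX_FLAG_* names and the prefix word are placeholders, not the real mbuf flags or hardware prefix layout.

#include <stdint.h>
#include <stdio.h>

#define EX_FLAG_HASH_VALID (1ull << 0) /* placeholder, not PKT_RX_RSS_HASH */
#define EX_FLAG_MARK_VALID (1ull << 1) /* placeholder, not PKT_RX_FDIR_ID */

int
main(void)
{
	uint64_t prefix = 0x5; /* pretend hardware prefix word */
	uint64_t flags = 0;

	/*
	 * !!(expr) folds any non-zero bit test to exactly 1, so each
	 * product below is either the flag or 0 and no branch is taken.
	 */
	flags |= (EX_FLAG_HASH_VALID * !!(prefix & 0x1)) |
		 (EX_FLAG_MARK_VALID * !!(prefix & 0x2));

	printf("flags=%#llx\n", (unsigned long long)flags);
	return 0;
}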
+ */ + return -ENOTSUP; +} + +static sfc_dp_rx_qdesc_status_t sfc_ef10_essb_rx_qdesc_status; +static int +sfc_ef10_essb_rx_qdesc_status(__rte_unused struct sfc_dp_rxq *dp_rxq, + __rte_unused uint16_t offset) +{ + return -ENOTSUP; +} + +static sfc_dp_rx_get_dev_info_t sfc_ef10_essb_rx_get_dev_info; +static void +sfc_ef10_essb_rx_get_dev_info(struct rte_eth_dev_info *dev_info) +{ + /* + * Number of descriptors just defines maximum number of pushed + * descriptors (fill level). + */ + dev_info->rx_desc_lim.nb_min = SFC_EF10_ESSB_RX_DESCS_MIN; + dev_info->rx_desc_lim.nb_align = SFC_EF10_ESSB_RX_DESCS_ALIGN; +} + +static sfc_dp_rx_pool_ops_supported_t sfc_ef10_essb_rx_pool_ops_supported; +static int +sfc_ef10_essb_rx_pool_ops_supported(const char *pool) +{ + SFC_ASSERT(pool != NULL); + + if (strcmp(pool, "bucket") == 0) + return 0; + + return -ENOTSUP; +} + +static sfc_dp_rx_qsize_up_rings_t sfc_ef10_essb_rx_qsize_up_rings; +static int +sfc_ef10_essb_rx_qsize_up_rings(uint16_t nb_rx_desc, + struct rte_mempool *mb_pool, + unsigned int *rxq_entries, + unsigned int *evq_entries, + unsigned int *rxq_max_fill_level) +{ + int rc; + struct rte_mempool_info mp_info; + unsigned int nb_hw_rx_desc; + unsigned int max_events; + + rc = rte_mempool_ops_get_info(mb_pool, &mp_info); + if (rc != 0) + return -rc; + if (mp_info.contig_block_size == 0) + return EINVAL; + + /* + * Calculate the required number of hardware Rx descriptors, each + * carrying contig_block_size Rx buffers. + * It cannot be less than Rx write pointer alignment plus 1 + * in order to avoid cases when the ring is guaranteed to be + * empty. + */ + nb_hw_rx_desc = RTE_MAX(SFC_DIV_ROUND_UP(nb_rx_desc, + mp_info.contig_block_size), + SFC_EF10_RX_WPTR_ALIGN + 1); + if (nb_hw_rx_desc <= EFX_RXQ_MINNDESCS) { + *rxq_entries = EFX_RXQ_MINNDESCS; + } else { + *rxq_entries = rte_align32pow2(nb_hw_rx_desc); + if (*rxq_entries > EFX_RXQ_MAXNDESCS) + return EINVAL; + } + + max_events = RTE_ALIGN_FLOOR(nb_hw_rx_desc, SFC_EF10_RX_WPTR_ALIGN) * + mp_info.contig_block_size + + (SFC_EF10_EV_PER_CACHE_LINE - 1) /* max unused EvQ entries */ + + 1 /* Rx error */ + 1 /* flush */ + 1 /* head-tail space */; + + *evq_entries = rte_align32pow2(max_events); + *evq_entries = RTE_MAX(*evq_entries, (unsigned int)EFX_EVQ_MINNEVS); + *evq_entries = RTE_MIN(*evq_entries, (unsigned int)EFX_EVQ_MAXNEVS); + + /* + * Even the maximum event queue size may be insufficient to handle + * so many Rx descriptors. If so, limit the Rx queue fill level.
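To make the ring sizing above concrete, a worked standalone example follows; the request of 2048 descriptors, the contiguous block size of 32 and the local DIV_ROUND_UP stand-in (the driver uses SFC_DIV_ROUND_UP) are assumed values for illustration.

#include <stdio.h>

#define DIV_ROUND_UP(a, b) (((a) + (b) - 1) / (b)) /* SFC_DIV_ROUND_UP stand-in */

int
main(void)
{
	unsigned int nb_rx_desc = 2048;        /* requested buffers (assumed) */
	unsigned int contig_block_size = 32;   /* from the mempool (assumed) */
	unsigned int wptr_align = 8;           /* SFC_EF10_RX_WPTR_ALIGN */
	unsigned int nb_hw_rx_desc;

	/* One HW descriptor per contiguous block of Rx buffers */
	nb_hw_rx_desc = DIV_ROUND_UP(nb_rx_desc, contig_block_size);
	if (nb_hw_rx_desc < wptr_align + 1)
		nb_hw_rx_desc = wptr_align + 1;

	/* 2048 buffers at 32 per block -> 64 HW descriptors */
	printf("nb_hw_rx_desc=%u\n", nb_hw_rx_desc);
	return 0;
}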
+ */ + *rxq_max_fill_level = RTE_MIN(nb_rx_desc, + SFC_EF10_ESSB_RXQ_LIMIT(*evq_entries)); + return 0; +} + +static sfc_dp_rx_qcreate_t sfc_ef10_essb_rx_qcreate; +static int +sfc_ef10_essb_rx_qcreate(uint16_t port_id, uint16_t queue_id, + const struct rte_pci_addr *pci_addr, int socket_id, + const struct sfc_dp_rx_qcreate_info *info, + struct sfc_dp_rxq **dp_rxqp) +{ + struct rte_mempool * const mp = info->refill_mb_pool; + struct rte_mempool_info mp_info; + struct sfc_ef10_essb_rxq *rxq; + int rc; + + rc = rte_mempool_ops_get_info(mp, &mp_info); + if (rc != 0) { + /* Positive errno is used in the driver */ + rc = -rc; + goto fail_get_contig_block_size; + } + + /* Check if the mempool provides block dequeue */ + rc = EINVAL; + if (mp_info.contig_block_size == 0) + goto fail_no_block_dequeue; + + rc = ENOMEM; + rxq = rte_zmalloc_socket("sfc-ef10-rxq", sizeof(*rxq), + RTE_CACHE_LINE_SIZE, socket_id); + if (rxq == NULL) + goto fail_rxq_alloc; + + sfc_dp_queue_init(&rxq->dp.dpq, port_id, queue_id, pci_addr); + + rc = ENOMEM; + rxq->sw_ring = rte_calloc_socket("sfc-ef10-rxq-sw_ring", + info->rxq_entries, + sizeof(*rxq->sw_ring), + RTE_CACHE_LINE_SIZE, socket_id); + if (rxq->sw_ring == NULL) + goto fail_desc_alloc; + + rxq->block_size = mp_info.contig_block_size; + rxq->buf_stride = mp->header_size + mp->elt_size + mp->trailer_size; + rxq->rxq_ptr_mask = info->rxq_entries - 1; + rxq->evq_ptr_mask = info->evq_entries - 1; + rxq->evq_hw_ring = info->evq_hw_ring; + rxq->port_id = port_id; + + rxq->max_fill_level = info->max_fill_level / mp_info.contig_block_size; + rxq->refill_threshold = + RTE_MAX(info->refill_threshold / mp_info.contig_block_size, + SFC_EF10_RX_WPTR_ALIGN); + rxq->refill_mb_pool = mp; + rxq->rxq_hw_ring = info->rxq_hw_ring; + + rxq->doorbell = (volatile uint8_t *)info->mem_bar + + ER_DZ_RX_DESC_UPD_REG_OFST + + (info->hw_index << info->vi_window_shift); + + sfc_ef10_essb_rx_info(&rxq->dp.dpq, + "block size is %u, buf stride is %u", + rxq->block_size, rxq->buf_stride); + sfc_ef10_essb_rx_info(&rxq->dp.dpq, + "max fill level is %u descs (%u bufs), " + "refill threshold %u descs (%u bufs)", + rxq->max_fill_level, + rxq->max_fill_level * rxq->block_size, + rxq->refill_threshold, + rxq->refill_threshold * rxq->block_size); + + *dp_rxqp = &rxq->dp; + return 0; + +fail_desc_alloc: + rte_free(rxq); + +fail_rxq_alloc: +fail_no_block_dequeue: +fail_get_contig_block_size: + return rc; +} + +static sfc_dp_rx_qdestroy_t sfc_ef10_essb_rx_qdestroy; +static void +sfc_ef10_essb_rx_qdestroy(struct sfc_dp_rxq *dp_rxq) +{ + struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq); + + rte_free(rxq->sw_ring); + rte_free(rxq); +} + +static sfc_dp_rx_qstart_t sfc_ef10_essb_rx_qstart; +static int +sfc_ef10_essb_rx_qstart(struct sfc_dp_rxq *dp_rxq, unsigned int evq_read_ptr) +{ + struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq); + + rxq->evq_read_ptr = evq_read_ptr; + + /* Initialize before refill */ + rxq->completed = rxq->pending_id = rxq->added = 0; + rxq->left_in_completed = rxq->left_in_pending = rxq->block_size; + rxq->bufs_ptr = UINT_MAX; + rxq->bufs_pending = 0; + + sfc_ef10_essb_rx_qrefill(rxq); + + rxq->flags |= SFC_EF10_ESSB_RXQ_STARTED; + rxq->flags &= + ~(SFC_EF10_ESSB_RXQ_NOT_RUNNING | SFC_EF10_ESSB_RXQ_EXCEPTION); + + return 0; +} + +static sfc_dp_rx_qstop_t sfc_ef10_essb_rx_qstop; +static void +sfc_ef10_essb_rx_qstop(struct sfc_dp_rxq *dp_rxq, unsigned int *evq_read_ptr) +{ + struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq); + + rxq->flags
|= SFC_EF10_ESSB_RXQ_NOT_RUNNING; + + *evq_read_ptr = rxq->evq_read_ptr; +} + +static sfc_dp_rx_qrx_ev_t sfc_ef10_essb_rx_qrx_ev; +static bool +sfc_ef10_essb_rx_qrx_ev(struct sfc_dp_rxq *dp_rxq, __rte_unused unsigned int id) +{ + __rte_unused struct sfc_ef10_essb_rxq *rxq; + + rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq); + SFC_ASSERT(rxq->flags & SFC_EF10_ESSB_RXQ_NOT_RUNNING); + + /* + * It is safe to ignore Rx event since we free all mbufs on + * queue purge anyway. + */ + + return false; +} + +static sfc_dp_rx_qpurge_t sfc_ef10_essb_rx_qpurge; +static void +sfc_ef10_essb_rx_qpurge(struct sfc_dp_rxq *dp_rxq) +{ + struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq); + unsigned int i, j; + const struct sfc_ef10_essb_rx_sw_desc *rxd; + struct rte_mbuf *m; + + if (rxq->completed != rxq->added && rxq->left_in_completed > 0) { + rxd = &rxq->sw_ring[rxq->completed & rxq->rxq_ptr_mask]; + m = sfc_ef10_essb_mbuf_by_index(rxq, rxd->first_mbuf, + rxq->block_size - rxq->left_in_completed); + do { + rxq->left_in_completed--; + rte_mempool_put(rxq->refill_mb_pool, m); + m = sfc_ef10_essb_next_mbuf(rxq, m); + } while (rxq->left_in_completed > 0); + rxq->completed++; + } + + for (i = rxq->completed; i != rxq->added; ++i) { + rxd = &rxq->sw_ring[i & rxq->rxq_ptr_mask]; + m = rxd->first_mbuf; + for (j = 0; j < rxq->block_size; ++j) { + rte_mempool_put(rxq->refill_mb_pool, m); + m = sfc_ef10_essb_next_mbuf(rxq, m); + } + } + + rxq->flags &= ~SFC_EF10_ESSB_RXQ_STARTED; +} + +struct sfc_dp_rx sfc_ef10_essb_rx = { + .dp = { + .name = SFC_KVARG_DATAPATH_EF10_ESSB, + .type = SFC_DP_RX, + .hw_fw_caps = SFC_DP_HW_FW_CAP_EF10 | + SFC_DP_HW_FW_CAP_RX_ES_SUPER_BUFFER, + }, + .features = SFC_DP_RX_FEAT_FLOW_FLAG | + SFC_DP_RX_FEAT_FLOW_MARK, + .get_dev_info = sfc_ef10_essb_rx_get_dev_info, + .pool_ops_supported = sfc_ef10_essb_rx_pool_ops_supported, + .qsize_up_rings = sfc_ef10_essb_rx_qsize_up_rings, + .qcreate = sfc_ef10_essb_rx_qcreate, + .qdestroy = sfc_ef10_essb_rx_qdestroy, + .qstart = sfc_ef10_essb_rx_qstart, + .qstop = sfc_ef10_essb_rx_qstop, + .qrx_ev = sfc_ef10_essb_rx_qrx_ev, + .qpurge = sfc_ef10_essb_rx_qpurge, + .supported_ptypes_get = sfc_ef10_supported_ptypes_get, + .qdesc_npending = sfc_ef10_essb_rx_qdesc_npending, + .qdesc_status = sfc_ef10_essb_rx_qdesc_status, + .pkt_burst = sfc_ef10_essb_recv_pkts, +}; diff --git a/drivers/net/sfc/sfc_ef10_rx.c b/drivers/net/sfc/sfc_ef10_rx.c index 0b3e8fbb..42b35b9b 100644 --- a/drivers/net/sfc/sfc_ef10_rx.c +++ b/drivers/net/sfc/sfc_ef10_rx.c @@ -26,15 +26,12 @@ #include "sfc_kvargs.h" #include "sfc_ef10.h" +#define SFC_EF10_RX_EV_ENCAP_SUPPORT 1 +#include "sfc_ef10_rx_ev.h" + #define sfc_ef10_rx_err(dpq, ...) \ SFC_DP_LOG(SFC_KVARG_DATAPATH_EF10, ERR, dpq, __VA_ARGS__) -/** - * Alignment requirement for value written to RX WPTR: - * the WPTR must be aligned to an 8 descriptor boundary. - */ -#define SFC_EF10_RX_WPTR_ALIGN 8 - /** * Maximum number of descriptors/buffers in the Rx ring. * It should guarantee that corresponding event queue never overfill. 
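With sfc_ef10_rx_qpush shared via sfc_ef10.h, both EF10 Rx datapaths follow the same refill-then-push discipline: descriptors are added in bulks that keep the write pointer 8-aligned, then a single doorbell write publishes them. A minimal standalone sketch of that discipline follows; the ring size is assumed and the doorbell is modelled as a plain variable rather than a mapped register.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define WPTR_ALIGN 8u /* mirrors SFC_EF10_RX_WPTR_ALIGN */

int
main(void)
{
	unsigned int ptr_mask = 511; /* 512-entry ring (assumed) */
	unsigned int added = 0;
	unsigned int bulk;
	uint32_t wptr;

	/* Refill only in bulks that are multiples of the alignment */
	for (bulk = 0; bulk < 3; bulk++)
		added += 2 * WPTR_ALIGN;

	/* The invariant the real push helper asserts before writing */
	assert(added % WPTR_ALIGN == 0);

	/* A single doorbell write publishes everything added so far */
	wptr = added & ptr_mask;
	printf("write wptr=%u after adding %u descriptors\n", wptr, added);
	return 0;
}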
@@ -87,29 +84,6 @@ sfc_ef10_rxq_by_dp_rxq(struct sfc_dp_rxq *dp_rxq) return container_of(dp_rxq, struct sfc_ef10_rxq, dp); } -static void -sfc_ef10_rx_qpush(struct sfc_ef10_rxq *rxq) -{ - efx_dword_t dword; - - /* Hardware has alignment restriction for WPTR */ - RTE_BUILD_BUG_ON(SFC_RX_REFILL_BULK % SFC_EF10_RX_WPTR_ALIGN != 0); - SFC_ASSERT(RTE_ALIGN(rxq->added, SFC_EF10_RX_WPTR_ALIGN) == rxq->added); - - EFX_POPULATE_DWORD_1(dword, ERF_DZ_RX_DESC_WPTR, - rxq->added & rxq->ptr_mask); - - /* DMA sync to device is not required */ - - /* - * rte_write32() has rte_io_wmb() which guarantees that the STORE - * operations (i.e. Rx and event descriptor updates) that precede - * the rte_io_wmb() call are visible to NIC before the STORE - * operations that follow it (i.e. doorbell write). - */ - rte_write32(dword.ed_u32[0], rxq->doorbell); -} - static void sfc_ef10_rx_qrefill(struct sfc_ef10_rxq *rxq) { @@ -120,6 +94,8 @@ sfc_ef10_rx_qrefill(struct sfc_ef10_rxq *rxq) void *objs[SFC_RX_REFILL_BULK]; unsigned int added = rxq->added; + RTE_BUILD_BUG_ON(SFC_RX_REFILL_BULK % SFC_EF10_RX_WPTR_ALIGN != 0); + free_space = rxq->max_fill_level - (added - rxq->completed); if (free_space < rxq->refill_threshold) @@ -178,7 +154,7 @@ sfc_ef10_rx_qrefill(struct sfc_ef10_rxq *rxq) SFC_ASSERT(rxq->added != added); rxq->added = added; - sfc_ef10_rx_qpush(rxq); + sfc_ef10_rx_qpush(rxq->doorbell, added, ptr_mask); } static void @@ -225,137 +201,6 @@ sfc_ef10_rx_prepared(struct sfc_ef10_rxq *rxq, struct rte_mbuf **rx_pkts, return n_rx_pkts; } -static void -sfc_ef10_rx_ev_to_offloads(struct sfc_ef10_rxq *rxq, const efx_qword_t rx_ev, - struct rte_mbuf *m) -{ - uint32_t tun_ptype = 0; - /* Which event bit is mapped to PKT_RX_IP_CKSUM_* */ - int8_t ip_csum_err_bit; - /* Which event bit is mapped to PKT_RX_L4_CKSUM_* */ - int8_t l4_csum_err_bit; - uint32_t l2_ptype = 0; - uint32_t l3_ptype = 0; - uint32_t l4_ptype = 0; - uint64_t ol_flags = 0; - - if (unlikely(EFX_TEST_QWORD_BIT(rx_ev, ESF_DZ_RX_PARSE_INCOMPLETE_LBN))) - goto done; - - switch (EFX_QWORD_FIELD(rx_ev, ESF_EZ_RX_ENCAP_HDR)) { - default: - /* Unexpected encapsulation tag class */ - SFC_ASSERT(false); - /* FALLTHROUGH */ - case ESE_EZ_ENCAP_HDR_NONE: - break; - case ESE_EZ_ENCAP_HDR_VXLAN: - /* - * It is definitely UDP, but we have no information - * about IPv4 vs IPv6 and VLAN tagging. - */ - tun_ptype = RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP; - break; - case ESE_EZ_ENCAP_HDR_GRE: - /* - * We have no information about IPv4 vs IPv6 and VLAN tagging. - */ - tun_ptype = RTE_PTYPE_TUNNEL_NVGRE; - break; - } - - if (tun_ptype == 0) { - ip_csum_err_bit = ESF_DZ_RX_IPCKSUM_ERR_LBN; - l4_csum_err_bit = ESF_DZ_RX_TCPUDP_CKSUM_ERR_LBN; - } else { - ip_csum_err_bit = ESF_EZ_RX_IP_INNER_CHKSUM_ERR_LBN; - l4_csum_err_bit = ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR_LBN; - if (unlikely(EFX_TEST_QWORD_BIT(rx_ev, - ESF_DZ_RX_IPCKSUM_ERR_LBN))) - ol_flags |= PKT_RX_EIP_CKSUM_BAD; - } - - switch (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_ETH_TAG_CLASS)) { - case ESE_DZ_ETH_TAG_CLASS_NONE: - l2_ptype = (tun_ptype == 0) ? RTE_PTYPE_L2_ETHER : - RTE_PTYPE_INNER_L2_ETHER; - break; - case ESE_DZ_ETH_TAG_CLASS_VLAN1: - l2_ptype = (tun_ptype == 0) ? RTE_PTYPE_L2_ETHER_VLAN : - RTE_PTYPE_INNER_L2_ETHER_VLAN; - break; - case ESE_DZ_ETH_TAG_CLASS_VLAN2: - l2_ptype = (tun_ptype == 0) ? 
RTE_PTYPE_L2_ETHER_QINQ : - RTE_PTYPE_INNER_L2_ETHER_QINQ; - break; - default: - /* Unexpected Eth tag class */ - SFC_ASSERT(false); - } - - switch (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_L3_CLASS)) { - case ESE_DZ_L3_CLASS_IP4_FRAG: - l4_ptype = (tun_ptype == 0) ? RTE_PTYPE_L4_FRAG : - RTE_PTYPE_INNER_L4_FRAG; - /* FALLTHROUGH */ - case ESE_DZ_L3_CLASS_IP4: - l3_ptype = (tun_ptype == 0) ? RTE_PTYPE_L3_IPV4_EXT_UNKNOWN : - RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN; - ol_flags |= PKT_RX_RSS_HASH | - ((EFX_TEST_QWORD_BIT(rx_ev, ip_csum_err_bit)) ? - PKT_RX_IP_CKSUM_BAD : PKT_RX_IP_CKSUM_GOOD); - break; - case ESE_DZ_L3_CLASS_IP6_FRAG: - l4_ptype = (tun_ptype == 0) ? RTE_PTYPE_L4_FRAG : - RTE_PTYPE_INNER_L4_FRAG; - /* FALLTHROUGH */ - case ESE_DZ_L3_CLASS_IP6: - l3_ptype = (tun_ptype == 0) ? RTE_PTYPE_L3_IPV6_EXT_UNKNOWN : - RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN; - ol_flags |= PKT_RX_RSS_HASH; - break; - case ESE_DZ_L3_CLASS_ARP: - /* Override Layer 2 packet type */ - /* There is no ARP classification for inner packets */ - if (tun_ptype == 0) - l2_ptype = RTE_PTYPE_L2_ETHER_ARP; - break; - default: - /* Unexpected Layer 3 class */ - SFC_ASSERT(false); - } - - switch (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_L4_CLASS)) { - case ESE_DZ_L4_CLASS_TCP: - l4_ptype = (tun_ptype == 0) ? RTE_PTYPE_L4_TCP : - RTE_PTYPE_INNER_L4_TCP; - ol_flags |= - (EFX_TEST_QWORD_BIT(rx_ev, l4_csum_err_bit)) ? - PKT_RX_L4_CKSUM_BAD : PKT_RX_L4_CKSUM_GOOD; - break; - case ESE_DZ_L4_CLASS_UDP: - l4_ptype = (tun_ptype == 0) ? RTE_PTYPE_L4_UDP : - RTE_PTYPE_INNER_L4_UDP; - ol_flags |= - (EFX_TEST_QWORD_BIT(rx_ev, l4_csum_err_bit)) ? - PKT_RX_L4_CKSUM_BAD : PKT_RX_L4_CKSUM_GOOD; - break; - case ESE_DZ_L4_CLASS_UNKNOWN: - break; - default: - /* Unexpected Layer 4 class */ - SFC_ASSERT(false); - } - - /* Remove RSS hash offload flag if RSS is not enabled */ - if (~rxq->flags & SFC_EF10_RXQ_RSS_HASH) - ol_flags &= ~PKT_RX_RSS_HASH; - -done: - m->ol_flags = ol_flags; - m->packet_type = tun_ptype | l2_ptype | l3_ptype | l4_ptype; -} - static uint16_t sfc_ef10_rx_pseudo_hdr_get_len(const uint8_t *pseudo_hdr) { @@ -414,7 +259,10 @@ sfc_ef10_rx_process_event(struct sfc_ef10_rxq *rxq, efx_qword_t rx_ev, m->rearm_data[0] = rxq->rearm_data; /* Classify packet based on Rx event */ - sfc_ef10_rx_ev_to_offloads(rxq, rx_ev, m); + /* Mask RSS hash offload flag if RSS is not enabled */ + sfc_ef10_rx_ev_to_offloads(rx_ev, m, + (rxq->flags & SFC_EF10_RXQ_RSS_HASH) ? 
+ ~0ull : ~PKT_RX_RSS_HASH); /* data_off already moved past pseudo header */ pseudo_hdr = (uint8_t *)m->buf_addr + RTE_PKTMBUF_HEADROOM; @@ -538,7 +386,7 @@ sfc_ef10_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) return n_rx_pkts; } -static const uint32_t * +const uint32_t * sfc_ef10_supported_ptypes_get(uint32_t tunnel_encaps) { static const uint32_t ef10_native_ptypes[] = { @@ -587,8 +435,8 @@ sfc_ef10_supported_ptypes_get(uint32_t tunnel_encaps) 1u << EFX_TUNNEL_PROTOCOL_NVGRE): return ef10_overlay_ptypes; default: - RTE_LOG(ERR, PMD, - "Unexpected set of supported tunnel encapsulations: %#x\n", + SFC_GENERIC_LOG(ERR, + "Unexpected set of supported tunnel encapsulations: %#x", tunnel_encaps); /* FALLTHROUGH */ case 0: @@ -632,6 +480,7 @@ sfc_ef10_rx_get_dev_info(struct rte_eth_dev_info *dev_info) static sfc_dp_rx_qsize_up_rings_t sfc_ef10_rx_qsize_up_rings; static int sfc_ef10_rx_qsize_up_rings(uint16_t nb_rx_desc, + __rte_unused struct rte_mempool *mb_pool, unsigned int *rxq_entries, unsigned int *evq_entries, unsigned int *rxq_max_fill_level) @@ -716,7 +565,7 @@ sfc_ef10_rx_qcreate(uint16_t port_id, uint16_t queue_id, rxq->rxq_hw_ring = info->rxq_hw_ring; rxq->doorbell = (volatile uint8_t *)info->mem_bar + ER_DZ_RX_DESC_UPD_REG_OFST + - info->hw_index * ER_DZ_RX_DESC_UPD_REG_STEP; + (info->hw_index << info->vi_window_shift); *dp_rxqp = &rxq->dp; return 0; diff --git a/drivers/net/sfc/sfc_ef10_rx_ev.h b/drivers/net/sfc/sfc_ef10_rx_ev.h new file mode 100644 index 00000000..615bd29b --- /dev/null +++ b/drivers/net/sfc/sfc_ef10_rx_ev.h @@ -0,0 +1,169 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright (c) 2018 Solarflare Communications Inc. + * All rights reserved. + * + * This software was jointly developed between OKTET Labs (under contract + * for Solarflare) and Solarflare Communications, Inc. + */ + +#ifndef _SFC_EF10_RX_EV_H +#define _SFC_EF10_RX_EV_H + +#include + +#include "efx_types.h" +#include "efx_regs.h" +#include "efx_regs_ef10.h" + +#ifdef __cplusplus +extern "C" { +#endif + +static inline void +sfc_ef10_rx_ev_to_offloads(const efx_qword_t rx_ev, struct rte_mbuf *m, + uint64_t ol_mask) +{ + uint32_t tun_ptype = 0; + /* Which event bit is mapped to PKT_RX_IP_CKSUM_* */ + int8_t ip_csum_err_bit; + /* Which event bit is mapped to PKT_RX_L4_CKSUM_* */ + int8_t l4_csum_err_bit; + uint32_t l2_ptype = 0; + uint32_t l3_ptype = 0; + uint32_t l4_ptype = 0; + uint64_t ol_flags = 0; + + if (unlikely(rx_ev.eq_u64[0] & + rte_cpu_to_le_64((1ull << ESF_DZ_RX_ECC_ERR_LBN) | + (1ull << ESF_DZ_RX_ECRC_ERR_LBN) | + (1ull << ESF_DZ_RX_PARSE_INCOMPLETE_LBN)))) + goto done; + +#if SFC_EF10_RX_EV_ENCAP_SUPPORT + switch (EFX_QWORD_FIELD(rx_ev, ESF_EZ_RX_ENCAP_HDR)) { + default: + /* Unexpected encapsulation tag class */ + SFC_ASSERT(false); + /* FALLTHROUGH */ + case ESE_EZ_ENCAP_HDR_NONE: + break; + case ESE_EZ_ENCAP_HDR_VXLAN: + /* + * It is definitely UDP, but we have no information + * about IPv4 vs IPv6 and VLAN tagging. + */ + tun_ptype = RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP; + break; + case ESE_EZ_ENCAP_HDR_GRE: + /* + * We have no information about IPv4 vs IPv6 and VLAN tagging. 
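This header is specialized at compile time: each consumer defines SFC_EF10_RX_EV_ENCAP_SUPPORT before including it, so the tunnel classification above is compiled out entirely for the equal stride super-buffer datapath. A minimal standalone sketch of the same pattern follows, with EX_ENCAP_SUPPORT playing the role of the real macro.

#include <stdio.h>

/* Each consumer defines the policy macro before pulling in the parser */
#define EX_ENCAP_SUPPORT 1 /* the ESSB consumer would use 0 */

static unsigned int
ex_parse(unsigned int ev)
{
	unsigned int tun_ptype = 0;

#if EX_ENCAP_SUPPORT
	/* Tunnel classification exists only when the consumer asks for it */
	if (ev & 0x4)
		tun_ptype = 1;
#endif
	return tun_ptype;
}

int
main(void)
{
	printf("tun_ptype=%u\n", ex_parse(0x4));
	return 0;
}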
+ */ + tun_ptype = RTE_PTYPE_TUNNEL_NVGRE; + break; + } +#endif + + if (tun_ptype == 0) { + ip_csum_err_bit = ESF_DZ_RX_IPCKSUM_ERR_LBN; + l4_csum_err_bit = ESF_DZ_RX_TCPUDP_CKSUM_ERR_LBN; + } else { + ip_csum_err_bit = ESF_EZ_RX_IP_INNER_CHKSUM_ERR_LBN; + l4_csum_err_bit = ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR_LBN; + if (unlikely(EFX_TEST_QWORD_BIT(rx_ev, + ESF_DZ_RX_IPCKSUM_ERR_LBN))) + ol_flags |= PKT_RX_EIP_CKSUM_BAD; + } + + switch (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_ETH_TAG_CLASS)) { + case ESE_DZ_ETH_TAG_CLASS_NONE: + l2_ptype = (tun_ptype == 0) ? RTE_PTYPE_L2_ETHER : + RTE_PTYPE_INNER_L2_ETHER; + break; + case ESE_DZ_ETH_TAG_CLASS_VLAN1: + l2_ptype = (tun_ptype == 0) ? RTE_PTYPE_L2_ETHER_VLAN : + RTE_PTYPE_INNER_L2_ETHER_VLAN; + break; + case ESE_DZ_ETH_TAG_CLASS_VLAN2: + l2_ptype = (tun_ptype == 0) ? RTE_PTYPE_L2_ETHER_QINQ : + RTE_PTYPE_INNER_L2_ETHER_QINQ; + break; + default: + /* Unexpected Eth tag class */ + SFC_ASSERT(false); + } + + switch (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_L3_CLASS)) { + case ESE_DZ_L3_CLASS_IP4_FRAG: + l4_ptype = (tun_ptype == 0) ? RTE_PTYPE_L4_FRAG : + RTE_PTYPE_INNER_L4_FRAG; + /* FALLTHROUGH */ + case ESE_DZ_L3_CLASS_IP4: + l3_ptype = (tun_ptype == 0) ? RTE_PTYPE_L3_IPV4_EXT_UNKNOWN : + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN; + ol_flags |= PKT_RX_RSS_HASH | + ((EFX_TEST_QWORD_BIT(rx_ev, ip_csum_err_bit)) ? + PKT_RX_IP_CKSUM_BAD : PKT_RX_IP_CKSUM_GOOD); + break; + case ESE_DZ_L3_CLASS_IP6_FRAG: + l4_ptype = (tun_ptype == 0) ? RTE_PTYPE_L4_FRAG : + RTE_PTYPE_INNER_L4_FRAG; + /* FALLTHROUGH */ + case ESE_DZ_L3_CLASS_IP6: + l3_ptype = (tun_ptype == 0) ? RTE_PTYPE_L3_IPV6_EXT_UNKNOWN : + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN; + ol_flags |= PKT_RX_RSS_HASH; + break; + case ESE_DZ_L3_CLASS_ARP: + /* Override Layer 2 packet type */ + /* There is no ARP classification for inner packets */ + if (tun_ptype == 0) + l2_ptype = RTE_PTYPE_L2_ETHER_ARP; + break; + default: + /* Unexpected Layer 3 class */ + SFC_ASSERT(false); + } + + /* + * RX_L4_CLASS is 3 bits wide on Huntington and Medford, but is only + * 2 bits wide on Medford2. Check it is safe to use the Medford2 field + * and values for all EF10 controllers. + */ + RTE_BUILD_BUG_ON(ESF_FZ_RX_L4_CLASS_LBN != ESF_DE_RX_L4_CLASS_LBN); + switch (EFX_QWORD_FIELD(rx_ev, ESF_FZ_RX_L4_CLASS)) { + case ESE_FZ_L4_CLASS_TCP: + RTE_BUILD_BUG_ON(ESE_FZ_L4_CLASS_TCP != ESE_DE_L4_CLASS_TCP); + l4_ptype = (tun_ptype == 0) ? RTE_PTYPE_L4_TCP : + RTE_PTYPE_INNER_L4_TCP; + ol_flags |= + (EFX_TEST_QWORD_BIT(rx_ev, l4_csum_err_bit)) ? + PKT_RX_L4_CKSUM_BAD : PKT_RX_L4_CKSUM_GOOD; + break; + case ESE_FZ_L4_CLASS_UDP: + RTE_BUILD_BUG_ON(ESE_FZ_L4_CLASS_UDP != ESE_DE_L4_CLASS_UDP); + l4_ptype = (tun_ptype == 0) ? RTE_PTYPE_L4_UDP : + RTE_PTYPE_INNER_L4_UDP; + ol_flags |= + (EFX_TEST_QWORD_BIT(rx_ev, l4_csum_err_bit)) ? 
+ PKT_RX_L4_CKSUM_BAD : PKT_RX_L4_CKSUM_GOOD; + break; + case ESE_FZ_L4_CLASS_UNKNOWN: + RTE_BUILD_BUG_ON(ESE_FZ_L4_CLASS_UNKNOWN != + ESE_DE_L4_CLASS_UNKNOWN); + break; + default: + /* Unexpected Layer 4 class */ + SFC_ASSERT(false); + } + +done: + m->ol_flags = ol_flags & ol_mask; + m->packet_type = tun_ptype | l2_ptype | l3_ptype | l4_ptype; +} + + +#ifdef __cplusplus +} +#endif +#endif /* _SFC_EF10_RX_EV_H */ diff --git a/drivers/net/sfc/sfc_ef10_tx.c b/drivers/net/sfc/sfc_ef10_tx.c index 12387972..d0daa3b3 100644 --- a/drivers/net/sfc/sfc_ef10_tx.c +++ b/drivers/net/sfc/sfc_ef10_tx.c @@ -531,7 +531,7 @@ sfc_ef10_tx_qcreate(uint16_t port_id, uint16_t queue_id, txq->txq_hw_ring = info->txq_hw_ring; txq->doorbell = (volatile uint8_t *)info->mem_bar + ER_DZ_TX_DESC_UPD_REG_OFST + - info->hw_index * ER_DZ_TX_DESC_UPD_REG_STEP; + (info->hw_index << info->vi_window_shift); txq->evq_hw_ring = info->evq_hw_ring; *dp_txqp = &txq->dp; diff --git a/drivers/net/sfc/sfc_ethdev.c b/drivers/net/sfc/sfc_ethdev.c index 89a45290..1b6499f8 100644 --- a/drivers/net/sfc/sfc_ethdev.c +++ b/drivers/net/sfc/sfc_ethdev.c @@ -13,6 +13,7 @@ #include #include #include +#include #include "efx.h" @@ -27,6 +28,8 @@ #include "sfc_dp.h" #include "sfc_dp_rx.h" +uint32_t sfc_logtype_driver; + static struct sfc_dp_list sfc_dp_head = TAILQ_HEAD_INITIALIZER(sfc_dp_head); @@ -82,12 +85,11 @@ static void sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) { struct sfc_adapter *sa = dev->data->dev_private; - const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic); + struct sfc_rss *rss = &sa->rss; uint64_t txq_offloads_def = 0; sfc_log_init(sa, "entry"); - dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev); dev_info->max_rx_pktlen = EFX_MAC_PDU_MAX; /* Autonegotiation may be disabled */ @@ -96,8 +98,14 @@ sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->speed_capa |= ETH_LINK_SPEED_1G; if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_10000FDX) dev_info->speed_capa |= ETH_LINK_SPEED_10G; + if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_25000FDX) + dev_info->speed_capa |= ETH_LINK_SPEED_25G; if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_40000FDX) dev_info->speed_capa |= ETH_LINK_SPEED_40G; + if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_50000FDX) + dev_info->speed_capa |= ETH_LINK_SPEED_50G; + if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_100000FDX) + dev_info->speed_capa |= ETH_LINK_SPEED_100G; dev_info->max_rx_queues = sa->rxq_max; dev_info->max_tx_queues = sa->txq_max; @@ -130,27 +138,17 @@ sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->default_txconf.offloads |= txq_offloads_def; - dev_info->default_txconf.txq_flags = ETH_TXQ_FLAGS_NOXSUMSCTP; - if ((~sa->dp_tx->features & SFC_DP_TX_FEAT_VLAN_INSERT) || - !encp->enc_hw_tx_insert_vlan_enabled) - dev_info->default_txconf.txq_flags |= ETH_TXQ_FLAGS_NOVLANOFFL; - - if (~sa->dp_tx->features & SFC_DP_TX_FEAT_MULTI_SEG) - dev_info->default_txconf.txq_flags |= ETH_TXQ_FLAGS_NOMULTSEGS; - - if (~sa->dp_tx->features & SFC_DP_TX_FEAT_MULTI_POOL) - dev_info->default_txconf.txq_flags |= ETH_TXQ_FLAGS_NOMULTMEMP; + if (rss->context_type != EFX_RX_SCALE_UNAVAILABLE) { + uint64_t rte_hf = 0; + unsigned int i; - if (~sa->dp_tx->features & SFC_DP_TX_FEAT_REFCNT) - dev_info->default_txconf.txq_flags |= ETH_TXQ_FLAGS_NOREFCOUNT; + for (i = 0; i < rss->hf_map_nb_entries; ++i) + rte_hf |= rss->hf_map[i].rte; -#if EFSYS_OPT_RX_SCALE - if (sa->rss_support != EFX_RX_SCALE_UNAVAILABLE) { dev_info->reta_size = 
EFX_RSS_TBL_SIZE; dev_info->hash_key_size = EFX_RSS_KEY_SIZE; - dev_info->flow_type_rss_offloads = SFC_RSS_OFFLOADS; + dev_info->flow_type_rss_offloads = rte_hf; } -#endif /* Initialize to hardware limits */ dev_info->rx_desc_lim.nb_max = EFX_RXQ_MAXNDESCS; @@ -236,22 +234,13 @@ static int sfc_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete) { struct sfc_adapter *sa = dev->data->dev_private; - struct rte_eth_link *dev_link = &dev->data->dev_link; - struct rte_eth_link old_link; struct rte_eth_link current_link; + int ret; sfc_log_init(sa, "entry"); -retry: - EFX_STATIC_ASSERT(sizeof(*dev_link) == sizeof(rte_atomic64_t)); - *(int64_t *)&old_link = rte_atomic64_read((rte_atomic64_t *)dev_link); - if (sa->state != SFC_ADAPTER_STARTED) { sfc_port_link_mode_to_info(EFX_LINK_UNKNOWN, ¤t_link); - if (!rte_atomic64_cmpset((volatile uint64_t *)dev_link, - *(uint64_t *)&old_link, - *(uint64_t *)¤t_link)) - goto retry; } else if (wait_to_complete) { efx_link_mode_t link_mode; @@ -259,21 +248,17 @@ retry: link_mode = EFX_LINK_UNKNOWN; sfc_port_link_mode_to_info(link_mode, ¤t_link); - if (!rte_atomic64_cmpset((volatile uint64_t *)dev_link, - *(uint64_t *)&old_link, - *(uint64_t *)¤t_link)) - goto retry; } else { sfc_ev_mgmt_qpoll(sa); - *(int64_t *)¤t_link = - rte_atomic64_read((rte_atomic64_t *)dev_link); + rte_eth_linkstatus_get(dev, ¤t_link); } - if (old_link.link_status != current_link.link_status) - sfc_info(sa, "Link status is %s", - current_link.link_status ? "UP" : "DOWN"); + ret = rte_eth_linkstatus_set(dev, ¤t_link); + if (ret == 0) + sfc_notice(sa, "Link status is %s", + current_link.link_status ? "UP" : "DOWN"); - return old_link.link_status == current_link.link_status ? 0 : -1; + return ret; } static void @@ -664,7 +649,7 @@ sfc_xstats_get_names(struct rte_eth_dev *dev, for (i = 0; i < EFX_MAC_NSTATS; ++i) { if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) { if (xstats_names != NULL && nstats < xstats_count) - strncpy(xstats_names[nstats].name, + strlcpy(xstats_names[nstats].name, efx_mac_stat_name(sa->nic, i), sizeof(xstats_names[0].name)); nstats++; @@ -742,9 +727,8 @@ sfc_xstats_get_names_by_id(struct rte_eth_dev *dev, if ((ids == NULL) || (ids[nb_written] == nb_supported)) { char *name = xstats_names[nb_written++].name; - strncpy(name, efx_mac_stat_name(sa->nic, i), + strlcpy(name, efx_mac_stat_name(sa->nic, i), sizeof(xstats_names[0].name)); - name[sizeof(xstats_names[0].name) - 1] = '\0'; } ++nb_supported; @@ -920,13 +904,14 @@ fail_inval: SFC_ASSERT(rc > 0); return -rc; } -static void +static int sfc_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr) { struct sfc_adapter *sa = dev->data->dev_private; const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic); struct sfc_port *port = &sa->port; - int rc; + struct ether_addr *old_addr = &dev->data->mac_addrs[0]; + int rc = 0; sfc_adapter_lock(sa); @@ -936,15 +921,22 @@ sfc_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr) */ ether_addr_copy(mac_addr, &port->default_mac_addr); + /* + * Neither of the two following checks can return + * an error. The new MAC address is preserved in + * the device private data and can be activated + * on the next port start if the user prevents + * isolated mode from being enabled. 
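The MAC address path below restores the old address if the follow-up filter update fails, a plain try/undo pattern. A minimal standalone sketch of that pattern follows; ex_ctx, apply_addr and apply_filters are hypothetical stand-ins for the adapter state, efx_mac_addr_set and sfc_set_rx_mode.

#include <stdio.h>
#include <string.h>

struct ex_ctx {
	unsigned char mac[6];
};

/* Hypothetical stand-ins for efx_mac_addr_set() and sfc_set_rx_mode() */
static int
apply_addr(struct ex_ctx *c, const unsigned char *mac)
{
	memcpy(c->mac, mac, sizeof(c->mac));
	return 0;
}

static int
apply_filters(struct ex_ctx *c)
{
	(void)c;
	return -1; /* pretend the unicast filter update failed */
}

static int
set_mac_with_rollback(struct ex_ctx *c, const unsigned char *new_mac)
{
	unsigned char old[6];
	int rc;

	memcpy(old, c->mac, sizeof(old));

	rc = apply_addr(c, new_mac);
	if (rc != 0)
		return rc;

	rc = apply_filters(c);
	if (rc != 0) {
		/* Undo both steps; secondary failures are ignored */
		(void)apply_addr(c, old);
		(void)apply_filters(c);
	}
	return rc;
}

int
main(void)
{
	struct ex_ctx c = { {0} };
	static const unsigned char new_mac[6] = { 2, 0, 0, 0, 0, 1 };
	int rc = set_mac_with_rollback(&c, new_mac);

	printf("rc=%d mac[5]=%u (old address restored)\n", rc, c.mac[5]);
	return 0;
}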
+ */ if (port->isolated) { - sfc_err(sa, "isolated mode is active on the port"); - sfc_err(sa, "will not set MAC address"); + sfc_warn(sa, "isolated mode is active on the port"); + sfc_warn(sa, "will not set MAC address"); goto unlock; } if (sa->state != SFC_ADAPTER_STARTED) { - sfc_info(sa, "the port is not started"); - sfc_info(sa, "the new MAC address will be set on port start"); + sfc_notice(sa, "the port is not started"); + sfc_notice(sa, "the new MAC address will be set on port start"); goto unlock; } @@ -962,8 +954,12 @@ sfc_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr) * we also need to update unicast filters */ rc = sfc_set_rx_mode(sa); - if (rc != 0) + if (rc != 0) { sfc_err(sa, "cannot set filter (rc = %u)", rc); + /* Rollback the old address */ + (void)efx_mac_addr_set(sa->nic, old_addr->addr_bytes); + (void)sfc_set_rx_mode(sa); + } } else { sfc_warn(sa, "cannot set MAC address with filters installed"); sfc_warn(sa, "adapter will be restarted to pick the new MAC"); @@ -982,14 +978,13 @@ sfc_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr) } unlock: - /* - * In the case of failure sa->port->default_mac_addr does not - * need rollback since no error code is returned, and the upper - * API will anyway update the external MAC address storage. - * To be consistent with that new value it is better to keep - * the device private value the same. - */ + if (rc != 0) + ether_addr_copy(old_addr, &port->default_mac_addr); + sfc_adapter_unlock(sa); + + SFC_ASSERT(rc >= 0); + return -rc; } @@ -1352,18 +1347,18 @@ sfc_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, return sfc_dev_udp_tunnel_op(dev, tunnel_udp, SFC_UDP_TUNNEL_DEL_PORT); } -#if EFSYS_OPT_RX_SCALE static int sfc_dev_rss_hash_conf_get(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf) { struct sfc_adapter *sa = dev->data->dev_private; + struct sfc_rss *rss = &sa->rss; struct sfc_port *port = &sa->port; - if ((sa->rss_support != EFX_RX_SCALE_EXCLUSIVE) || port->isolated) + if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE || port->isolated) return -ENOTSUP; - if (sa->rss_channels == 0) + if (rss->channels == 0) return -EINVAL; sfc_adapter_lock(sa); @@ -1374,10 +1369,10 @@ sfc_dev_rss_hash_conf_get(struct rte_eth_dev *dev, * flags which corresponds to the active EFX configuration stored * locally in 'sfc_adapter' and kept up-to-date */ - rss_conf->rss_hf = sfc_efx_to_rte_hash_type(sa->rss_hash_types); + rss_conf->rss_hf = sfc_rx_hf_efx_to_rte(sa, rss->hash_types); rss_conf->rss_key_len = EFX_RSS_KEY_SIZE; if (rss_conf->rss_key != NULL) - rte_memcpy(rss_conf->rss_key, sa->rss_key, EFX_RSS_KEY_SIZE); + rte_memcpy(rss_conf->rss_key, rss->key, EFX_RSS_KEY_SIZE); sfc_adapter_unlock(sa); @@ -1389,6 +1384,7 @@ sfc_dev_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf) { struct sfc_adapter *sa = dev->data->dev_private; + struct sfc_rss *rss = &sa->rss; struct sfc_port *port = &sa->port; unsigned int efx_hash_types; int rc = 0; @@ -1396,35 +1392,31 @@ sfc_dev_rss_hash_update(struct rte_eth_dev *dev, if (port->isolated) return -ENOTSUP; - if (sa->rss_support != EFX_RX_SCALE_EXCLUSIVE) { + if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE) { sfc_err(sa, "RSS is not available"); return -ENOTSUP; } - if (sa->rss_channels == 0) { + if (rss->channels == 0) { sfc_err(sa, "RSS is not configured"); return -EINVAL; } if ((rss_conf->rss_key != NULL) && - (rss_conf->rss_key_len != sizeof(sa->rss_key))) { + (rss_conf->rss_key_len != sizeof(rss->key))) { sfc_err(sa, "RSS key size is 
wrong (should be %lu)", - sizeof(sa->rss_key)); - return -EINVAL; - } - - if ((rss_conf->rss_hf & ~SFC_RSS_OFFLOADS) != 0) { - sfc_err(sa, "unsupported hash functions requested"); + sizeof(rss->key)); return -EINVAL; } sfc_adapter_lock(sa); - efx_hash_types = sfc_rte_to_efx_hash_type(rss_conf->rss_hf); + rc = sfc_rx_hf_rte_to_efx(sa, rss_conf->rss_hf, &efx_hash_types); + if (rc != 0) + goto fail_rx_hf_rte_to_efx; rc = efx_rx_scale_mode_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT, - EFX_RX_HASHALG_TOEPLITZ, - efx_hash_types, B_TRUE); + rss->hash_alg, efx_hash_types, B_TRUE); if (rc != 0) goto fail_scale_mode_set; @@ -1433,15 +1425,15 @@ sfc_dev_rss_hash_update(struct rte_eth_dev *dev, rc = efx_rx_scale_key_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT, rss_conf->rss_key, - sizeof(sa->rss_key)); + sizeof(rss->key)); if (rc != 0) goto fail_scale_key_set; } - rte_memcpy(sa->rss_key, rss_conf->rss_key, sizeof(sa->rss_key)); + rte_memcpy(rss->key, rss_conf->rss_key, sizeof(rss->key)); } - sa->rss_hash_types = efx_hash_types; + rss->hash_types = efx_hash_types; sfc_adapter_unlock(sa); @@ -1450,10 +1442,11 @@ sfc_dev_rss_hash_update(struct rte_eth_dev *dev, fail_scale_key_set: if (efx_rx_scale_mode_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT, EFX_RX_HASHALG_TOEPLITZ, - sa->rss_hash_types, B_TRUE) != 0) + rss->hash_types, B_TRUE) != 0) sfc_err(sa, "failed to restore RSS mode"); fail_scale_mode_set: +fail_rx_hf_rte_to_efx: sfc_adapter_unlock(sa); return -rc; } @@ -1464,13 +1457,14 @@ sfc_dev_rss_reta_query(struct rte_eth_dev *dev, uint16_t reta_size) { struct sfc_adapter *sa = dev->data->dev_private; + struct sfc_rss *rss = &sa->rss; struct sfc_port *port = &sa->port; int entry; - if ((sa->rss_support != EFX_RX_SCALE_EXCLUSIVE) || port->isolated) + if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE || port->isolated) return -ENOTSUP; - if (sa->rss_channels == 0) + if (rss->channels == 0) return -EINVAL; if (reta_size != EFX_RSS_TBL_SIZE) @@ -1483,7 +1477,7 @@ sfc_dev_rss_reta_query(struct rte_eth_dev *dev, int grp_idx = entry % RTE_RETA_GROUP_SIZE; if ((reta_conf[grp].mask >> grp_idx) & 1) - reta_conf[grp].reta[grp_idx] = sa->rss_tbl[entry]; + reta_conf[grp].reta[grp_idx] = rss->tbl[entry]; } sfc_adapter_unlock(sa); @@ -1497,6 +1491,7 @@ sfc_dev_rss_reta_update(struct rte_eth_dev *dev, uint16_t reta_size) { struct sfc_adapter *sa = dev->data->dev_private; + struct sfc_rss *rss = &sa->rss; struct sfc_port *port = &sa->port; unsigned int *rss_tbl_new; uint16_t entry; @@ -1506,12 +1501,12 @@ sfc_dev_rss_reta_update(struct rte_eth_dev *dev, if (port->isolated) return -ENOTSUP; - if (sa->rss_support != EFX_RX_SCALE_EXCLUSIVE) { + if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE) { sfc_err(sa, "RSS is not available"); return -ENOTSUP; } - if (sa->rss_channels == 0) { + if (rss->channels == 0) { sfc_err(sa, "RSS is not configured"); return -EINVAL; } @@ -1522,13 +1517,13 @@ sfc_dev_rss_reta_update(struct rte_eth_dev *dev, return -EINVAL; } - rss_tbl_new = rte_zmalloc("rss_tbl_new", sizeof(sa->rss_tbl), 0); + rss_tbl_new = rte_zmalloc("rss_tbl_new", sizeof(rss->tbl), 0); if (rss_tbl_new == NULL) return -ENOMEM; sfc_adapter_lock(sa); - rte_memcpy(rss_tbl_new, sa->rss_tbl, sizeof(sa->rss_tbl)); + rte_memcpy(rss_tbl_new, rss->tbl, sizeof(rss->tbl)); for (entry = 0; entry < reta_size; entry++) { int grp_idx = entry % RTE_RETA_GROUP_SIZE; @@ -1537,7 +1532,7 @@ sfc_dev_rss_reta_update(struct rte_eth_dev *dev, grp = &reta_conf[entry / RTE_RETA_GROUP_SIZE]; if (grp->mask & (1ull << grp_idx)) { - if (grp->reta[grp_idx] >= sa->rss_channels) { 
+ if (grp->reta[grp_idx] >= rss->channels) { rc = EINVAL; goto bad_reta_entry; } @@ -1552,7 +1547,7 @@ sfc_dev_rss_reta_update(struct rte_eth_dev *dev, goto fail_scale_tbl_set; } - rte_memcpy(sa->rss_tbl, rss_tbl_new, sizeof(sa->rss_tbl)); + rte_memcpy(rss->tbl, rss_tbl_new, sizeof(rss->tbl)); fail_scale_tbl_set: bad_reta_entry: @@ -1563,7 +1558,6 @@ bad_reta_entry: SFC_ASSERT(rc >= 0); return -rc; } -#endif static int sfc_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type filter_type, @@ -1621,6 +1615,21 @@ sfc_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type filter_type, return -rc; } +static int +sfc_pool_ops_supported(struct rte_eth_dev *dev, const char *pool) +{ + struct sfc_adapter *sa = dev->data->dev_private; + + /* + * If Rx datapath does not provide callback to check mempool, + * all pools are supported. + */ + if (sa->dp_rx->pool_ops_supported == NULL) + return 1; + + return sa->dp_rx->pool_ops_supported(pool); +} + static const struct eth_dev_ops sfc_eth_dev_ops = { .dev_configure = sfc_dev_configure, .dev_start = sfc_dev_start, @@ -1658,12 +1667,10 @@ static const struct eth_dev_ops sfc_eth_dev_ops = { .mac_addr_set = sfc_mac_addr_set, .udp_tunnel_port_add = sfc_dev_udp_tunnel_port_add, .udp_tunnel_port_del = sfc_dev_udp_tunnel_port_del, -#if EFSYS_OPT_RX_SCALE .reta_update = sfc_dev_rss_reta_update, .reta_query = sfc_dev_rss_reta_query, .rss_hash_update = sfc_dev_rss_hash_update, .rss_hash_conf_get = sfc_dev_rss_hash_conf_get, -#endif .filter_ctrl = sfc_dev_filter_ctrl, .set_mc_addr_list = sfc_set_mc_addr_list, .rxq_info_get = sfc_rx_queue_info_get, @@ -1671,6 +1678,7 @@ static const struct eth_dev_ops sfc_eth_dev_ops = { .fw_version_get = sfc_fw_version_get, .xstats_get_by_id = sfc_xstats_get_by_id, .xstats_get_names_by_id = sfc_xstats_get_names_by_id, + .pool_ops_supported = sfc_pool_ops_supported, }; /** @@ -1700,6 +1708,7 @@ static int sfc_eth_dev_set_ops(struct rte_eth_dev *dev) { struct sfc_adapter *sa = dev->data->dev_private; + const efx_nic_cfg_t *encp; unsigned int avail_caps = 0; const char *rx_name = NULL; const char *tx_name = NULL; @@ -1708,12 +1717,17 @@ sfc_eth_dev_set_ops(struct rte_eth_dev *dev) switch (sa->family) { case EFX_FAMILY_HUNTINGTON: case EFX_FAMILY_MEDFORD: + case EFX_FAMILY_MEDFORD2: avail_caps |= SFC_DP_HW_FW_CAP_EF10; break; default: break; } + encp = efx_nic_cfg_get(sa->nic); + if (encp->enc_rx_es_super_buffer_supported) + avail_caps |= SFC_DP_HW_FW_CAP_RX_ES_SUPER_BUFFER; + rc = sfc_kvargs_process(sa, SFC_KVARG_RX_DATAPATH, sfc_kvarg_string_handler, &rx_name); if (rc != 0) @@ -1749,7 +1763,7 @@ sfc_eth_dev_set_ops(struct rte_eth_dev *dev) goto fail_dp_rx_name; } - sfc_info(sa, "use %s Rx datapath", sa->dp_rx_name); + sfc_notice(sa, "use %s Rx datapath", sa->dp_rx_name); dev->rx_pkt_burst = sa->dp_rx->pkt_burst; @@ -1788,7 +1802,7 @@ sfc_eth_dev_set_ops(struct rte_eth_dev *dev) goto fail_dp_tx_name; } - sfc_info(sa, "use %s Tx datapath", sa->dp_tx_name); + sfc_notice(sa, "use %s Tx datapath", sa->dp_tx_name); dev->tx_pkt_burst = sa->dp_tx->pkt_burst; @@ -1903,6 +1917,7 @@ sfc_register_dp(void) /* Register once */ if (TAILQ_EMPTY(&sfc_dp_head)) { /* Prefer EF10 datapath */ + sfc_dp_register(&sfc_dp_head, &sfc_ef10_essb_rx.dp); sfc_dp_register(&sfc_dp_head, &sfc_ef10_rx.dp); sfc_dp_register(&sfc_dp_head, &sfc_efx_rx.dp); @@ -1935,15 +1950,13 @@ sfc_eth_dev_init(struct rte_eth_dev *dev) /* Copy PCI device info to the dev->data */ rte_eth_copy_pci_info(dev, pci_dev); + sa->logtype_main = sfc_register_logtype(sa, 
SFC_LOGTYPE_MAIN_STR, + RTE_LOG_NOTICE); + rc = sfc_kvargs_parse(sa); if (rc != 0) goto fail_kvargs_parse; - rc = sfc_kvargs_process(sa, SFC_KVARG_DEBUG_INIT, - sfc_kvarg_bool_handler, &sa->debug_init); - if (rc != 0) - goto fail_kvarg_debug_init; - sfc_log_init(sa, "entry"); dev->data->mac_addrs = rte_zmalloc("sfc", ETHER_ADDR_LEN, 0); @@ -1997,7 +2010,6 @@ fail_probe: dev->data->mac_addrs = NULL; fail_mac_addrs: -fail_kvarg_debug_init: sfc_kvargs_cleanup(sa); fail_kvargs_parse: @@ -2048,6 +2060,8 @@ static const struct rte_pci_id pci_id_sfc_efx_map[] = { { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_GREENPORT_VF) }, { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD) }, { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD_VF) }, + { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD2) }, + { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD2_VF) }, { .vendor_id = 0 /* sentinel */ } }; @@ -2079,6 +2093,17 @@ RTE_PMD_REGISTER_PARAM_STRING(net_sfc_efx, SFC_KVARG_RX_DATAPATH "=" SFC_KVARG_VALUES_RX_DATAPATH " " SFC_KVARG_TX_DATAPATH "=" SFC_KVARG_VALUES_TX_DATAPATH " " SFC_KVARG_PERF_PROFILE "=" SFC_KVARG_VALUES_PERF_PROFILE " " - SFC_KVARG_STATS_UPDATE_PERIOD_MS "= " - SFC_KVARG_MCDI_LOGGING "=" SFC_KVARG_VALUES_BOOL " " - SFC_KVARG_DEBUG_INIT "=" SFC_KVARG_VALUES_BOOL); + SFC_KVARG_FW_VARIANT "=" SFC_KVARG_VALUES_FW_VARIANT " " + SFC_KVARG_RXD_WAIT_TIMEOUT_NS "= " + SFC_KVARG_STATS_UPDATE_PERIOD_MS "="); + +RTE_INIT(sfc_driver_register_logtype); +static void +sfc_driver_register_logtype(void) +{ + int ret; + + ret = rte_log_register_type_and_pick_level(SFC_LOGTYPE_PREFIX "driver", + RTE_LOG_NOTICE); + sfc_logtype_driver = (ret < 0) ? RTE_LOGTYPE_PMD : ret; +} diff --git a/drivers/net/sfc/sfc_ev.c b/drivers/net/sfc/sfc_ev.c index 7abe61ae..f93d30e5 100644 --- a/drivers/net/sfc/sfc_ev.c +++ b/drivers/net/sfc/sfc_ev.c @@ -162,6 +162,35 @@ sfc_ev_dp_rx(void *arg, __rte_unused uint32_t label, uint32_t id, return evq->sa->dp_rx->qrx_ev(dp_rxq, id); } +static boolean_t +sfc_ev_nop_rx_ps(void *arg, uint32_t label, uint32_t id, + uint32_t pkt_count, uint16_t flags) +{ + struct sfc_evq *evq = arg; + + sfc_err(evq->sa, + "EVQ %u unexpected packed stream Rx event label=%u id=%#x pkt_count=%u flags=%#x", + evq->evq_index, label, id, pkt_count, flags); + return B_TRUE; +} + +/* It is not actually used on datapath, but required on RxQ flush */ +static boolean_t +sfc_ev_dp_rx_ps(void *arg, __rte_unused uint32_t label, uint32_t id, + __rte_unused uint32_t pkt_count, __rte_unused uint16_t flags) +{ + struct sfc_evq *evq = arg; + struct sfc_dp_rxq *dp_rxq; + + dp_rxq = evq->dp_rxq; + SFC_ASSERT(dp_rxq != NULL); + + if (evq->sa->dp_rx->qrx_ps_ev != NULL) + return evq->sa->dp_rx->qrx_ps_ev(dp_rxq, id); + else + return B_FALSE; +} + static boolean_t sfc_ev_nop_tx(void *arg, uint32_t label, uint32_t id) { @@ -382,27 +411,11 @@ sfc_ev_link_change(void *arg, efx_link_mode_t link_mode) { struct sfc_evq *evq = arg; struct sfc_adapter *sa = evq->sa; - struct rte_eth_link *dev_link = &sa->eth_dev->data->dev_link; struct rte_eth_link new_link; - uint64_t new_link_u64; - uint64_t old_link_u64; - - EFX_STATIC_ASSERT(sizeof(*dev_link) == sizeof(rte_atomic64_t)); sfc_port_link_mode_to_info(link_mode, &new_link); - - new_link_u64 = *(uint64_t *)&new_link; - do { - old_link_u64 = rte_atomic64_read((rte_atomic64_t *)dev_link); - if (old_link_u64 == new_link_u64) - break; - - if (rte_atomic64_cmpset((volatile uint64_t *)dev_link, - old_link_u64, new_link_u64)) { - evq->sa->port.lsc_seq++; - break; - 
} - } while (B_TRUE); + if (rte_eth_linkstatus_set(sa->eth_dev, &new_link)) + evq->sa->port.lsc_seq++; return B_FALSE; } @@ -410,6 +423,7 @@ sfc_ev_link_change(void *arg, efx_link_mode_t link_mode) static const efx_ev_callbacks_t sfc_ev_callbacks = { .eec_initialized = sfc_ev_initialized, .eec_rx = sfc_ev_nop_rx, + .eec_rx_ps = sfc_ev_nop_rx_ps, .eec_tx = sfc_ev_nop_tx, .eec_exception = sfc_ev_exception, .eec_rxq_flush_done = sfc_ev_nop_rxq_flush_done, @@ -425,6 +439,7 @@ static const efx_ev_callbacks_t sfc_ev_callbacks_efx_rx = { .eec_initialized = sfc_ev_initialized, .eec_rx = sfc_ev_efx_rx, + .eec_rx_ps = sfc_ev_nop_rx_ps, .eec_tx = sfc_ev_nop_tx, .eec_exception = sfc_ev_exception, .eec_rxq_flush_done = sfc_ev_rxq_flush_done, @@ -440,6 +455,7 @@ static const efx_ev_callbacks_t sfc_ev_callbacks_efx_rx = { static const efx_ev_callbacks_t sfc_ev_callbacks_dp_rx = { .eec_initialized = sfc_ev_initialized, .eec_rx = sfc_ev_dp_rx, + .eec_rx_ps = sfc_ev_dp_rx_ps, .eec_tx = sfc_ev_nop_tx, .eec_exception = sfc_ev_exception, .eec_rxq_flush_done = sfc_ev_rxq_flush_done, @@ -455,6 +471,7 @@ static const efx_ev_callbacks_t sfc_ev_callbacks_dp_rx = { static const efx_ev_callbacks_t sfc_ev_callbacks_efx_tx = { .eec_initialized = sfc_ev_initialized, .eec_rx = sfc_ev_nop_rx, + .eec_rx_ps = sfc_ev_nop_rx_ps, .eec_tx = sfc_ev_tx, .eec_exception = sfc_ev_exception, .eec_rxq_flush_done = sfc_ev_nop_rxq_flush_done, @@ -470,6 +487,7 @@ static const efx_ev_callbacks_t sfc_ev_callbacks_efx_tx = { static const efx_ev_callbacks_t sfc_ev_callbacks_dp_tx = { .eec_initialized = sfc_ev_initialized, .eec_rx = sfc_ev_nop_rx, + .eec_rx_ps = sfc_ev_nop_rx_ps, .eec_tx = sfc_ev_dp_tx, .eec_exception = sfc_ev_exception, .eec_rxq_flush_done = sfc_ev_nop_rxq_flush_done, @@ -837,7 +855,7 @@ static int sfc_kvarg_perf_profile_handler(__rte_unused const char *key, const char *value_str, void *opaque) { - uint64_t *value = opaque; + uint32_t *value = opaque; if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_THROUGHPUT) == 0) *value = EFX_EVQ_FLAGS_TYPE_THROUGHPUT; diff --git a/drivers/net/sfc/sfc_flow.c b/drivers/net/sfc/sfc_flow.c index 93cdf8f4..5613d59a 100644 --- a/drivers/net/sfc/sfc_flow.c +++ b/drivers/net/sfc/sfc_flow.c @@ -7,6 +7,7 @@ * for Solarflare) and Solarflare Communications, Inc. */ +#include #include #include #include @@ -22,13 +23,17 @@ #include "sfc_filter.h" #include "sfc_flow.h" #include "sfc_log.h" +#include "sfc_dp_rx.h" /* * For now, the flow API is implemented in such a manner that each - * flow rule is converted to a hardware filter. + * flow rule is converted to one or more hardware filters. * All elements of flow rule (attributes, pattern items, actions) * correspond to one or more fields in the efx_filter_spec_s structure * that is responsible for the hardware filter. + * If some required field is unset in the flow rule, then a handful + * of filter copies will be created to cover all possible values + * of such a field.
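+ * For example, a rule which leaves the destination MAC address + * unspecified is turned into two filter copies, one matching unknown + * unicast and one matching unknown multicast destinations (see + * sfc_flow_copy_flags below).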
*/ enum sfc_flow_item_layers { @@ -57,6 +62,37 @@ static sfc_flow_item_parse sfc_flow_parse_ipv4; static sfc_flow_item_parse sfc_flow_parse_ipv6; static sfc_flow_item_parse sfc_flow_parse_tcp; static sfc_flow_item_parse sfc_flow_parse_udp; +static sfc_flow_item_parse sfc_flow_parse_vxlan; +static sfc_flow_item_parse sfc_flow_parse_geneve; +static sfc_flow_item_parse sfc_flow_parse_nvgre; + +typedef int (sfc_flow_spec_set_vals)(struct sfc_flow_spec *spec, + unsigned int filters_count_for_one_val, + struct rte_flow_error *error); + +typedef boolean_t (sfc_flow_spec_check)(efx_filter_match_flags_t match, + efx_filter_spec_t *spec, + struct sfc_filter *filter); + +struct sfc_flow_copy_flag { + /* EFX filter specification match flag */ + efx_filter_match_flags_t flag; + /* Number of values of corresponding field */ + unsigned int vals_count; + /* Function to set values in specifications */ + sfc_flow_spec_set_vals *set_vals; + /* + * Function to check that the specification is suitable + * for adding this match flag + */ + sfc_flow_spec_check *spec_check; +}; + +static sfc_flow_spec_set_vals sfc_flow_set_unknown_dst_flags; +static sfc_flow_spec_check sfc_flow_check_unknown_dst_flags; +static sfc_flow_spec_set_vals sfc_flow_set_ethertypes; +static sfc_flow_spec_set_vals sfc_flow_set_ifrm_unknown_dst_flags; +static sfc_flow_spec_check sfc_flow_check_ifrm_unknown_dst_flags; static boolean_t sfc_flow_is_zero(const uint8_t *buf, unsigned int size) @@ -85,7 +121,6 @@ sfc_flow_parse_init(const struct rte_flow_item *item, const uint8_t *spec; const uint8_t *mask; const uint8_t *last; - uint8_t match; uint8_t supp; unsigned int i; @@ -115,13 +150,13 @@ sfc_flow_parse_init(const struct rte_flow_item *item, return -rte_errno; } - mask = (const uint8_t *)def_mask; + mask = def_mask; } else { - mask = (const uint8_t *)item->mask; + mask = item->mask; } - spec = (const uint8_t *)item->spec; - last = (const uint8_t *)item->last; + spec = item->spec; + last = item->last; if (spec == NULL) goto exit; @@ -146,12 +181,11 @@ sfc_flow_parse_init(const struct rte_flow_item *item, return -rte_errno; } - /* Check that mask and spec not asks for more match than supp_mask */ + /* Check that mask does not ask for more match than supp_mask */ for (i = 0; i < size; i++) { - match = spec[i] | mask[i]; supp = ((const uint8_t *)supp_mask)[i]; - if ((match | supp) != supp) { + if (~supp & mask[i]) { rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, "Item's field is not supported"); @@ -184,11 +218,11 @@ sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item, * Convert Ethernet item to EFX filter specification. * * @param item[in] - * Item specification. Only source and destination addresses and - * Ethernet type fields are supported. In addition to full and - * empty masks of destination address, individual/group mask is - * also supported. If the mask is NULL, default mask will be used. - * Ranging is not supported. + * Item specification. Outer frame specification may only comprise + * source/destination addresses and Ethertype field. + * Inner frame specification may contain destination address only. + * There is support for individual/group mask as well as for empty and full. + * If the mask is NULL, default mask will be used. Ranging is not supported. * @param efx_spec[in, out] * EFX filter specification to update. 
* @param[out] error @@ -207,15 +241,32 @@ sfc_flow_parse_eth(const struct rte_flow_item *item, .src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, .type = 0xffff, }; + const struct rte_flow_item_eth ifrm_supp_mask = { + .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, + }; const uint8_t ig_mask[EFX_MAC_ADDR_LEN] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 }; + const struct rte_flow_item_eth *supp_mask_p; + const struct rte_flow_item_eth *def_mask_p; + uint8_t *loc_mac = NULL; + boolean_t is_ifrm = (efx_spec->efs_encap_type != + EFX_TUNNEL_PROTOCOL_NONE); + + if (is_ifrm) { + supp_mask_p = &ifrm_supp_mask; + def_mask_p = &ifrm_supp_mask; + loc_mac = efx_spec->efs_ifrm_loc_mac; + } else { + supp_mask_p = &supp_mask; + def_mask_p = &rte_flow_item_eth_mask; + loc_mac = efx_spec->efs_loc_mac; + } rc = sfc_flow_parse_init(item, (const void **)&spec, (const void **)&mask, - &supp_mask, - &rte_flow_item_eth_mask, + supp_mask_p, def_mask_p, sizeof(struct rte_flow_item_eth), error); if (rc != 0) @@ -226,21 +277,30 @@ sfc_flow_parse_eth(const struct rte_flow_item *item, return 0; if (is_same_ether_addr(&mask->dst, &supp_mask.dst)) { - efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_MAC; - rte_memcpy(efx_spec->efs_loc_mac, spec->dst.addr_bytes, + efx_spec->efs_match_flags |= is_ifrm ? + EFX_FILTER_MATCH_IFRM_LOC_MAC : + EFX_FILTER_MATCH_LOC_MAC; + rte_memcpy(loc_mac, spec->dst.addr_bytes, EFX_MAC_ADDR_LEN); } else if (memcmp(mask->dst.addr_bytes, ig_mask, EFX_MAC_ADDR_LEN) == 0) { if (is_unicast_ether_addr(&spec->dst)) - efx_spec->efs_match_flags |= + efx_spec->efs_match_flags |= is_ifrm ? + EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST : EFX_FILTER_MATCH_UNKNOWN_UCAST_DST; else - efx_spec->efs_match_flags |= + efx_spec->efs_match_flags |= is_ifrm ? + EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST : EFX_FILTER_MATCH_UNKNOWN_MCAST_DST; } else if (!is_zero_ether_addr(&mask->dst)) { goto fail_bad_mask; } + /* + * ifrm_supp_mask ensures that the source address and + * ethertype masks are equal to zero in inner frame, + * so these fields are filled in only for the outer frame + */ if (is_same_ether_addr(&mask->src, &supp_mask.src)) { efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC; rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes, @@ -291,6 +351,7 @@ sfc_flow_parse_vlan(const struct rte_flow_item *item, const struct rte_flow_item_vlan *mask = NULL; const struct rte_flow_item_vlan supp_mask = { .tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX), + .inner_type = RTE_BE16(0xffff), }; rc = sfc_flow_parse_init(item, @@ -333,6 +394,22 @@ sfc_flow_parse_vlan(const struct rte_flow_item *item, return -rte_errno; } + if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "VLAN TPID matching is not supported"); + return -rte_errno; + } + if (mask->inner_type == supp_mask.inner_type) { + efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE; + efx_spec->efs_ether_type = rte_bswap16(spec->inner_type); + } else if (mask->inner_type) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Bad mask for VLAN inner_type"); + return -rte_errno; + } + return 0; } @@ -696,6 +773,253 @@ fail_bad_mask: return -rte_errno; } +/* + * Filters for encapsulated packets match based on the EtherType and IP + * protocol in the outer frame. 
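+ * The outer IP protocol is UDP for VxLAN/GENEVE and GRE for NVGRE; + * the checks below report an error if the pattern sets a conflicting + * value.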
+ */ +static int +sfc_flow_set_match_flags_for_encap_pkts(const struct rte_flow_item *item, + efx_filter_spec_t *efx_spec, + uint8_t ip_proto, + struct rte_flow_error *error) +{ + if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) { + efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO; + efx_spec->efs_ip_proto = ip_proto; + } else if (efx_spec->efs_ip_proto != ip_proto) { + switch (ip_proto) { + case EFX_IPPROTO_UDP: + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Outer IP header protocol must be UDP " + "in VxLAN/GENEVE pattern"); + return -rte_errno; + + case EFX_IPPROTO_GRE: + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Outer IP header protocol must be GRE " + "in NVGRE pattern"); + return -rte_errno; + + default: + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Only VxLAN/GENEVE/NVGRE tunneling patterns " + "are supported"); + return -rte_errno; + } + } + + if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE && + efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV4 && + efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV6) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Outer frame EtherType in pattern with tunneling " + "must be IPv4 or IPv6"); + return -rte_errno; + } + + return 0; +} + +static int +sfc_flow_set_efx_spec_vni_or_vsid(efx_filter_spec_t *efx_spec, + const uint8_t *vni_or_vsid_val, + const uint8_t *vni_or_vsid_mask, + const struct rte_flow_item *item, + struct rte_flow_error *error) +{ + const uint8_t vni_or_vsid_full_mask[EFX_VNI_OR_VSID_LEN] = { + 0xff, 0xff, 0xff + }; + + if (memcmp(vni_or_vsid_mask, vni_or_vsid_full_mask, + EFX_VNI_OR_VSID_LEN) == 0) { + efx_spec->efs_match_flags |= EFX_FILTER_MATCH_VNI_OR_VSID; + rte_memcpy(efx_spec->efs_vni_or_vsid, vni_or_vsid_val, + EFX_VNI_OR_VSID_LEN); + } else if (!sfc_flow_is_zero(vni_or_vsid_mask, EFX_VNI_OR_VSID_LEN)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Unsupported VNI/VSID mask"); + return -rte_errno; + } + + return 0; +} + +/** + * Convert VXLAN item to EFX filter specification. + * + * @param item[in] + * Item specification. Only VXLAN network identifier field is supported. + * If the mask is NULL, default mask will be used. + * Ranging is not supported. + * @param efx_spec[in, out] + * EFX filter specification to update. + * @param[out] error + * Perform verbose error reporting if not NULL. + */ +static int +sfc_flow_parse_vxlan(const struct rte_flow_item *item, + efx_filter_spec_t *efx_spec, + struct rte_flow_error *error) +{ + int rc; + const struct rte_flow_item_vxlan *spec = NULL; + const struct rte_flow_item_vxlan *mask = NULL; + const struct rte_flow_item_vxlan supp_mask = { + .vni = { 0xff, 0xff, 0xff } + }; + + rc = sfc_flow_parse_init(item, + (const void **)&spec, + (const void **)&mask, + &supp_mask, + &rte_flow_item_vxlan_mask, + sizeof(struct rte_flow_item_vxlan), + error); + if (rc != 0) + return rc; + + rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec, + EFX_IPPROTO_UDP, error); + if (rc != 0) + return rc; + + efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_VXLAN; + efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE; + + if (spec == NULL) + return 0; + + rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni, + mask->vni, item, error); + + return rc; +} + +/** + * Convert GENEVE item to EFX filter specification. + * + * @param item[in] + * Item specification. Only Virtual Network Identifier and protocol type + * fields are supported. 
The protocol type, however, may only be Ethernet (0x6558). + * If the mask is NULL, default mask will be used. + * Ranging is not supported. + * @param efx_spec[in, out] + * EFX filter specification to update. + * @param[out] error + * Perform verbose error reporting if not NULL. + */ +static int +sfc_flow_parse_geneve(const struct rte_flow_item *item, + efx_filter_spec_t *efx_spec, + struct rte_flow_error *error) +{ + int rc; + const struct rte_flow_item_geneve *spec = NULL; + const struct rte_flow_item_geneve *mask = NULL; + const struct rte_flow_item_geneve supp_mask = { + .protocol = RTE_BE16(0xffff), + .vni = { 0xff, 0xff, 0xff } + }; + + rc = sfc_flow_parse_init(item, + (const void **)&spec, + (const void **)&mask, + &supp_mask, + &rte_flow_item_geneve_mask, + sizeof(struct rte_flow_item_geneve), + error); + if (rc != 0) + return rc; + + rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec, + EFX_IPPROTO_UDP, error); + if (rc != 0) + return rc; + + efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_GENEVE; + efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE; + + if (spec == NULL) + return 0; + + if (mask->protocol == supp_mask.protocol) { + if (spec->protocol != rte_cpu_to_be_16(ETHER_TYPE_TEB)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "GENEVE encap. protocol must be Ethernet " + "(0x6558) in the GENEVE pattern item"); + return -rte_errno; + } + } else if (mask->protocol != 0) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Unsupported mask for GENEVE encap. protocol"); + return -rte_errno; + } + + rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni, + mask->vni, item, error); + + return rc; +} + +/** + * Convert NVGRE item to EFX filter specification. + * + * @param item[in] + * Item specification. Only virtual subnet ID field is supported. + * If the mask is NULL, default mask will be used. + * Ranging is not supported. + * @param efx_spec[in, out] + * EFX filter specification to update. + * @param[out] error + * Perform verbose error reporting if not NULL.
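+ * + * A fully masked TNI is copied to the VNI/VSID field of the EFX + * specification by sfc_flow_set_efx_spec_vni_or_vsid(); any partial + * mask is rejected.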
+ */ +static int +sfc_flow_parse_nvgre(const struct rte_flow_item *item, + efx_filter_spec_t *efx_spec, + struct rte_flow_error *error) +{ + int rc; + const struct rte_flow_item_nvgre *spec = NULL; + const struct rte_flow_item_nvgre *mask = NULL; + const struct rte_flow_item_nvgre supp_mask = { + .tni = { 0xff, 0xff, 0xff } + }; + + rc = sfc_flow_parse_init(item, + (const void **)&spec, + (const void **)&mask, + &supp_mask, + &rte_flow_item_nvgre_mask, + sizeof(struct rte_flow_item_nvgre), + error); + if (rc != 0) + return rc; + + rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec, + EFX_IPPROTO_GRE, error); + if (rc != 0) + return rc; + + efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_NVGRE; + efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE; + + if (spec == NULL) + return 0; + + rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->tni, + mask->tni, item, error); + + return rc; +} + static const struct sfc_flow_item sfc_flow_items[] = { { .type = RTE_FLOW_ITEM_TYPE_VOID, @@ -739,6 +1063,24 @@ static const struct sfc_flow_item sfc_flow_items[] = { .layer = SFC_FLOW_ITEM_L4, .parse = sfc_flow_parse_udp, }, + { + .type = RTE_FLOW_ITEM_TYPE_VXLAN, + .prev_layer = SFC_FLOW_ITEM_L4, + .layer = SFC_FLOW_ITEM_START_LAYER, + .parse = sfc_flow_parse_vxlan, + }, + { + .type = RTE_FLOW_ITEM_TYPE_GENEVE, + .prev_layer = SFC_FLOW_ITEM_L4, + .layer = SFC_FLOW_ITEM_START_LAYER, + .parse = sfc_flow_parse_geneve, + }, + { + .type = RTE_FLOW_ITEM_TYPE_NVGRE, + .prev_layer = SFC_FLOW_ITEM_L3, + .layer = SFC_FLOW_ITEM_START_LAYER, + .parse = sfc_flow_parse_nvgre, + }, }; /* @@ -773,6 +1115,12 @@ sfc_flow_parse_attr(const struct rte_flow_attr *attr, "Egress is not supported"); return -rte_errno; } + if (attr->transfer != 0) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, attr, + "Transfer is not supported"); + return -rte_errno; + } if (attr->ingress == 0) { rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr, @@ -780,8 +1128,8 @@ sfc_flow_parse_attr(const struct rte_flow_attr *attr, return -rte_errno; } - flow->spec.efs_flags |= EFX_FILTER_FLAG_RX; - flow->spec.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT; + flow->spec.template.efs_flags |= EFX_FILTER_FLAG_RX; + flow->spec.template.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT; return 0; } @@ -806,6 +1154,7 @@ sfc_flow_parse_pattern(const struct rte_flow_item pattern[], { int rc; unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER; + boolean_t is_ifrm = B_FALSE; const struct sfc_flow_item *item; if (pattern == NULL) { @@ -837,7 +1186,41 @@ sfc_flow_parse_pattern(const struct rte_flow_item pattern[], return -rte_errno; } - rc = item->parse(pattern, &flow->spec, error); + /* + * Allow only VOID and ETH pattern items in the inner frame. + * Also check that there is only one tunneling protocol. 
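+ * For example, ETH / IPV4 / UDP / VXLAN / ETH is a valid pattern, + * while ETH / IPV4 / UDP / VXLAN / ETH / IPV4 is rejected, since + * IPV4 may not appear in the inner frame.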
+ */ + switch (item->type) { + case RTE_FLOW_ITEM_TYPE_VOID: + case RTE_FLOW_ITEM_TYPE_ETH: + break; + + case RTE_FLOW_ITEM_TYPE_VXLAN: + case RTE_FLOW_ITEM_TYPE_GENEVE: + case RTE_FLOW_ITEM_TYPE_NVGRE: + if (is_ifrm) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + pattern, + "More than one tunneling protocol"); + return -rte_errno; + } + is_ifrm = B_TRUE; + break; + + default: + if (is_ifrm) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + pattern, + "There is an unsupported pattern item " + "in the inner frame"); + return -rte_errno; + } + break; + } + + rc = item->parse(pattern, &flow->spec.template, error); if (rc != 0) return rc; @@ -859,28 +1242,27 @@ sfc_flow_parse_queue(struct sfc_adapter *sa, return -EINVAL; rxq = sa->rxq_info[queue->index].rxq; - flow->spec.efs_dmaq_id = (uint16_t)rxq->hw_index; + flow->spec.template.efs_dmaq_id = (uint16_t)rxq->hw_index; return 0; } -#if EFSYS_OPT_RX_SCALE static int sfc_flow_parse_rss(struct sfc_adapter *sa, - const struct rte_flow_action_rss *rss, + const struct rte_flow_action_rss *action_rss, struct rte_flow *flow) { + struct sfc_rss *rss = &sa->rss; unsigned int rxq_sw_index; struct sfc_rxq *rxq; unsigned int rxq_hw_index_min; unsigned int rxq_hw_index_max; - const struct rte_eth_rss_conf *rss_conf = rss->rss_conf; - uint64_t rss_hf; - uint8_t *rss_key = NULL; + efx_rx_hash_type_t efx_hash_types; + const uint8_t *rss_key; struct sfc_flow_rss *sfc_rss_conf = &flow->rss_conf; unsigned int i; - if (rss->num == 0) + if (action_rss->queue_num == 0) return -EINVAL; rxq_sw_index = sa->rxq_count - 1; @@ -888,8 +1270,8 @@ sfc_flow_parse_rss(struct sfc_adapter *sa, rxq_hw_index_min = rxq->hw_index; rxq_hw_index_max = 0; - for (i = 0; i < rss->num; ++i) { - rxq_sw_index = rss->queue[i]; + for (i = 0; i < action_rss->queue_num; ++i) { + rxq_sw_index = action_rss->queue[i]; if (rxq_sw_index >= sa->rxq_count) return -EINVAL; @@ -903,28 +1285,62 @@ sfc_flow_parse_rss(struct sfc_adapter *sa, rxq_hw_index_max = rxq->hw_index; } - rss_hf = (rss_conf != NULL) ? rss_conf->rss_hf : SFC_RSS_OFFLOADS; - if ((rss_hf & ~SFC_RSS_OFFLOADS) != 0) + switch (action_rss->func) { + case RTE_ETH_HASH_FUNCTION_DEFAULT: + case RTE_ETH_HASH_FUNCTION_TOEPLITZ: + break; + default: + return -EINVAL; + } + + if (action_rss->level) return -EINVAL; - if (rss_conf != NULL) { - if (rss_conf->rss_key_len != sizeof(sa->rss_key)) + /* + * Dummy RSS action with only one queue and no specific settings + * for hash types and key does not require dedicated RSS context + * and may be simplified to single queue action. 
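+ * For example, an RSS action with queue_num == 1, types == 0 and + * key_len == 0 behaves exactly like a QUEUE action for the same + * queue.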
+ */ + if (action_rss->queue_num == 1 && action_rss->types == 0 && + action_rss->key_len == 0) { + flow->spec.template.efs_dmaq_id = rxq_hw_index_min; + return 0; + } + + if (action_rss->types) { + int rc; + + rc = sfc_rx_hf_rte_to_efx(sa, action_rss->types, + &efx_hash_types); + if (rc != 0) + return -rc; + } else { + unsigned int i; + + efx_hash_types = 0; + for (i = 0; i < rss->hf_map_nb_entries; ++i) + efx_hash_types |= rss->hf_map[i].efx; + } + + if (action_rss->key_len) { + if (action_rss->key_len != sizeof(rss->key)) return -EINVAL; - rss_key = rss_conf->rss_key; + rss_key = action_rss->key; } else { - rss_key = sa->rss_key; + rss_key = rss->key; } flow->rss = B_TRUE; sfc_rss_conf->rxq_hw_index_min = rxq_hw_index_min; sfc_rss_conf->rxq_hw_index_max = rxq_hw_index_max; - sfc_rss_conf->rss_hash_types = sfc_rte_to_efx_hash_type(rss_hf); - rte_memcpy(sfc_rss_conf->rss_key, rss_key, sizeof(sa->rss_key)); + sfc_rss_conf->rss_hash_types = efx_hash_types; + rte_memcpy(sfc_rss_conf->rss_key, rss_key, sizeof(rss->key)); for (i = 0; i < RTE_DIM(sfc_rss_conf->rss_tbl); ++i) { - unsigned int rxq_sw_index = rss->queue[i % rss->num]; + unsigned int nb_queues = action_rss->queue_num; + unsigned int rxq_sw_index = action_rss->queue[i % nb_queues]; struct sfc_rxq *rxq = sa->rxq_info[rxq_sw_index].rxq; sfc_rss_conf->rss_tbl[i] = rxq->hw_index - rxq_hw_index_min; @@ -932,47 +1348,101 @@ sfc_flow_parse_rss(struct sfc_adapter *sa, return 0; } -#endif /* EFSYS_OPT_RX_SCALE */ + +static int +sfc_flow_spec_flush(struct sfc_adapter *sa, struct sfc_flow_spec *spec, + unsigned int filters_count) +{ + unsigned int i; + int ret = 0; + + for (i = 0; i < filters_count; i++) { + int rc; + + rc = efx_filter_remove(sa->nic, &spec->filters[i]); + if (ret == 0 && rc != 0) { + sfc_err(sa, "failed to remove filter specification " + "(rc = %d)", rc); + ret = rc; + } + } + + return ret; +} + +static int +sfc_flow_spec_insert(struct sfc_adapter *sa, struct sfc_flow_spec *spec) +{ + unsigned int i; + int rc = 0; + + for (i = 0; i < spec->count; i++) { + rc = efx_filter_insert(sa->nic, &spec->filters[i]); + if (rc != 0) { + sfc_flow_spec_flush(sa, spec, i); + break; + } + } + + return rc; +} + +static int +sfc_flow_spec_remove(struct sfc_adapter *sa, struct sfc_flow_spec *spec) +{ + return sfc_flow_spec_flush(sa, spec, spec->count); +} static int sfc_flow_filter_insert(struct sfc_adapter *sa, struct rte_flow *flow) { - efx_filter_spec_t *spec = &flow->spec; - -#if EFSYS_OPT_RX_SCALE - struct sfc_flow_rss *rss = &flow->rss_conf; + struct sfc_rss *rss = &sa->rss; + struct sfc_flow_rss *flow_rss = &flow->rss_conf; + uint32_t efs_rss_context = EFX_RSS_CONTEXT_DEFAULT; + unsigned int i; int rc = 0; if (flow->rss) { - unsigned int rss_spread = MIN(rss->rxq_hw_index_max - - rss->rxq_hw_index_min + 1, + unsigned int rss_spread = MIN(flow_rss->rxq_hw_index_max - + flow_rss->rxq_hw_index_min + 1, EFX_MAXRSS); rc = efx_rx_scale_context_alloc(sa->nic, EFX_RX_SCALE_EXCLUSIVE, rss_spread, - &spec->efs_rss_context); + &efs_rss_context); if (rc != 0) goto fail_scale_context_alloc; - rc = efx_rx_scale_mode_set(sa->nic, spec->efs_rss_context, - EFX_RX_HASHALG_TOEPLITZ, - rss->rss_hash_types, B_TRUE); + rc = efx_rx_scale_mode_set(sa->nic, efs_rss_context, + rss->hash_alg, + flow_rss->rss_hash_types, B_TRUE); if (rc != 0) goto fail_scale_mode_set; - rc = efx_rx_scale_key_set(sa->nic, spec->efs_rss_context, - rss->rss_key, - sizeof(sa->rss_key)); + rc = efx_rx_scale_key_set(sa->nic, efs_rss_context, + flow_rss->rss_key, + sizeof(rss->key)); if 
(rc != 0) goto fail_scale_key_set; - spec->efs_dmaq_id = rss->rxq_hw_index_min; - spec->efs_flags |= EFX_FILTER_FLAG_RX_RSS; + /* + * At this point, fully elaborated filter specifications + * have been produced from the template. To make sure that + * RSS behaviour is consistent between them, set the same + * RSS context value everywhere. + */ + for (i = 0; i < flow->spec.count; i++) { + efx_filter_spec_t *spec = &flow->spec.filters[i]; + + spec->efs_rss_context = efs_rss_context; + spec->efs_dmaq_id = flow_rss->rxq_hw_index_min; + spec->efs_flags |= EFX_FILTER_FLAG_RX_RSS; + } } - rc = efx_filter_insert(sa->nic, spec); + rc = sfc_flow_spec_insert(sa, &flow->spec); if (rc != 0) goto fail_filter_insert; @@ -985,8 +1455,9 @@ sfc_flow_filter_insert(struct sfc_adapter *sa, * the HW knows all the information needed to verify * the table entries, and the operation will succeed */ - rc = efx_rx_scale_tbl_set(sa->nic, spec->efs_rss_context, - rss->rss_tbl, RTE_DIM(rss->rss_tbl)); + rc = efx_rx_scale_tbl_set(sa->nic, efs_rss_context, + flow_rss->rss_tbl, + RTE_DIM(flow_rss->rss_tbl)); if (rc != 0) goto fail_scale_tbl_set; } @@ -994,40 +1465,58 @@ sfc_flow_filter_insert(struct sfc_adapter *sa, return 0; fail_scale_tbl_set: - efx_filter_remove(sa->nic, spec); + sfc_flow_spec_remove(sa, &flow->spec); fail_filter_insert: fail_scale_key_set: fail_scale_mode_set: - if (flow->rss) - efx_rx_scale_context_free(sa->nic, spec->efs_rss_context); + if (efs_rss_context != EFX_RSS_CONTEXT_DEFAULT) + efx_rx_scale_context_free(sa->nic, efs_rss_context); fail_scale_context_alloc: return rc; -#else /* !EFSYS_OPT_RX_SCALE */ - return efx_filter_insert(sa->nic, spec); -#endif /* EFSYS_OPT_RX_SCALE */ } static int sfc_flow_filter_remove(struct sfc_adapter *sa, struct rte_flow *flow) { - efx_filter_spec_t *spec = &flow->spec; int rc = 0; - rc = efx_filter_remove(sa->nic, spec); + rc = sfc_flow_spec_remove(sa, &flow->spec); if (rc != 0) return rc; -#if EFSYS_OPT_RX_SCALE - if (flow->rss) + if (flow->rss) { + /* + * All specifications for a given flow rule have the same RSS + * context, so that RSS context value is taken from the first + * filter specification + */ + efx_filter_spec_t *spec = &flow->spec.filters[0]; + rc = efx_rx_scale_context_free(sa->nic, spec->efs_rss_context); -#endif /* EFSYS_OPT_RX_SCALE */ + } return rc; } +static int +sfc_flow_parse_mark(struct sfc_adapter *sa, + const struct rte_flow_action_mark *mark, + struct rte_flow *flow) +{ + const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic); + + if (mark == NULL || mark->id > encp->enc_filter_action_mark_max) + return EINVAL; + + flow->spec.template.efs_flags |= EFX_FILTER_FLAG_ACTION_MARK; + flow->spec.template.efs_mark = mark->id; + + return 0; +} + static int sfc_flow_parse_actions(struct sfc_adapter *sa, const struct rte_flow_action actions[], @@ -1035,7 +1524,13 @@ sfc_flow_parse_actions(struct sfc_adapter *sa, struct rte_flow_error *error) { int rc; - boolean_t is_specified = B_FALSE; + const unsigned int dp_rx_features = sa->dp_rx->features; + uint32_t actions_set = 0; + const uint32_t fate_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_QUEUE) | + (1UL << RTE_FLOW_ACTION_TYPE_RSS) | + (1UL << RTE_FLOW_ACTION_TYPE_DROP); + const uint32_t mark_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_MARK) | + (1UL << RTE_FLOW_ACTION_TYPE_FLAG); if (actions == NULL) { rte_flow_error_set(error, EINVAL, @@ -1044,12 +1539,22 @@ sfc_flow_parse_actions(struct sfc_adapter *sa, return -rte_errno; } +#define SFC_BUILD_SET_OVERFLOW(_action, _set) \ + RTE_BUILD_BUG_ON(_action 
>= sizeof(_set) * CHAR_BIT) + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { switch (actions->type) { case RTE_FLOW_ACTION_TYPE_VOID: + SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VOID, + actions_set); break; case RTE_FLOW_ACTION_TYPE_QUEUE: + SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_QUEUE, + actions_set); + if ((actions_set & fate_actions_mask) != 0) + goto fail_fate_actions; + rc = sfc_flow_parse_queue(sa, actions->conf, flow); if (rc != 0) { rte_flow_error_set(error, EINVAL, @@ -1057,23 +1562,71 @@ sfc_flow_parse_actions(struct sfc_adapter *sa, "Bad QUEUE action"); return -rte_errno; } - - is_specified = B_TRUE; break; -#if EFSYS_OPT_RX_SCALE case RTE_FLOW_ACTION_TYPE_RSS: + SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_RSS, + actions_set); + if ((actions_set & fate_actions_mask) != 0) + goto fail_fate_actions; + rc = sfc_flow_parse_rss(sa, actions->conf, flow); if (rc != 0) { - rte_flow_error_set(error, rc, + rte_flow_error_set(error, -rc, RTE_FLOW_ERROR_TYPE_ACTION, actions, "Bad RSS action"); return -rte_errno; } + break; + + case RTE_FLOW_ACTION_TYPE_DROP: + SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DROP, + actions_set); + if ((actions_set & fate_actions_mask) != 0) + goto fail_fate_actions; + + flow->spec.template.efs_dmaq_id = + EFX_FILTER_SPEC_RX_DMAQ_ID_DROP; + break; + + case RTE_FLOW_ACTION_TYPE_FLAG: + SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_FLAG, + actions_set); + if ((actions_set & mark_actions_mask) != 0) + goto fail_actions_overlap; + + if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_FLAG) == 0) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "FLAG action is not supported on the current Rx datapath"); + return -rte_errno; + } - is_specified = B_TRUE; + flow->spec.template.efs_flags |= + EFX_FILTER_FLAG_ACTION_FLAG; + break; + + case RTE_FLOW_ACTION_TYPE_MARK: + SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_MARK, + actions_set); + if ((actions_set & mark_actions_mask) != 0) + goto fail_actions_overlap; + + if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_MARK) == 0) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "MARK action is not supported on the current Rx datapath"); + return -rte_errno; + } + + rc = sfc_flow_parse_mark(sa, actions->conf, flow); + if (rc != 0) { + rte_flow_error_set(error, rc, + RTE_FLOW_ERROR_TYPE_ACTION, actions, + "Bad MARK action"); + return -rte_errno; + } break; -#endif /* EFSYS_OPT_RX_SCALE */ default: rte_flow_error_set(error, ENOTSUP, @@ -1081,12 +1634,529 @@ sfc_flow_parse_actions(struct sfc_adapter *sa, "Action is not supported"); return -rte_errno; } + + actions_set |= (1UL << actions->type); + } +#undef SFC_BUILD_SET_OVERFLOW + + /* When fate is unknown, drop traffic. */ + if ((actions_set & fate_actions_mask) == 0) { + flow->spec.template.efs_dmaq_id = + EFX_FILTER_SPEC_RX_DMAQ_ID_DROP; } - if (!is_specified) { + return 0; + +fail_fate_actions: + rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions, + "Cannot combine several fate-deciding actions, " + "choose between QUEUE, RSS or DROP"); + return -rte_errno; + +fail_actions_overlap: + rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions, + "Overlapping actions are not supported"); + return -rte_errno; +} + +/** + * Set the EFX_FILTER_MATCH_UNKNOWN_UCAST_DST + * and EFX_FILTER_MATCH_UNKNOWN_MCAST_DST match flags in the same + * specifications after copying. + * + * @param spec[in, out] + * SFC flow specification to update. 
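+ * The first half of the resulting filters receives the unknown + * unicast flag and the second half the unknown multicast flag.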
+ * @param filters_count_for_one_val[in] + * How many specifications should have the same match flag, which is the + * number of specifications before copying. + * @param error[out] + * Perform verbose error reporting if not NULL. + */ +static int +sfc_flow_set_unknown_dst_flags(struct sfc_flow_spec *spec, + unsigned int filters_count_for_one_val, + struct rte_flow_error *error) +{ + unsigned int i; + static const efx_filter_match_flags_t vals[] = { + EFX_FILTER_MATCH_UNKNOWN_UCAST_DST, + EFX_FILTER_MATCH_UNKNOWN_MCAST_DST + }; + + if (filters_count_for_one_val * RTE_DIM(vals) != spec->count) { rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION_NUM, actions, - "Action is unspecified"); + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Number of specifications is incorrect while copying " + "by unknown destination flags"); + return -rte_errno; + } + + for (i = 0; i < spec->count; i++) { + /* The check above ensures that divisor can't be zero here */ + spec->filters[i].efs_match_flags |= + vals[i / filters_count_for_one_val]; + } + + return 0; +} + +/** + * Check that the following conditions are met: + * - the list of supported filters has a filter + * with EFX_FILTER_MATCH_UNKNOWN_MCAST_DST flag instead of + * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST, since this filter will also + * be inserted. + * + * @param match[in] + * The match flags of filter. + * @param spec[in] + * Specification to be supplemented. + * @param filter[in] + * SFC filter with list of supported filters. + */ +static boolean_t +sfc_flow_check_unknown_dst_flags(efx_filter_match_flags_t match, + __rte_unused efx_filter_spec_t *spec, + struct sfc_filter *filter) +{ + unsigned int i; + efx_filter_match_flags_t match_mcast_dst; + + match_mcast_dst = + (match & ~EFX_FILTER_MATCH_UNKNOWN_UCAST_DST) | + EFX_FILTER_MATCH_UNKNOWN_MCAST_DST; + for (i = 0; i < filter->supported_match_num; i++) { + if (match_mcast_dst == filter->supported_match[i]) + return B_TRUE; + } + + return B_FALSE; +} + +/** + * Set the EFX_FILTER_MATCH_ETHER_TYPE match flag and EFX_ETHER_TYPE_IPV4 and + * EFX_ETHER_TYPE_IPV6 values of the corresponding field in the same + * specifications after copying. + * + * @param spec[in, out] + * SFC flow specification to update. + * @param filters_count_for_one_val[in] + * How many specifications should have the same EtherType value, which is the + * number of specifications before copying. + * @param error[out] + * Perform verbose error reporting if not NULL. + */ +static int +sfc_flow_set_ethertypes(struct sfc_flow_spec *spec, + unsigned int filters_count_for_one_val, + struct rte_flow_error *error) +{ + unsigned int i; + static const uint16_t vals[] = { + EFX_ETHER_TYPE_IPV4, EFX_ETHER_TYPE_IPV6 + }; + + if (filters_count_for_one_val * RTE_DIM(vals) != spec->count) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Number of specifications is incorrect " + "while copying by Ethertype"); + return -rte_errno; + } + + for (i = 0; i < spec->count; i++) { + spec->filters[i].efs_match_flags |= + EFX_FILTER_MATCH_ETHER_TYPE; + + /* + * The check above ensures that + * filters_count_for_one_val is not 0 + */ + spec->filters[i].efs_ether_type = + vals[i / filters_count_for_one_val]; + } + + return 0; +} + +/** + * Set the EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST and + * EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST match flags in the same + * specifications after copying. + * + * @param spec[in, out] + * SFC flow specification to update.
+ * @param filters_count_for_one_val[in] + * How many specifications should have the same match flag, which is the + * number of specifications before copying. + * @param error[out] + * Perform verbose error reporting if not NULL. + */ +static int +sfc_flow_set_ifrm_unknown_dst_flags(struct sfc_flow_spec *spec, + unsigned int filters_count_for_one_val, + struct rte_flow_error *error) +{ + unsigned int i; + static const efx_filter_match_flags_t vals[] = { + EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST, + EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST + }; + + if (filters_count_for_one_val * RTE_DIM(vals) != spec->count) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Number of specifications is incorrect while copying " + "by inner frame unknown destination flags"); + return -rte_errno; + } + + for (i = 0; i < spec->count; i++) { + /* The check above ensures that divisor can't be zero here */ + spec->filters[i].efs_match_flags |= + vals[i / filters_count_for_one_val]; + } + + return 0; +} + +/** + * Check that the following conditions are met: + * - the specification corresponds to a filter for encapsulated traffic + * - the list of supported filters has a filter + * with EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST flag instead of + * EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST, since this filter will also + * be inserted. + * + * @param match[in] + * The match flags of filter. + * @param spec[in] + * Specification to be supplemented. + * @param filter[in] + * SFC filter with list of supported filters. + */ +static boolean_t +sfc_flow_check_ifrm_unknown_dst_flags(efx_filter_match_flags_t match, + efx_filter_spec_t *spec, + struct sfc_filter *filter) +{ + unsigned int i; + efx_tunnel_protocol_t encap_type = spec->efs_encap_type; + efx_filter_match_flags_t match_mcast_dst; + + if (encap_type == EFX_TUNNEL_PROTOCOL_NONE) + return B_FALSE; + + match_mcast_dst = + (match & ~EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST) | + EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST; + for (i = 0; i < filter->supported_match_num; i++) { + if (match_mcast_dst == filter->supported_match[i]) + return B_TRUE; + } + + return B_FALSE; +} + +/* + * Match flags that can be automatically added to filters. + * Selecting the last minimum when searching for the copy flag ensures that the + * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST flag has a higher priority than + * EFX_FILTER_MATCH_ETHER_TYPE. This is because the filter + * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST is at the end of the list of supported + * filters. + */ +static const struct sfc_flow_copy_flag sfc_flow_copy_flags[] = { + { + .flag = EFX_FILTER_MATCH_UNKNOWN_UCAST_DST, + .vals_count = 2, + .set_vals = sfc_flow_set_unknown_dst_flags, + .spec_check = sfc_flow_check_unknown_dst_flags, + }, + { + .flag = EFX_FILTER_MATCH_ETHER_TYPE, + .vals_count = 2, + .set_vals = sfc_flow_set_ethertypes, + .spec_check = NULL, + }, + { + .flag = EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST, + .vals_count = 2, + .set_vals = sfc_flow_set_ifrm_unknown_dst_flags, + .spec_check = sfc_flow_check_ifrm_unknown_dst_flags, + }, +}; + +/* Get item from array sfc_flow_copy_flags */ +static const struct sfc_flow_copy_flag * +sfc_flow_get_copy_flag(efx_filter_match_flags_t flag) +{ + unsigned int i; + + for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) { + if (sfc_flow_copy_flags[i].flag == flag) + return &sfc_flow_copy_flags[i]; + } + + return NULL; +} + +/** + * Make copies of the specifications, set match flag and values + * of the field that corresponds to it.
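+ * The number of filters is multiplied by the vals_count of the flag + * being added; e.g. adding EFX_FILTER_MATCH_ETHER_TYPE doubles the + * specifications into IPv4 and IPv6 copies.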
+ * + * @param spec[in, out] + * SFC flow specification to update. + * @param flag[in] + * The match flag to add. + * @param error[out] + * Perform verbose error reporting if not NULL. + */ +static int +sfc_flow_spec_add_match_flag(struct sfc_flow_spec *spec, + efx_filter_match_flags_t flag, + struct rte_flow_error *error) +{ + unsigned int i; + unsigned int new_filters_count; + unsigned int filters_count_for_one_val; + const struct sfc_flow_copy_flag *copy_flag; + int rc; + + copy_flag = sfc_flow_get_copy_flag(flag); + if (copy_flag == NULL) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Unsupported spec field for copying"); + return -rte_errno; + } + + new_filters_count = spec->count * copy_flag->vals_count; + if (new_filters_count > SF_FLOW_SPEC_NB_FILTERS_MAX) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Too many EFX specifications in the flow rule"); + return -rte_errno; + } + + /* Copy filter specifications */ + for (i = spec->count; i < new_filters_count; i++) + spec->filters[i] = spec->filters[i - spec->count]; + + filters_count_for_one_val = spec->count; + spec->count = new_filters_count; + + rc = copy_flag->set_vals(spec, filters_count_for_one_val, error); + if (rc != 0) + return rc; + + return 0; +} + +/** + * Check that the given set of match flags missing in the original filter spec + * could be covered by adding spec copies which specify the corresponding + * flags and packet field values to match. + * + * @param miss_flags[in] + * Flags that are missing in the specification compared to + * the supported filter. + * @param spec[in] + * Specification to be supplemented. + * @param filter[in] + * SFC filter. + * + * @return + * Number of specifications after copying, or 0 if the flags cannot be added. + */ +static unsigned int +sfc_flow_check_missing_flags(efx_filter_match_flags_t miss_flags, + efx_filter_spec_t *spec, + struct sfc_filter *filter) +{ + unsigned int i; + efx_filter_match_flags_t copy_flags = 0; + efx_filter_match_flags_t flag; + efx_filter_match_flags_t match = spec->efs_match_flags | miss_flags; + sfc_flow_spec_check *check; + unsigned int multiplier = 1; + + for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) { + flag = sfc_flow_copy_flags[i].flag; + check = sfc_flow_copy_flags[i].spec_check; + if ((flag & miss_flags) == flag) { + if (check != NULL && (!check(match, spec, filter))) + continue; + + copy_flags |= flag; + multiplier *= sfc_flow_copy_flags[i].vals_count; + } + } + + if (copy_flags == miss_flags) + return multiplier; + + return 0; +} + +/** + * Attempt to supplement the specification template to the minimally + * supported set of match flags. To do this, it is necessary to copy + * the specifications, filling them with the values of fields that + * correspond to the missing flags. + * The necessary and sufficient filter is built from the smallest number + * of copies which could be made to cover the minimally required set + * of flags. + * + * @param sa[in] + * SFC adapter. + * @param spec[in, out] + * SFC flow specification to update. + * @param error[out] + * Perform verbose error reporting if not NULL.
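+ * + * For example, if the template only lacks the EtherType match compared + * to the closest supported filter, it is doubled into IPv4 and IPv6 + * copies (multiplier 2) rather than expanded by a larger set of flags.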
+ */ +static int +sfc_flow_spec_filters_complete(struct sfc_adapter *sa, + struct sfc_flow_spec *spec, + struct rte_flow_error *error) +{ + struct sfc_filter *filter = &sa->filter; + efx_filter_match_flags_t miss_flags; + efx_filter_match_flags_t min_miss_flags = 0; + efx_filter_match_flags_t match; + unsigned int min_multiplier = UINT_MAX; + unsigned int multiplier; + unsigned int i; + int rc; + + match = spec->template.efs_match_flags; + for (i = 0; i < filter->supported_match_num; i++) { + if ((match & filter->supported_match[i]) == match) { + miss_flags = filter->supported_match[i] & (~match); + multiplier = sfc_flow_check_missing_flags(miss_flags, + &spec->template, filter); + if (multiplier > 0) { + if (multiplier <= min_multiplier) { + min_multiplier = multiplier; + min_miss_flags = miss_flags; + } + } + } + } + + if (min_multiplier == UINT_MAX) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "The flow rule pattern is unsupported"); + return -rte_errno; + } + + for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) { + efx_filter_match_flags_t flag = sfc_flow_copy_flags[i].flag; + + if ((flag & min_miss_flags) == flag) { + rc = sfc_flow_spec_add_match_flag(spec, flag, error); + if (rc != 0) + return rc; + } + } + + return 0; +} + +/** + * Check that the set of match flags matches the given pattern of filter + * match flags, optionally extended with the OUTER_VID and INNER_VID + * flags. + * + * @param match_flags[in] + * Set of match flags. + * @param flags_pattern[in] + * Pattern of filter match flags. + */ +static boolean_t +sfc_flow_is_match_with_vids(efx_filter_match_flags_t match_flags, + efx_filter_match_flags_t flags_pattern) +{ + if ((match_flags & flags_pattern) != flags_pattern) + return B_FALSE; + + switch (match_flags & ~flags_pattern) { + case 0: + case EFX_FILTER_MATCH_OUTER_VID: + case EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_INNER_VID: + return B_TRUE; + default: + return B_FALSE; + } +} + +/** + * Check whether the spec maps to a hardware filter which is known to be + * ineffective despite being valid. + * + * @param spec[in] + * SFC flow specification.
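+ * + * For example, a filter which matches on EtherType IPv4 alone, or on + * IP protocol TCP or UDP, falls into this category and is rejected.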
+ */ +static boolean_t +sfc_flow_is_match_flags_exception(struct sfc_flow_spec *spec) +{ + unsigned int i; + uint16_t ether_type; + uint8_t ip_proto; + efx_filter_match_flags_t match_flags; + + for (i = 0; i < spec->count; i++) { + match_flags = spec->filters[i].efs_match_flags; + + if (sfc_flow_is_match_with_vids(match_flags, + EFX_FILTER_MATCH_ETHER_TYPE) || + sfc_flow_is_match_with_vids(match_flags, + EFX_FILTER_MATCH_ETHER_TYPE | + EFX_FILTER_MATCH_LOC_MAC)) { + ether_type = spec->filters[i].efs_ether_type; + if (ether_type == EFX_ETHER_TYPE_IPV4 || + ether_type == EFX_ETHER_TYPE_IPV6) + return B_TRUE; + } else if (sfc_flow_is_match_with_vids(match_flags, + EFX_FILTER_MATCH_ETHER_TYPE | + EFX_FILTER_MATCH_IP_PROTO) || + sfc_flow_is_match_with_vids(match_flags, + EFX_FILTER_MATCH_ETHER_TYPE | + EFX_FILTER_MATCH_IP_PROTO | + EFX_FILTER_MATCH_LOC_MAC)) { + ip_proto = spec->filters[i].efs_ip_proto; + if (ip_proto == EFX_IPPROTO_TCP || + ip_proto == EFX_IPPROTO_UDP) + return B_TRUE; + } + } + + return B_FALSE; +} + +static int +sfc_flow_validate_match_flags(struct sfc_adapter *sa, + struct rte_flow *flow, + struct rte_flow_error *error) +{ + efx_filter_spec_t *spec_tmpl = &flow->spec.template; + efx_filter_match_flags_t match_flags = spec_tmpl->efs_match_flags; + int rc; + + /* Initialize the first filter spec with template */ + flow->spec.filters[0] = *spec_tmpl; + flow->spec.count = 1; + + if (!sfc_filter_is_match_supported(sa, match_flags)) { + rc = sfc_flow_spec_filters_complete(sa, &flow->spec, error); + if (rc != 0) + return rc; + } + + if (sfc_flow_is_match_flags_exception(&flow->spec)) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "The flow rule pattern is unsupported"); return -rte_errno; } @@ -1116,12 +2186,11 @@ sfc_flow_parse(struct rte_eth_dev *dev, if (rc != 0) goto fail_bad_value; - if (!sfc_filter_is_match_supported(sa, flow->spec.efs_match_flags)) { - rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, - "Flow rule pattern is not supported"); - return -rte_errno; - } + rc = sfc_flow_validate_match_flags(sa, flow, error); + if (rc != 0) + goto fail_bad_value; + + return 0; fail_bad_value: return rc; diff --git a/drivers/net/sfc/sfc_flow.h b/drivers/net/sfc/sfc_flow.h index 35472ad3..71ec18cb 100644 --- a/drivers/net/sfc/sfc_flow.h +++ b/drivers/net/sfc/sfc_flow.h @@ -19,7 +19,13 @@ extern "C" { #endif -#if EFSYS_OPT_RX_SCALE +/* + * The maximum number of fully elaborated hardware filter specifications + * which can be produced from a template by means of multiplication when + * missing match flags need to be taken into account + */ +#define SF_FLOW_SPEC_NB_FILTERS_MAX 8 + /* RSS configuration storage */ struct sfc_flow_rss { unsigned int rxq_hw_index_min; @@ -28,15 +34,22 @@ struct sfc_flow_rss { uint8_t rss_key[EFX_RSS_KEY_SIZE]; unsigned int rss_tbl[EFX_RSS_TBL_SIZE]; }; -#endif /* EFSYS_OPT_RX_SCALE */ + +/* Filter specification storage */ +struct sfc_flow_spec { + /* partial specification from flow rule */ + efx_filter_spec_t template; + /* fully elaborated hardware filter specifications */ + efx_filter_spec_t filters[SF_FLOW_SPEC_NB_FILTERS_MAX]; + /* number of complete specifications */ + unsigned int count; +}; /* PMD-specific definition of the opaque type from rte_flow.h */ struct rte_flow { - efx_filter_spec_t spec; /* filter specification */ -#if EFSYS_OPT_RX_SCALE + struct sfc_flow_spec spec; /* flow spec for hardware filter(s) */ boolean_t rss; /* RSS toggle */ struct sfc_flow_rss rss_conf; /* RSS
configuration */ -#endif /* EFSYS_OPT_RX_SCALE */ TAILQ_ENTRY(rte_flow) entries; /* flow list entries */ }; diff --git a/drivers/net/sfc/sfc_intr.c b/drivers/net/sfc/sfc_intr.c index d6c84927..fbdc7eea 100644 --- a/drivers/net/sfc/sfc_intr.c +++ b/drivers/net/sfc/sfc_intr.c @@ -86,7 +86,7 @@ sfc_intr_line_handler(void *cb_arg) exit: if (lsc_seq != sa->port.lsc_seq) { - sfc_info(sa, "link status change event: link %s", + sfc_notice(sa, "link status change event: link %s", sa->eth_dev->data->dev_link.link_status ? "UP" : "DOWN"); _rte_eth_dev_callback_process(sa->eth_dev, @@ -130,7 +130,7 @@ sfc_intr_message_handler(void *cb_arg) exit: if (lsc_seq != sa->port.lsc_seq) { - sfc_info(sa, "link status change event"); + sfc_notice(sa, "link status change event"); _rte_eth_dev_callback_process(sa->eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL); @@ -251,7 +251,7 @@ sfc_intr_configure(struct sfc_adapter *sa) intr->handler = NULL; intr->lsc_intr = (sa->eth_dev->data->dev_conf.intr_conf.lsc != 0); if (!intr->lsc_intr) { - sfc_info(sa, "LSC tracking using interrupts is disabled"); + sfc_notice(sa, "LSC tracking using interrupts is disabled"); goto done; } diff --git a/drivers/net/sfc/sfc_kvargs.c b/drivers/net/sfc/sfc_kvargs.c index 8f55e99b..7a89c769 100644 --- a/drivers/net/sfc/sfc_kvargs.c +++ b/drivers/net/sfc/sfc_kvargs.c @@ -23,11 +23,11 @@ sfc_kvargs_parse(struct sfc_adapter *sa) struct rte_devargs *devargs = eth_dev->device->devargs; const char **params = (const char *[]){ SFC_KVARG_STATS_UPDATE_PERIOD_MS, - SFC_KVARG_DEBUG_INIT, - SFC_KVARG_MCDI_LOGGING, SFC_KVARG_PERF_PROFILE, SFC_KVARG_RX_DATAPATH, SFC_KVARG_TX_DATAPATH, + SFC_KVARG_FW_VARIANT, + SFC_KVARG_RXD_WAIT_TIMEOUT_NS, NULL, }; diff --git a/drivers/net/sfc/sfc_kvargs.h b/drivers/net/sfc/sfc_kvargs.h index e7044ca6..4506667a 100644 --- a/drivers/net/sfc/sfc_kvargs.h +++ b/drivers/net/sfc/sfc_kvargs.h @@ -18,10 +18,6 @@ extern "C" { #define SFC_KVARG_VALUES_BOOL "[1|y|yes|on|0|n|no|off]" -#define SFC_KVARG_DEBUG_INIT "debug_init" - -#define SFC_KVARG_MCDI_LOGGING "mcdi_logging" - #define SFC_KVARG_PERF_PROFILE "perf_profile" #define SFC_KVARG_PERF_PROFILE_AUTO "auto" @@ -37,11 +33,13 @@ extern "C" { #define SFC_KVARG_DATAPATH_EFX "efx" #define SFC_KVARG_DATAPATH_EF10 "ef10" #define SFC_KVARG_DATAPATH_EF10_SIMPLE "ef10_simple" +#define SFC_KVARG_DATAPATH_EF10_ESSB "ef10_essb" #define SFC_KVARG_RX_DATAPATH "rx_datapath" #define SFC_KVARG_VALUES_RX_DATAPATH \ "[" SFC_KVARG_DATAPATH_EFX "|" \ - SFC_KVARG_DATAPATH_EF10 "]" + SFC_KVARG_DATAPATH_EF10 "|" \ + SFC_KVARG_DATAPATH_EF10_ESSB "]" #define SFC_KVARG_TX_DATAPATH "tx_datapath" #define SFC_KVARG_VALUES_TX_DATAPATH \ @@ -49,6 +47,22 @@ extern "C" { SFC_KVARG_DATAPATH_EF10 "|" \ SFC_KVARG_DATAPATH_EF10_SIMPLE "]" +#define SFC_KVARG_FW_VARIANT "fw_variant" + +#define SFC_KVARG_FW_VARIANT_DONT_CARE "dont-care" +#define SFC_KVARG_FW_VARIANT_FULL_FEATURED "full-feature" +#define SFC_KVARG_FW_VARIANT_LOW_LATENCY "ultra-low-latency" +#define SFC_KVARG_FW_VARIANT_PACKED_STREAM "capture-packed-stream" +#define SFC_KVARG_FW_VARIANT_DPDK "dpdk" +#define SFC_KVARG_VALUES_FW_VARIANT \ + "[" SFC_KVARG_FW_VARIANT_DONT_CARE "|" \ + SFC_KVARG_FW_VARIANT_FULL_FEATURED "|" \ + SFC_KVARG_FW_VARIANT_LOW_LATENCY "|" \ + SFC_KVARG_FW_VARIANT_PACKED_STREAM "|" \ + SFC_KVARG_FW_VARIANT_DPDK "]" + +#define SFC_KVARG_RXD_WAIT_TIMEOUT_NS "rxd_wait_timeout_ns" + struct sfc_adapter; int sfc_kvargs_parse(struct sfc_adapter *sa); diff --git a/drivers/net/sfc/sfc_log.h b/drivers/net/sfc/sfc_log.h index a18191ed..d6f34352 100644 
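Usage note: the kvargs listed above are passed as device arguments appended to the EAL PCI whitelist option. An illustrative testpmd invocation (PCI address and timeout value are examples):

    testpmd -w 0000:02:00.0,rx_datapath=ef10_essb,fw_variant=dpdk,rxd_wait_timeout_ns=100000 -- -i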
--- a/drivers/net/sfc/sfc_log.h +++ b/drivers/net/sfc/sfc_log.h @@ -10,14 +10,35 @@ #ifndef _SFC_LOG_H_ #define _SFC_LOG_H_ +/** Generic driver log type */ +extern uint32_t sfc_logtype_driver; + +/** Common log type name prefix */ +#define SFC_LOGTYPE_PREFIX "pmd.net.sfc." + +/** Log PMD generic message, add a prefix and a line break */ +#define SFC_GENERIC_LOG(level, ...) \ + rte_log(RTE_LOG_ ## level, sfc_logtype_driver, \ + RTE_FMT("PMD: " RTE_FMT_HEAD(__VA_ARGS__ ,) "\n", \ + RTE_FMT_TAIL(__VA_ARGS__ ,))) + +/** Name prefix for the per-device log type used to report basic information */ +#define SFC_LOGTYPE_MAIN_STR SFC_LOGTYPE_PREFIX "main" + +/** Device MCDI log type name prefix */ +#define SFC_LOGTYPE_MCDI_STR SFC_LOGTYPE_PREFIX "mcdi" + +/** Level value used by MCDI log statements */ +#define SFC_LOG_LEVEL_MCDI RTE_LOG_INFO + /* Log PMD message, automatically add prefix and \n */ -#define SFC_LOG(sa, level, ...) \ +#define SFC_LOG(sa, level, type, ...) \ do { \ const struct sfc_adapter *__sa = (sa); \ \ - RTE_LOG(level, PMD, \ - RTE_FMT("sfc_efx " PCI_PRI_FMT " #%" PRIu8 ": " \ - RTE_FMT_HEAD(__VA_ARGS__,) "\n", \ + rte_log(level, type, \ + RTE_FMT("PMD: sfc_efx " PCI_PRI_FMT " #%" PRIu8 \ + ": " RTE_FMT_HEAD(__VA_ARGS__ ,) "\n", \ __sa->pci_addr.domain, \ __sa->pci_addr.bus, \ __sa->pci_addr.devid, \ @@ -27,27 +48,55 @@ } while (0) #define sfc_err(sa, ...) \ - SFC_LOG(sa, ERR, __VA_ARGS__) + do { \ + const struct sfc_adapter *_sa = (sa); \ + \ + SFC_LOG(_sa, RTE_LOG_ERR, _sa->logtype_main, \ + __VA_ARGS__); \ + } while (0) #define sfc_warn(sa, ...) \ - SFC_LOG(sa, WARNING, __VA_ARGS__) + do { \ + const struct sfc_adapter *_sa = (sa); \ + \ + SFC_LOG(_sa, RTE_LOG_WARNING, _sa->logtype_main, \ + __VA_ARGS__); \ + } while (0) #define sfc_notice(sa, ...) \ - SFC_LOG(sa, NOTICE, __VA_ARGS__) + do { \ + const struct sfc_adapter *_sa = (sa); \ + \ + SFC_LOG(_sa, RTE_LOG_NOTICE, _sa->logtype_main, \ + __VA_ARGS__); \ + } while (0) #define sfc_info(sa, ...) \ - SFC_LOG(sa, INFO, __VA_ARGS__) + do { \ + const struct sfc_adapter *_sa = (sa); \ + \ + SFC_LOG(_sa, RTE_LOG_INFO, _sa->logtype_main, \ + __VA_ARGS__); \ + } while (0) #define sfc_log_init(sa, ...) \ do { \ const struct sfc_adapter *_sa = (sa); \ \ - if (_sa->debug_init) \ - SFC_LOG(_sa, INFO, \ - RTE_FMT("%s(): " \ - RTE_FMT_HEAD(__VA_ARGS__,), \ - __func__, \ - RTE_FMT_TAIL(__VA_ARGS__,))); \ + SFC_LOG(_sa, RTE_LOG_INFO, _sa->logtype_main, \ + RTE_FMT("%s(): " \ + RTE_FMT_HEAD(__VA_ARGS__ ,), \ + __func__, \ + RTE_FMT_TAIL(__VA_ARGS__ ,))); \ } while (0) +#define sfc_log_mcdi(sa, ...) \ + do { \ + const struct sfc_adapter *_sa = (sa); \ + \ + SFC_LOG(_sa, SFC_LOG_LEVEL_MCDI, _sa->mcdi.logtype, \ + __VA_ARGS__); \ + } while (0) + + #endif /* _SFC_LOG_H_ */ diff --git a/drivers/net/sfc/sfc_mcdi.c b/drivers/net/sfc/sfc_mcdi.c index 9d92b8c5..007506b4 100644 --- a/drivers/net/sfc/sfc_mcdi.c +++ b/drivers/net/sfc/sfc_mcdi.c @@ -15,7 +15,6 @@ #include "sfc.h" #include "sfc_log.h" -#include "sfc_kvargs.h" #include "sfc_ev.h" #define SFC_MCDI_POLL_INTERVAL_MIN_US 10 /* 10us in 1us units */ @@ -176,7 +175,7 @@ sfc_mcdi_do_log(const struct sfc_adapter *sa, * at the end which is required by netlogdecode. 
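The macros above build on DPDK's dynamic log types. For reference, a minimal registration sketch using the plain rte_log API (the adapter-level sfc_register_logtype() helper is not part of this hunk; the names below are illustrative):

static uint32_t example_logtype;

RTE_INIT(example_init_log);
static void
example_init_log(void)
{
	int ret = rte_log_register(SFC_LOGTYPE_PREFIX "example");

	/* A negative value means registration failed; keep the default. */
	if (ret >= 0) {
		example_logtype = ret;
		rte_log_set_level(example_logtype, RTE_LOG_NOTICE);
	}
}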
*/ buffer[position] = '\0'; - sfc_info(sa, "%s \\", buffer); + sfc_log_mcdi(sa, "%s \\", buffer); /* Preserve prefix for the next log message */ position = pfxsize; } @@ -198,10 +197,17 @@ sfc_mcdi_logger(void *arg, efx_log_msg_t type, size_t pfxsize; size_t start; - if (!sa->mcdi.logging) + /* + * Unlike the other cases, MCDI logging implies more onerous work + * needed to produce a message. If the dynamic log level prevents + * the end result from being printed, the CPU time will be wasted. + * + * To avoid wasting time, the actual level is examined in advance. + */ + if (rte_log_get_level(sa->mcdi.logtype) < (int)SFC_LOG_LEVEL_MCDI) return; - /* The format including prefix added by sfc_info() is the format + /* The format including prefix added by sfc_log_mcdi() is the format * consumed by the Solarflare netlogdecode tool. */ pfxsize = snprintf(buffer, sizeof(buffer), "MCDI RPC %s:", @@ -212,7 +218,7 @@ sfc_mcdi_logger(void *arg, efx_log_msg_t type, start = sfc_mcdi_do_log(sa, buffer, data, data_size, pfxsize, start); if (start != pfxsize) { buffer[start] = '\0'; - sfc_info(sa, "%s", buffer); + sfc_log_mcdi(sa, "%s", buffer); } } @@ -250,11 +256,8 @@ sfc_mcdi_init(struct sfc_adapter *sa) if (rc != 0) goto fail_dma_alloc; - /* Convert negative error to positive used in the driver */ - rc = sfc_kvargs_process(sa, SFC_KVARG_MCDI_LOGGING, - sfc_kvarg_bool_handler, &mcdi->logging); - if (rc != 0) - goto fail_kvargs_process; + mcdi->logtype = sfc_register_logtype(sa, SFC_LOGTYPE_MCDI_STR, + RTE_LOG_NOTICE); emtp = &mcdi->transport; emtp->emt_context = sa; @@ -274,8 +277,6 @@ sfc_mcdi_init(struct sfc_adapter *sa) fail_mcdi_init: memset(emtp, 0, sizeof(*emtp)); - -fail_kvargs_process: sfc_dma_free(sa, &mcdi->mem); fail_dma_alloc: diff --git a/drivers/net/sfc/sfc_port.c b/drivers/net/sfc/sfc_port.c index c423f527..5384dbbd 100644 --- a/drivers/net/sfc/sfc_port.c +++ b/drivers/net/sfc/sfc_port.c @@ -121,6 +121,28 @@ sfc_port_init_dev_link(struct sfc_adapter *sa) return 0; } +#if EFSYS_OPT_LOOPBACK + +static efx_link_mode_t +sfc_port_phy_caps_to_max_link_speed(uint32_t phy_caps) +{ + if (phy_caps & (1u << EFX_PHY_CAP_100000FDX)) + return EFX_LINK_100000FDX; + if (phy_caps & (1u << EFX_PHY_CAP_50000FDX)) + return EFX_LINK_50000FDX; + if (phy_caps & (1u << EFX_PHY_CAP_40000FDX)) + return EFX_LINK_40000FDX; + if (phy_caps & (1u << EFX_PHY_CAP_25000FDX)) + return EFX_LINK_25000FDX; + if (phy_caps & (1u << EFX_PHY_CAP_10000FDX)) + return EFX_LINK_10000FDX; + if (phy_caps & (1u << EFX_PHY_CAP_1000FDX)) + return EFX_LINK_1000FDX; + return EFX_LINK_UNKNOWN; +} + +#endif + int sfc_port_start(struct sfc_adapter *sa) { @@ -143,6 +165,21 @@ sfc_port_start(struct sfc_adapter *sa) if (rc != 0) goto fail_port_init; +#if EFSYS_OPT_LOOPBACK + if (sa->eth_dev->data->dev_conf.lpbk_mode != 0) { + efx_link_mode_t link_mode; + + link_mode = + sfc_port_phy_caps_to_max_link_speed(port->phy_adv_cap); + sfc_log_init(sa, "set loopback link_mode=%u type=%u", link_mode, + sa->eth_dev->data->dev_conf.lpbk_mode); + rc = efx_port_loopback_set(sa->nic, link_mode, + sa->eth_dev->data->dev_conf.lpbk_mode); + if (rc != 0) + goto fail_loopback_set; + } +#endif + sfc_log_init(sa, "set flow control to %#x autoneg=%u", port->flow_ctrl, port->flow_ctrl_autoneg); rc = efx_mac_fcntl_set(sa->nic, port->flow_ctrl, @@ -155,6 +192,16 @@ sfc_port_start(struct sfc_adapter *sa) SFC_ASSERT((port->phy_adv_cap & phy_pause_caps) == 0); phy_adv_cap = port->phy_adv_cap | (phy_adv_cap & phy_pause_caps); + /* + * No controls for FEC yet. 
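On the application side, the loopback path added above is selected through rte_eth_conf before the port is configured. A minimal sketch (the meaning of non-zero lpbk_mode values is driver-specific; ret, port_id and the queue counts are assumed to exist in the caller):

struct rte_eth_conf port_conf;

memset(&port_conf, 0, sizeof(port_conf));
port_conf.lpbk_mode = 1;	/* non-zero requests loopback; 0 = normal mode */

ret = rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);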
Use default FEC mode. + * I.e. advertise everything supported (*_FEC=1), but do not request + * anything explicitly (*_FEC_REQUESTED=0). + */ + phy_adv_cap |= port->phy_adv_cap_mask & + (1u << EFX_PHY_CAP_BASER_FEC | + 1u << EFX_PHY_CAP_RS_FEC | + 1u << EFX_PHY_CAP_25G_BASER_FEC); + sfc_log_init(sa, "set phy adv caps to %#x", phy_adv_cap); rc = efx_phy_adv_cap_set(sa->nic, phy_adv_cap); if (rc != 0) @@ -268,6 +315,9 @@ fail_mac_addr_set: fail_mac_pdu_set: fail_phy_adv_cap_set: fail_mac_fcntl_set: +#if EFSYS_OPT_LOOPBACK +fail_loopback_set: +#endif efx_port_fini(sa->nic); fail_port_init: @@ -323,6 +373,8 @@ sfc_port_attach(struct sfc_adapter *sa) struct sfc_port *port = &sa->port; const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic); const struct ether_addr *from; + uint32_t mac_nstats; + size_t mac_stats_size; long kvarg_stats_update_period_ms; int rc; @@ -358,7 +410,9 @@ sfc_port_attach(struct sfc_adapter *sa) if (port->mac_stats_buf == NULL) goto fail_mac_stats_buf_alloc; - rc = sfc_dma_alloc(sa, "mac_stats", 0, EFX_MAC_STATS_SIZE, + mac_nstats = efx_nic_cfg_get(sa->nic)->enc_mac_stats_nstats; + mac_stats_size = RTE_ALIGN(mac_nstats * sizeof(uint64_t), EFX_BUF_SIZE); + rc = sfc_dma_alloc(sa, "mac_stats", 0, mac_stats_size, sa->socket_id, &port->mac_stats_dma_mem); if (rc != 0) goto fail_mac_stats_dma_alloc; @@ -470,10 +524,22 @@ sfc_port_link_mode_to_info(efx_link_mode_t link_mode, link_info->link_speed = ETH_SPEED_NUM_10G; link_info->link_duplex = ETH_LINK_FULL_DUPLEX; break; + case EFX_LINK_25000FDX: + link_info->link_speed = ETH_SPEED_NUM_25G; + link_info->link_duplex = ETH_LINK_FULL_DUPLEX; + break; case EFX_LINK_40000FDX: link_info->link_speed = ETH_SPEED_NUM_40G; link_info->link_duplex = ETH_LINK_FULL_DUPLEX; break; + case EFX_LINK_50000FDX: + link_info->link_speed = ETH_SPEED_NUM_50G; + link_info->link_duplex = ETH_LINK_FULL_DUPLEX; + break; + case EFX_LINK_100000FDX: + link_info->link_speed = ETH_SPEED_NUM_100G; + link_info->link_duplex = ETH_LINK_FULL_DUPLEX; + break; default: SFC_ASSERT(B_FALSE); /* FALLTHROUGH */ diff --git a/drivers/net/sfc/sfc_rx.c b/drivers/net/sfc/sfc_rx.c index abc53fb5..cc76a5b1 100644 --- a/drivers/net/sfc/sfc_rx.c +++ b/drivers/net/sfc/sfc_rx.c @@ -184,7 +184,6 @@ sfc_efx_supported_ptypes_get(__rte_unused uint32_t tunnel_encaps) return ptypes; } -#if EFSYS_OPT_RX_SCALE static void sfc_efx_rx_set_rss_hash(struct sfc_efx_rxq *rxq, unsigned int flags, struct rte_mbuf *m) @@ -205,14 +204,6 @@ sfc_efx_rx_set_rss_hash(struct sfc_efx_rxq *rxq, unsigned int flags, m->ol_flags |= PKT_RX_RSS_HASH; } } -#else -static void -sfc_efx_rx_set_rss_hash(__rte_unused struct sfc_efx_rxq *rxq, - __rte_unused unsigned int flags, - __rte_unused struct rte_mbuf *m) -{ -} -#endif static uint16_t sfc_efx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) @@ -393,6 +384,7 @@ sfc_rxq_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq) static sfc_dp_rx_qsize_up_rings_t sfc_efx_rx_qsize_up_rings; static int sfc_efx_rx_qsize_up_rings(uint16_t nb_rx_desc, + __rte_unused struct rte_mempool *mb_pool, unsigned int *rxq_entries, unsigned int *evq_entries, unsigned int *rxq_max_fill_level) @@ -608,7 +600,7 @@ sfc_rx_qflush(struct sfc_adapter *sa, unsigned int sw_index) sfc_err(sa, "RxQ %u flush failed", sw_index); if (rxq->state & SFC_RXQ_FLUSHED) - sfc_info(sa, "RxQ %u flushed", sw_index); + sfc_notice(sa, "RxQ %u flushed", sw_index); } sa->dp_rx->qpurge(rxq->dp); @@ -617,7 +609,8 @@ sfc_rx_qflush(struct sfc_adapter *sa, unsigned int sw_index) static int 
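The MAC statistics DMA sizing above can be checked with example numbers (the counter count is hypothetical and EFX_BUF_SIZE is assumed to be 4096):

/*
 * mac_nstats = 120  =>  120 * sizeof(uint64_t) = 960 bytes,
 * RTE_ALIGN(960, 4096) = 4096, i.e. a single 4 KiB DMA buffer;
 * only more than 512 64-bit counters would require a second buffer.
 */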
sfc_rx_default_rxq_set_filter(struct sfc_adapter *sa, struct sfc_rxq *rxq) { - boolean_t rss = (sa->rss_channels > 0) ? B_TRUE : B_FALSE; + struct sfc_rss *rss = &sa->rss; + boolean_t need_rss = (rss->channels > 0) ? B_TRUE : B_FALSE; struct sfc_port *port = &sa->port; int rc; @@ -629,7 +622,7 @@ sfc_rx_default_rxq_set_filter(struct sfc_adapter *sa, struct sfc_rxq *rxq) * repeat this step without promiscuous and all-multicast flags set */ retry: - rc = efx_mac_filter_default_rxq_set(sa->nic, rxq->common, rss); + rc = efx_mac_filter_default_rxq_set(sa->nic, rxq->common, need_rss); if (rc == 0) return 0; else if (rc != EOPNOTSUPP) @@ -687,10 +680,37 @@ sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index) if (rc != 0) goto fail_ev_qstart; - rc = efx_rx_qcreate(sa->nic, rxq->hw_index, 0, rxq_info->type, - &rxq->mem, rxq_info->entries, - 0 /* not used on EF10 */, rxq_info->type_flags, - evq->common, &rxq->common); + switch (rxq_info->type) { + case EFX_RXQ_TYPE_DEFAULT: + rc = efx_rx_qcreate(sa->nic, rxq->hw_index, 0, rxq_info->type, + &rxq->mem, rxq_info->entries, 0 /* not used on EF10 */, + rxq_info->type_flags, evq->common, &rxq->common); + break; + case EFX_RXQ_TYPE_ES_SUPER_BUFFER: { + struct rte_mempool *mp = rxq->refill_mb_pool; + struct rte_mempool_info mp_info; + + rc = rte_mempool_ops_get_info(mp, &mp_info); + if (rc != 0) { + /* Positive errno is used in the driver */ + rc = -rc; + goto fail_mp_get_info; + } + if (mp_info.contig_block_size <= 0) { + rc = EINVAL; + goto fail_bad_contig_block_size; + } + rc = efx_rx_qcreate_es_super_buffer(sa->nic, rxq->hw_index, 0, + mp_info.contig_block_size, rxq->buf_size, + mp->header_size + mp->elt_size + mp->trailer_size, + sa->rxd_wait_timeout_ns, + &rxq->mem, rxq_info->entries, rxq_info->type_flags, + evq->common, &rxq->common); + break; + } + default: + rc = ENOTSUP; + } if (rc != 0) goto fail_rx_qcreate; @@ -721,6 +741,8 @@ fail_dp_qstart: sfc_rx_qflush(sa, sw_index); fail_rx_qcreate: +fail_bad_contig_block_size: +fail_mp_get_info: sfc_ev_qstop(evq); fail_ev_qstart: @@ -792,48 +814,10 @@ sfc_rx_get_queue_offload_caps(struct sfc_adapter *sa) return caps; } -static void -sfc_rx_log_offloads(struct sfc_adapter *sa, const char *offload_group, - const char *verdict, uint64_t offloads) -{ - unsigned long long bit; - - while ((bit = __builtin_ffsll(offloads)) != 0) { - uint64_t flag = (1ULL << --bit); - - sfc_err(sa, "Rx %s offload %s %s", offload_group, - rte_eth_dev_rx_offload_name(flag), verdict); - - offloads &= ~flag; - } -} - -static boolean_t -sfc_rx_queue_offloads_mismatch(struct sfc_adapter *sa, uint64_t requested) -{ - uint64_t mandatory = sa->eth_dev->data->dev_conf.rxmode.offloads; - uint64_t supported = sfc_rx_get_dev_offload_caps(sa) | - sfc_rx_get_queue_offload_caps(sa); - uint64_t rejected = requested & ~supported; - uint64_t missing = (requested & mandatory) ^ mandatory; - boolean_t mismatch = B_FALSE; - - if (rejected) { - sfc_rx_log_offloads(sa, "queue", "is unsupported", rejected); - mismatch = B_TRUE; - } - - if (missing) { - sfc_rx_log_offloads(sa, "queue", "must be set", missing); - mismatch = B_TRUE; - } - - return mismatch; -} - static int sfc_rx_qcheck_conf(struct sfc_adapter *sa, unsigned int rxq_max_fill_level, - const struct rte_eth_rxconf *rx_conf) + const struct rte_eth_rxconf *rx_conf, + uint64_t offloads) { uint64_t offloads_supported = sfc_rx_get_dev_offload_caps(sa) | sfc_rx_get_queue_offload_caps(sa); @@ -858,17 +842,14 @@ sfc_rx_qcheck_conf(struct sfc_adapter *sa, unsigned int rxq_max_fill_level, rc = 
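The EFX_RXQ_TYPE_ES_SUPER_BUFFER branch above only works with a mempool whose ops report a positive contig_block_size. A sketch of creating such a pool, assuming the bucket mempool driver introduced in this release is built in (pool name and sizes are illustrative):

struct rte_mempool *mp;

mp = rte_pktmbuf_pool_create_by_ops("essb_pool", 8192, 256, 0,
				    RTE_MBUF_DEFAULT_BUF_SIZE,
				    rte_socket_id(), "bucket");
if (mp == NULL)
	rte_exit(EXIT_FAILURE, "Cannot create bucket mempool\n");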
EINVAL; } - if ((rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM) != + if ((offloads & DEV_RX_OFFLOAD_CHECKSUM) != DEV_RX_OFFLOAD_CHECKSUM) sfc_warn(sa, "Rx checksum offloads cannot be disabled - always on (IPv4/TCP/UDP)"); if ((offloads_supported & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) && - (~rx_conf->offloads & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM)) + (~offloads & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM)) sfc_warn(sa, "Rx outer IPv4 checksum offload cannot be disabled - always on"); - if (sfc_rx_queue_offloads_mismatch(sa, rx_conf->offloads)) - rc = EINVAL; - return rc; } @@ -887,7 +868,7 @@ sfc_rx_mbuf_data_alignment(struct rte_mempool *mb_pool) order = MIN(order, rte_bsf32(data_off)); - return 1u << (order - 1); + return 1u << order; } static uint16_t @@ -979,26 +960,29 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index, struct rte_mempool *mb_pool) { const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic); + struct sfc_rss *rss = &sa->rss; int rc; unsigned int rxq_entries; unsigned int evq_entries; unsigned int rxq_max_fill_level; + uint64_t offloads; uint16_t buf_size; struct sfc_rxq_info *rxq_info; struct sfc_evq *evq; struct sfc_rxq *rxq; struct sfc_dp_rx_qcreate_info info; - rc = sa->dp_rx->qsize_up_rings(nb_rx_desc, &rxq_entries, &evq_entries, - &rxq_max_fill_level); + rc = sa->dp_rx->qsize_up_rings(nb_rx_desc, mb_pool, &rxq_entries, + &evq_entries, &rxq_max_fill_level); if (rc != 0) goto fail_size_up_rings; SFC_ASSERT(rxq_entries >= EFX_RXQ_MINNDESCS); SFC_ASSERT(rxq_entries <= EFX_RXQ_MAXNDESCS); - SFC_ASSERT(rxq_entries >= nb_rx_desc); SFC_ASSERT(rxq_max_fill_level <= nb_rx_desc); - rc = sfc_rx_qcheck_conf(sa, rxq_max_fill_level, rx_conf); + offloads = rx_conf->offloads | + sa->eth_dev->data->dev_conf.rxmode.offloads; + rc = sfc_rx_qcheck_conf(sa, rxq_max_fill_level, rx_conf, offloads); if (rc != 0) goto fail_bad_conf; @@ -1011,7 +995,7 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index, } if ((buf_size < sa->port.pdu + encp->enc_rx_prefix_size) && - (~rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)) { + (~offloads & DEV_RX_OFFLOAD_SCATTER)) { sfc_err(sa, "Rx scatter is disabled and RxQ %u mbuf pool " "object size is too small", sw_index); sfc_err(sa, "RxQ %u calculated Rx buffer size is %u vs " @@ -1027,9 +1011,14 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index, SFC_ASSERT(rxq_entries <= rxq_info->max_entries); rxq_info->entries = rxq_entries; - rxq_info->type = EFX_RXQ_TYPE_DEFAULT; + + if (sa->dp_rx->dp.hw_fw_caps & SFC_DP_HW_FW_CAP_RX_ES_SUPER_BUFFER) + rxq_info->type = EFX_RXQ_TYPE_ES_SUPER_BUFFER; + else + rxq_info->type = EFX_RXQ_TYPE_DEFAULT; + rxq_info->type_flags = - (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) ? + (offloads & DEV_RX_OFFLOAD_SCATTER) ? 
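The "1u << order" change above fixes an alignment under-report; a concrete value makes it clear (assuming the default 128-byte mbuf headroom):

/*
 * data_off = 128  =>  rte_bsf32(128) = 7, so order <= 7:
 *   fixed:  1u << 7       = 128-byte alignment (correct)
 *   before: 1u << (7 - 1) =  64-byte alignment (understated by half)
 */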
EFX_RXQ_FLAG_SCATTER : EFX_RXQ_FLAG_NONE; if ((encp->enc_tunnel_encapsulations_supported != 0) && @@ -1054,6 +1043,7 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index, rxq->refill_threshold = RTE_MAX(rx_conf->rx_free_thresh, SFC_RX_REFILL_BULK); rxq->refill_mb_pool = mb_pool; + rxq->buf_size = buf_size; rc = sfc_dma_alloc(sa, "rxq", sw_index, EFX_RXQ_SIZE(rxq_info->entries), socket_id, &rxq->mem); @@ -1068,10 +1058,8 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index, info.batch_max = encp->enc_rx_batch_max; info.prefix_size = encp->enc_rx_prefix_size; -#if EFSYS_OPT_RX_SCALE - if (sa->hash_support == EFX_RX_HASH_AVAILABLE && sa->rss_channels > 0) + if (rss->hash_support == EFX_RX_HASH_AVAILABLE && rss->channels > 0) info.flags |= SFC_RXQ_FLAG_RSS_HASH; -#endif info.rxq_entries = rxq_info->entries; info.rxq_hw_ring = rxq->mem.esm_base; @@ -1079,6 +1067,7 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index, info.evq_hw_ring = evq->mem.esm_base; info.hw_index = rxq->hw_index; info.mem_bar = sa->mem_bar.esb_base; + info.vi_window_shift = encp->enc_vi_window_shift; rc = sa->dp_rx->qcreate(sa->eth_dev->data->port_id, sw_index, &RTE_ETH_DEV_TO_PCI(sa->eth_dev)->addr, @@ -1140,85 +1129,228 @@ sfc_rx_qfini(struct sfc_adapter *sa, unsigned int sw_index) rte_free(rxq); } -#if EFSYS_OPT_RX_SCALE -efx_rx_hash_type_t -sfc_rte_to_efx_hash_type(uint64_t rss_hf) +/* + * Mapping between RTE RSS hash functions and their EFX counterparts. + */ +struct sfc_rss_hf_rte_to_efx sfc_rss_hf_map[] = { + { ETH_RSS_NONFRAG_IPV4_TCP, + EFX_RX_HASH(IPV4_TCP, 4TUPLE) }, + { ETH_RSS_NONFRAG_IPV4_UDP, + EFX_RX_HASH(IPV4_UDP, 4TUPLE) }, + { ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX, + EFX_RX_HASH(IPV6_TCP, 4TUPLE) }, + { ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_IPV6_UDP_EX, + EFX_RX_HASH(IPV6_UDP, 4TUPLE) }, + { ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_OTHER, + EFX_RX_HASH(IPV4_TCP, 2TUPLE) | EFX_RX_HASH(IPV4_UDP, 2TUPLE) | + EFX_RX_HASH(IPV4, 2TUPLE) }, + { ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER | + ETH_RSS_IPV6_EX, + EFX_RX_HASH(IPV6_TCP, 2TUPLE) | EFX_RX_HASH(IPV6_UDP, 2TUPLE) | + EFX_RX_HASH(IPV6, 2TUPLE) } +}; + +static efx_rx_hash_type_t +sfc_rx_hash_types_mask_supp(efx_rx_hash_type_t hash_type, + unsigned int *hash_type_flags_supported, + unsigned int nb_hash_type_flags_supported) { - efx_rx_hash_type_t efx_hash_types = 0; + efx_rx_hash_type_t hash_type_masked = 0; + unsigned int i, j; + + for (i = 0; i < nb_hash_type_flags_supported; ++i) { + unsigned int class_tuple_lbn[] = { + EFX_RX_CLASS_IPV4_TCP_LBN, + EFX_RX_CLASS_IPV4_UDP_LBN, + EFX_RX_CLASS_IPV4_LBN, + EFX_RX_CLASS_IPV6_TCP_LBN, + EFX_RX_CLASS_IPV6_UDP_LBN, + EFX_RX_CLASS_IPV6_LBN + }; + + for (j = 0; j < RTE_DIM(class_tuple_lbn); ++j) { + unsigned int tuple_mask = EFX_RX_CLASS_HASH_4TUPLE; + unsigned int flag; + + tuple_mask <<= class_tuple_lbn[j]; + flag = hash_type & tuple_mask; + + if (flag == hash_type_flags_supported[i]) + hash_type_masked |= flag; + } + } - if ((rss_hf & (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | - ETH_RSS_NONFRAG_IPV4_OTHER)) != 0) - efx_hash_types |= EFX_RX_HASH_IPV4; + return hash_type_masked; +} + +int +sfc_rx_hash_init(struct sfc_adapter *sa) +{ + struct sfc_rss *rss = &sa->rss; + const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic); + uint32_t alg_mask = encp->enc_rx_scale_hash_alg_mask; + efx_rx_hash_alg_t alg; + unsigned int flags_supp[EFX_RX_HASH_NFLAGS]; + unsigned int nb_flags_supp; + struct sfc_rss_hf_rte_to_efx *hf_map; + struct sfc_rss_hf_rte_to_efx *entry; + 
efx_rx_hash_type_t efx_hash_types; + unsigned int i; + int rc; + + if (alg_mask & (1U << EFX_RX_HASHALG_TOEPLITZ)) + alg = EFX_RX_HASHALG_TOEPLITZ; + else if (alg_mask & (1U << EFX_RX_HASHALG_PACKED_STREAM)) + alg = EFX_RX_HASHALG_PACKED_STREAM; + else + return EINVAL; - if ((rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) != 0) - efx_hash_types |= EFX_RX_HASH_TCPIPV4; + rc = efx_rx_scale_hash_flags_get(sa->nic, alg, flags_supp, + &nb_flags_supp); + if (rc != 0) + return rc; - if ((rss_hf & (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | - ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_IPV6_EX)) != 0) - efx_hash_types |= EFX_RX_HASH_IPV6; + hf_map = rte_calloc_socket("sfc-rss-hf-map", + RTE_DIM(sfc_rss_hf_map), + sizeof(*hf_map), 0, sa->socket_id); + if (hf_map == NULL) + return ENOMEM; + + entry = hf_map; + efx_hash_types = 0; + for (i = 0; i < RTE_DIM(sfc_rss_hf_map); ++i) { + efx_rx_hash_type_t ht; + + ht = sfc_rx_hash_types_mask_supp(sfc_rss_hf_map[i].efx, + flags_supp, nb_flags_supp); + if (ht != 0) { + entry->rte = sfc_rss_hf_map[i].rte; + entry->efx = ht; + efx_hash_types |= ht; + ++entry; + } + } - if ((rss_hf & (ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX)) != 0) - efx_hash_types |= EFX_RX_HASH_TCPIPV6; + rss->hash_alg = alg; + rss->hf_map_nb_entries = (unsigned int)(entry - hf_map); + rss->hf_map = hf_map; + rss->hash_types = efx_hash_types; - return efx_hash_types; + return 0; +} + +void +sfc_rx_hash_fini(struct sfc_adapter *sa) +{ + struct sfc_rss *rss = &sa->rss; + + rte_free(rss->hf_map); +} + +int +sfc_rx_hf_rte_to_efx(struct sfc_adapter *sa, uint64_t rte, + efx_rx_hash_type_t *efx) +{ + struct sfc_rss *rss = &sa->rss; + efx_rx_hash_type_t hash_types = 0; + unsigned int i; + + for (i = 0; i < rss->hf_map_nb_entries; ++i) { + uint64_t rte_mask = rss->hf_map[i].rte; + + if ((rte & rte_mask) != 0) { + rte &= ~rte_mask; + hash_types |= rss->hf_map[i].efx; + } + } + + if (rte != 0) { + sfc_err(sa, "unsupported hash functions requested"); + return EINVAL; + } + + *efx = hash_types; + + return 0; } uint64_t -sfc_efx_to_rte_hash_type(efx_rx_hash_type_t efx_hash_types) +sfc_rx_hf_efx_to_rte(struct sfc_adapter *sa, efx_rx_hash_type_t efx) { - uint64_t rss_hf = 0; + struct sfc_rss *rss = &sa->rss; + uint64_t rte = 0; + unsigned int i; - if ((efx_hash_types & EFX_RX_HASH_IPV4) != 0) - rss_hf |= (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | - ETH_RSS_NONFRAG_IPV4_OTHER); + for (i = 0; i < rss->hf_map_nb_entries; ++i) { + efx_rx_hash_type_t hash_type = rss->hf_map[i].efx; - if ((efx_hash_types & EFX_RX_HASH_TCPIPV4) != 0) - rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP; + if ((efx & hash_type) == hash_type) + rte |= rss->hf_map[i].rte; + } - if ((efx_hash_types & EFX_RX_HASH_IPV6) != 0) - rss_hf |= (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | - ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_IPV6_EX); + return rte; +} - if ((efx_hash_types & EFX_RX_HASH_TCPIPV6) != 0) - rss_hf |= (ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX); +static int +sfc_rx_process_adv_conf_rss(struct sfc_adapter *sa, + struct rte_eth_rss_conf *conf) +{ + struct sfc_rss *rss = &sa->rss; + efx_rx_hash_type_t efx_hash_types = rss->hash_types; + uint64_t rss_hf = sfc_rx_hf_efx_to_rte(sa, efx_hash_types); + int rc; + + if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE) { + if ((conf->rss_hf != 0 && conf->rss_hf != rss_hf) || + conf->rss_key != NULL) + return EINVAL; + } + + if (conf->rss_hf != 0) { + rc = sfc_rx_hf_rte_to_efx(sa, conf->rss_hf, &efx_hash_types); + if (rc != 0) + return rc; + } + + if (conf->rss_key != NULL) { + if (conf->rss_key_len != sizeof(rss->key)) { + sfc_err(sa, "RSS 
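A caller-side sketch of the new table-driven conversion (sa is the adapter context; error handling shortened):

efx_rx_hash_type_t efx_hash_types;
int rc;

/* e.g. request 4-tuple hashing for IPv4 TCP and UDP flows */
rc = sfc_rx_hf_rte_to_efx(sa,
			  ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_UDP,
			  &efx_hash_types);
if (rc != 0)
	return rc;	/* EINVAL: a requested function has no EFX mapping */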
key size is wrong (should be %lu)", + sizeof(rss->key)); + return EINVAL; + } + rte_memcpy(rss->key, conf->rss_key, sizeof(rss->key)); + } + + rss->hash_types = efx_hash_types; - return rss_hf; + return 0; } -#endif -#if EFSYS_OPT_RX_SCALE static int sfc_rx_rss_config(struct sfc_adapter *sa) { + struct sfc_rss *rss = &sa->rss; int rc = 0; - if (sa->rss_channels > 0) { + if (rss->channels > 0) { rc = efx_rx_scale_mode_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT, - EFX_RX_HASHALG_TOEPLITZ, - sa->rss_hash_types, B_TRUE); + rss->hash_alg, rss->hash_types, + B_TRUE); if (rc != 0) goto finish; rc = efx_rx_scale_key_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT, - sa->rss_key, - sizeof(sa->rss_key)); + rss->key, sizeof(rss->key)); if (rc != 0) goto finish; rc = efx_rx_scale_tbl_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT, - sa->rss_tbl, RTE_DIM(sa->rss_tbl)); + rss->tbl, RTE_DIM(rss->tbl)); } finish: return rc; } -#else -static int -sfc_rx_rss_config(__rte_unused struct sfc_adapter *sa) -{ - return 0; -} -#endif int sfc_rx_start(struct sfc_adapter *sa) @@ -1292,35 +1424,25 @@ sfc_rx_qinit_info(struct sfc_adapter *sa, unsigned int sw_index) static int sfc_rx_check_mode(struct sfc_adapter *sa, struct rte_eth_rxmode *rxmode) { - uint64_t offloads_supported = sfc_rx_get_dev_offload_caps(sa) | - sfc_rx_get_queue_offload_caps(sa); - uint64_t offloads_rejected = rxmode->offloads & ~offloads_supported; + struct sfc_rss *rss = &sa->rss; int rc = 0; switch (rxmode->mq_mode) { case ETH_MQ_RX_NONE: /* No special checks are required */ break; -#if EFSYS_OPT_RX_SCALE case ETH_MQ_RX_RSS: - if (sa->rss_support == EFX_RX_SCALE_UNAVAILABLE) { + if (rss->context_type == EFX_RX_SCALE_UNAVAILABLE) { sfc_err(sa, "RSS is not available"); rc = EINVAL; } break; -#endif default: sfc_err(sa, "Rx multi-queue mode %u not supported", rxmode->mq_mode); rc = EINVAL; } - if (offloads_rejected) { - sfc_rx_log_offloads(sa, "device", "is unsupported", - offloads_rejected); - rc = EINVAL; - } - if (~rxmode->offloads & DEV_RX_OFFLOAD_CRC_STRIP) { sfc_warn(sa, "FCS stripping cannot be disabled - always on"); rxmode->offloads |= DEV_RX_OFFLOAD_CRC_STRIP; @@ -1361,6 +1483,7 @@ sfc_rx_fini_queues(struct sfc_adapter *sa, unsigned int nb_rx_queues) int sfc_rx_configure(struct sfc_adapter *sa) { + struct sfc_rss *rss = &sa->rss; struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf; const unsigned int nb_rx_queues = sa->eth_dev->data->nb_rx_queues; int rc; @@ -1410,21 +1533,26 @@ sfc_rx_configure(struct sfc_adapter *sa) sa->rxq_count++; } -#if EFSYS_OPT_RX_SCALE - sa->rss_channels = (dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) ? - MIN(sa->rxq_count, EFX_MAXRSS) : 0; + rss->channels = (dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) ? 
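From the ethdev API side, the advanced RSS configuration consumed by sfc_rx_process_adv_conf_rss() is supplied at configure time. An illustrative setup (key bytes are placeholders; the sfc key length is EFX_RSS_KEY_SIZE, assumed to be 40 bytes):

static uint8_t rss_key[40] = { 0x6d, 0x5a, /* ... remaining key bytes */ };

struct rte_eth_conf port_conf = {
	.rxmode = { .mq_mode = ETH_MQ_RX_RSS },
	.rx_adv_conf = {
		.rss_conf = {
			.rss_key = rss_key,
			.rss_key_len = sizeof(rss_key),
			.rss_hf = ETH_RSS_NONFRAG_IPV4_TCP |
				  ETH_RSS_NONFRAG_IPV4_UDP,
		},
	},
};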
+ MIN(sa->rxq_count, EFX_MAXRSS) : 0; - if (sa->rss_channels > 0) { + if (rss->channels > 0) { + struct rte_eth_rss_conf *adv_conf_rss; unsigned int sw_index; for (sw_index = 0; sw_index < EFX_RSS_TBL_SIZE; ++sw_index) - sa->rss_tbl[sw_index] = sw_index % sa->rss_channels; + rss->tbl[sw_index] = sw_index % rss->channels; + + adv_conf_rss = &dev_conf->rx_adv_conf.rss_conf; + rc = sfc_rx_process_adv_conf_rss(sa, adv_conf_rss); + if (rc != 0) + goto fail_rx_process_adv_conf_rss; } -#endif done: return 0; +fail_rx_process_adv_conf_rss: fail_rx_qinit_info: fail_rxqs_realloc: fail_rxqs_alloc: @@ -1443,9 +1571,11 @@ fail_check_mode: void sfc_rx_close(struct sfc_adapter *sa) { + struct sfc_rss *rss = &sa->rss; + sfc_rx_fini_queues(sa, 0); - sa->rss_channels = 0; + rss->channels = 0; rte_free(sa->rxq_info); sa->rxq_info = NULL; diff --git a/drivers/net/sfc/sfc_rx.h b/drivers/net/sfc/sfc_rx.h index 6706ee6f..3fba7d8a 100644 --- a/drivers/net/sfc/sfc_rx.h +++ b/drivers/net/sfc/sfc_rx.h @@ -60,6 +60,7 @@ struct sfc_rxq { unsigned int hw_index; unsigned int refill_threshold; struct rte_mempool *refill_mb_pool; + uint16_t buf_size; struct sfc_dp_rxq *dp; unsigned int state; }; @@ -152,10 +153,12 @@ unsigned int sfc_rx_qdesc_npending(struct sfc_adapter *sa, unsigned int sw_index); int sfc_rx_qdesc_done(struct sfc_dp_rxq *dp_rxq, unsigned int offset); -#if EFSYS_OPT_RX_SCALE -efx_rx_hash_type_t sfc_rte_to_efx_hash_type(uint64_t rss_hf); -uint64_t sfc_efx_to_rte_hash_type(efx_rx_hash_type_t efx_hash_types); -#endif +int sfc_rx_hash_init(struct sfc_adapter *sa); +void sfc_rx_hash_fini(struct sfc_adapter *sa); +int sfc_rx_hf_rte_to_efx(struct sfc_adapter *sa, uint64_t rte, + efx_rx_hash_type_t *efx); +uint64_t sfc_rx_hf_efx_to_rte(struct sfc_adapter *sa, + efx_rx_hash_type_t efx); #ifdef __cplusplus } diff --git a/drivers/net/sfc/sfc_tso.c b/drivers/net/sfc/sfc_tso.c index ba8496df..effe9853 100644 --- a/drivers/net/sfc/sfc_tso.c +++ b/drivers/net/sfc/sfc_tso.c @@ -164,7 +164,8 @@ sfc_efx_tso_do(struct sfc_efx_txq *txq, unsigned int idx, rte_memcpy(&sent_seq, &th->sent_seq, sizeof(uint32_t)); sent_seq = rte_be_to_cpu_32(sent_seq); - efx_tx_qdesc_tso2_create(txq->common, packet_id, sent_seq, m->tso_segsz, + efx_tx_qdesc_tso2_create(txq->common, packet_id, 0, sent_seq, + m->tso_segsz, *pend, EFX_TX_FATSOV2_OPT_NDESCS); *pend += EFX_TX_FATSOV2_OPT_NDESCS; diff --git a/drivers/net/sfc/sfc_tweak.h b/drivers/net/sfc/sfc_tweak.h index b4026851..4d543f68 100644 --- a/drivers/net/sfc/sfc_tweak.h +++ b/drivers/net/sfc/sfc_tweak.h @@ -34,4 +34,12 @@ /** Number of mbufs to be freed in bulk in a single call */ #define SFC_TX_REAP_BULK_SIZE 32 +/** + * Default head-of-line blocking timeout: how long to wait for an Rx + * descriptor before the packet is dropped because no descriptors are + * available. + * + * Used only by the DPDK firmware variant with the equal stride + * super-buffer Rx mode.
+ */ +#define SFC_RXD_WAIT_TIMEOUT_NS_DEF (200U * 1000) + #endif /* _SFC_TWEAK_H_ */ diff --git a/drivers/net/sfc/sfc_tx.c b/drivers/net/sfc/sfc_tx.c index 757b03ba..1bcc2c69 100644 --- a/drivers/net/sfc/sfc_tx.c +++ b/drivers/net/sfc/sfc_tx.c @@ -73,48 +73,10 @@ sfc_tx_get_queue_offload_caps(struct sfc_adapter *sa) return caps; } -static void -sfc_tx_log_offloads(struct sfc_adapter *sa, const char *offload_group, - const char *verdict, uint64_t offloads) -{ - unsigned long long bit; - - while ((bit = __builtin_ffsll(offloads)) != 0) { - uint64_t flag = (1ULL << --bit); - - sfc_err(sa, "Tx %s offload %s %s", offload_group, - rte_eth_dev_tx_offload_name(flag), verdict); - - offloads &= ~flag; - } -} - -static int -sfc_tx_queue_offload_mismatch(struct sfc_adapter *sa, uint64_t requested) -{ - uint64_t mandatory = sa->eth_dev->data->dev_conf.txmode.offloads; - uint64_t supported = sfc_tx_get_dev_offload_caps(sa) | - sfc_tx_get_queue_offload_caps(sa); - uint64_t rejected = requested & ~supported; - uint64_t missing = (requested & mandatory) ^ mandatory; - boolean_t mismatch = B_FALSE; - - if (rejected) { - sfc_tx_log_offloads(sa, "queue", "is unsupported", rejected); - mismatch = B_TRUE; - } - - if (missing) { - sfc_tx_log_offloads(sa, "queue", "must be set", missing); - mismatch = B_TRUE; - } - - return mismatch; -} - static int sfc_tx_qcheck_conf(struct sfc_adapter *sa, unsigned int txq_max_fill_level, - const struct rte_eth_txconf *tx_conf) + const struct rte_eth_txconf *tx_conf, + uint64_t offloads) { int rc = 0; @@ -138,15 +100,12 @@ sfc_tx_qcheck_conf(struct sfc_adapter *sa, unsigned int txq_max_fill_level, } /* We either perform both TCP and UDP offload, or no offload at all */ - if (((tx_conf->offloads & DEV_TX_OFFLOAD_TCP_CKSUM) == 0) != - ((tx_conf->offloads & DEV_TX_OFFLOAD_UDP_CKSUM) == 0)) { + if (((offloads & DEV_TX_OFFLOAD_TCP_CKSUM) == 0) != + ((offloads & DEV_TX_OFFLOAD_UDP_CKSUM) == 0)) { sfc_err(sa, "TCP and UDP offloads can't be set independently"); rc = EINVAL; } - if (sfc_tx_queue_offload_mismatch(sa, tx_conf->offloads)) - rc = EINVAL; - return rc; } @@ -171,6 +130,7 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index, struct sfc_txq *txq; int rc = 0; struct sfc_dp_tx_qcreate_info info; + uint64_t offloads; sfc_log_init(sa, "TxQ = %u", sw_index); @@ -183,7 +143,9 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index, SFC_ASSERT(txq_entries >= nb_tx_desc); SFC_ASSERT(txq_max_fill_level <= nb_tx_desc); - rc = sfc_tx_qcheck_conf(sa, txq_max_fill_level, tx_conf); + offloads = tx_conf->offloads | + sa->eth_dev->data->dev_conf.txmode.offloads; + rc = sfc_tx_qcheck_conf(sa, txq_max_fill_level, tx_conf, offloads); if (rc != 0) goto fail_bad_conf; @@ -210,7 +172,7 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index, (tx_conf->tx_free_thresh) ? 
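The "tx_conf->offloads | txmode.offloads" merge above follows the 18.05 offload API: queue-level offloads are added on top of the port-level ones. An application-side sketch (note that sfc requires the TCP and UDP checksum offloads to be enabled together, as checked above; port_id is assumed to exist in the caller):

struct rte_eth_conf port_conf = {
	.txmode = { .offloads = DEV_TX_OFFLOAD_IPV4_CKSUM },
};
struct rte_eth_txconf txconf = {
	.txq_flags = ETH_TXQ_FLAGS_IGNORE,	/* use offloads, not legacy flags */
	.offloads = DEV_TX_OFFLOAD_TCP_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM,
};

rte_eth_dev_configure(port_id, 1, 1, &port_conf);
rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(), &txconf);
/* The queue ends up with IPv4 + TCP + UDP checksum offloads enabled. */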
tx_conf->tx_free_thresh : SFC_TX_DEFAULT_FREE_THRESH; txq->flags = tx_conf->txq_flags; - txq->offloads = tx_conf->offloads; + txq->offloads = offloads; rc = sfc_dma_alloc(sa, "txq", sw_index, EFX_TXQ_SIZE(txq_info->entries), socket_id, &txq->mem); @@ -221,7 +183,7 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index, info.max_fill_level = txq_max_fill_level; info.free_thresh = txq->free_thresh; info.flags = tx_conf->txq_flags; - info.offloads = tx_conf->offloads; + info.offloads = offloads; info.txq_entries = txq_info->entries; info.dma_desc_size_max = encp->enc_tx_dma_desc_size_max; info.txq_hw_ring = txq->mem.esm_base; @@ -229,6 +191,7 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index, info.evq_hw_ring = evq->mem.esm_base; info.hw_index = txq->hw_index; info.mem_bar = sa->mem_bar.esb_base; + info.vi_window_shift = encp->enc_vi_window_shift; rc = sa->dp_tx->qcreate(sa->eth_dev->data->port_id, sw_index, &RTE_ETH_DEV_TO_PCI(sa->eth_dev)->addr, @@ -303,9 +266,6 @@ sfc_tx_qinit_info(struct sfc_adapter *sa, unsigned int sw_index) static int sfc_tx_check_mode(struct sfc_adapter *sa, const struct rte_eth_txmode *txmode) { - uint64_t offloads_supported = sfc_tx_get_dev_offload_caps(sa) | - sfc_tx_get_queue_offload_caps(sa); - uint64_t offloads_rejected = txmode->offloads & ~offloads_supported; int rc = 0; switch (txmode->mq_mode) { @@ -336,12 +296,6 @@ sfc_tx_check_mode(struct sfc_adapter *sa, const struct rte_eth_txmode *txmode) rc = EINVAL; } - if (offloads_rejected) { - sfc_tx_log_offloads(sa, "device", "is unsupported", - offloads_rejected); - rc = EINVAL; - } - return rc; } @@ -495,8 +449,7 @@ sfc_tx_qstart(struct sfc_adapter *sa, unsigned int sw_index) (txq->offloads & DEV_TX_OFFLOAD_UDP_CKSUM)) { flags |= EFX_TXQ_CKSUM_TCPUDP; - if ((~txq->flags & ETH_TXQ_FLAGS_IGNORE) && - (offloads_supported & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) + if (offloads_supported & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) flags |= EFX_TXQ_CKSUM_INNER_TCPUDP; } @@ -606,7 +559,7 @@ sfc_tx_qstop(struct sfc_adapter *sa, unsigned int sw_index) sfc_err(sa, "TxQ %u flush timed out", sw_index); if (txq->state & SFC_TXQ_FLUSHED) - sfc_info(sa, "TxQ %u flushed", sw_index); + sfc_notice(sa, "TxQ %u flushed", sw_index); } sa->dp_tx->qreap(txq->dp); diff --git a/drivers/net/softnic/rte_eth_softnic.c b/drivers/net/softnic/rte_eth_softnic.c index b0c13415..6b3c13e5 100644 --- a/drivers/net/softnic/rte_eth_softnic.c +++ b/drivers/net/softnic/rte_eth_softnic.c @@ -67,6 +67,12 @@ static const struct rte_eth_dev_info pmd_dev_info = { }, }; +static int pmd_softnic_logtype; + +#define PMD_LOG(level, fmt, args...) 
\ + rte_log(RTE_LOG_ ## level, pmd_softnic_logtype, \ + "%s(): " fmt "\n", __func__, ##args) + static void pmd_dev_infos_get(struct rte_eth_dev *dev __rte_unused, struct rte_eth_dev_info *dev_info) @@ -522,13 +528,15 @@ pmd_ethdev_register(struct rte_vdev_device *vdev, soft_dev->data->dev_private = dev_private; soft_dev->data->dev_link.link_speed = hard_speed; soft_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX; - soft_dev->data->dev_link.link_autoneg = ETH_LINK_AUTONEG; + soft_dev->data->dev_link.link_autoneg = ETH_LINK_FIXED; soft_dev->data->dev_link.link_status = ETH_LINK_DOWN; soft_dev->data->mac_addrs = ð_addr; soft_dev->data->promiscuous = 1; soft_dev->data->kdrv = RTE_KDRV_NONE; soft_dev->data->numa_node = numa_node; + rte_eth_dev_probing_finish(soft_dev); + return 0; } @@ -725,13 +733,27 @@ pmd_probe(struct rte_vdev_device *vdev) uint16_t hard_port_id; int numa_node; void *dev_private; + struct rte_eth_dev *eth_dev; + const char *name = rte_vdev_device_name(vdev); - RTE_LOG(INFO, PMD, - "Probing device \"%s\"\n", - rte_vdev_device_name(vdev)); + PMD_LOG(INFO, "Probing device \"%s\"", name); /* Parse input arguments */ params = rte_vdev_device_args(vdev); + + if (rte_eal_process_type() == RTE_PROC_SECONDARY && + strlen(params) == 0) { + eth_dev = rte_eth_dev_attach_secondary(name); + if (!eth_dev) { + PMD_LOG(ERR, "Failed to probe %s", name); + return -1; + } + /* TODO: request info from primary to set up Rx and Tx */ + eth_dev->dev_ops = &pmd_ops; + rte_eth_dev_probing_finish(eth_dev); + return 0; + } + if (!params) return -EINVAL; @@ -763,8 +785,8 @@ pmd_probe(struct rte_vdev_device *vdev) return -ENOMEM; /* Register soft ethdev */ - RTE_LOG(INFO, PMD, - "Creating soft ethdev \"%s\" for hard ethdev \"%s\"\n", + PMD_LOG(INFO, + "Creating soft ethdev \"%s\" for hard ethdev \"%s\"", p.soft.name, p.hard.name); status = pmd_ethdev_register(vdev, &p, dev_private); @@ -785,7 +807,7 @@ pmd_remove(struct rte_vdev_device *vdev) if (!vdev) return -EINVAL; - RTE_LOG(INFO, PMD, "Removing device \"%s\"\n", + PMD_LOG(INFO, "Removing device \"%s\"", rte_vdev_device_name(vdev)); /* Find the ethdev entry */ @@ -820,3 +842,12 @@ RTE_PMD_REGISTER_PARAM_STRING(net_softnic, PMD_PARAM_SOFT_TM_DEQ_BSZ "= " PMD_PARAM_HARD_NAME "= " PMD_PARAM_HARD_TX_QUEUE_ID "="); + +RTE_INIT(pmd_softnic_init_log); +static void +pmd_softnic_init_log(void) +{ + pmd_softnic_logtype = rte_log_register("pmd.net.softnic"); + if (pmd_softnic_logtype >= 0) + rte_log_set_level(pmd_softnic_logtype, RTE_LOG_NOTICE); +} diff --git a/drivers/net/softnic/rte_eth_softnic_tm.c b/drivers/net/softnic/rte_eth_softnic_tm.c index 79f1c6a8..11d638a9 100644 --- a/drivers/net/softnic/rte_eth_softnic_tm.c +++ b/drivers/net/softnic/rte_eth_softnic_tm.c @@ -479,6 +479,8 @@ static const struct rte_tm_capabilities tm_cap = { .sched_wfq_n_groups_max = 1, .sched_wfq_weight_max = UINT32_MAX, + .cman_wred_packet_mode_supported = WRED_SUPPORTED, + .cman_wred_byte_mode_supported = 0, .cman_head_drop_supported = 0, .cman_wred_context_n_max = 0, .cman_wred_context_private_n_max = 0, @@ -667,6 +669,8 @@ static const struct rte_tm_level_capabilities tm_level_cap[] = { .shaper_shared_n_max = 0, .cman_head_drop_supported = 0, + .cman_wred_packet_mode_supported = WRED_SUPPORTED, + .cman_wred_byte_mode_supported = 0, .cman_wred_context_private_supported = WRED_SUPPORTED, .cman_wred_context_shared_n_max = 0, @@ -828,6 +832,8 @@ static const struct rte_tm_node_capabilities tm_node_cap[] = { {.leaf = { .cman_head_drop_supported = 0, + 
.cman_wred_packet_mode_supported = WRED_SUPPORTED, + .cman_wred_byte_mode_supported = 0, .cman_wred_context_private_supported = WRED_SUPPORTED, .cman_wred_context_shared_n_max = 0, } }, @@ -1243,12 +1249,23 @@ wred_profile_check(struct rte_eth_dev *dev, NULL, rte_strerror(EINVAL)); + /* WRED profile should be in packet mode */ + if (profile->packet_mode == 0) + return -rte_tm_error_set(error, + ENOTSUP, + RTE_TM_ERROR_TYPE_WRED_PROFILE, + NULL, + rte_strerror(ENOTSUP)); + /* min_th <= max_th, max_th > 0 */ for (color = RTE_TM_GREEN; color < RTE_TM_COLORS; color++) { - uint16_t min_th = profile->red_params[color].min_th; - uint16_t max_th = profile->red_params[color].max_th; + uint32_t min_th = profile->red_params[color].min_th; + uint32_t max_th = profile->red_params[color].max_th; - if (min_th > max_th || max_th == 0) + if (min_th > max_th || + max_th == 0 || + min_th > UINT16_MAX || + max_th > UINT16_MAX) return -rte_tm_error_set(error, EINVAL, RTE_TM_ERROR_TYPE_WRED_PROFILE, diff --git a/drivers/net/szedata2/Makefile b/drivers/net/szedata2/Makefile index 0ebd3ec5..b77fae16 100644 --- a/drivers/net/szedata2/Makefile +++ b/drivers/net/szedata2/Makefile @@ -1,33 +1,5 @@ -# BSD LICENSE -# -# Copyright (c) 2015 CESNET -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in -# the documentation and/or other materials provided with the -# distribution. -# * Neither the name of CESNET nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2015 CESNET include $(RTE_SDK)/mk/rte.vars.mk @@ -51,7 +23,6 @@ LIBABIVER := 1 # all source are stored in SRCS-y # SRCS-$(CONFIG_RTE_LIBRTE_PMD_SZEDATA2) += rte_eth_szedata2.c -SRCS-$(CONFIG_RTE_LIBRTE_PMD_SZEDATA2) += szedata2_iobuf.c # # Export include files diff --git a/drivers/net/szedata2/rte_eth_szedata2.c b/drivers/net/szedata2/rte_eth_szedata2.c index e53c738d..910c64d0 100644 --- a/drivers/net/szedata2/rte_eth_szedata2.c +++ b/drivers/net/szedata2/rte_eth_szedata2.c @@ -1,34 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright (c) 2015 - 2016 CESNET - * All rights reserved. 
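With the check above, softnic now only accepts WRED profiles in packet mode whose thresholds fit in 16 bits. An illustrative profile (threshold values and the profile ID are examples; field names follow the rte_tm API):

struct rte_tm_wred_params wp;
struct rte_tm_error error;
enum rte_tm_color color;

memset(&wp, 0, sizeof(wp));
wp.packet_mode = 1;	/* byte mode is rejected with ENOTSUP */
for (color = RTE_TM_GREEN; color < RTE_TM_COLORS; color++) {
	wp.red_params[color].min_th = 32;	/* packets; must be <= UINT16_MAX */
	wp.red_params[color].max_th = 64;
	wp.red_params[color].maxp_inv = 10;
	wp.red_params[color].wq_log2 = 9;
}

if (rte_tm_wred_profile_add(port_id, 0 /* profile id */, &wp, &error) != 0)
	/* handle failure; error.message points at the reason */;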
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of CESNET nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2015 - 2016 CESNET */ #include @@ -50,10 +21,9 @@ #include #include #include -#include #include "rte_eth_szedata2.h" -#include "szedata2_iobuf.h" +#include "szedata2_logs.h" #define RTE_ETH_SZEDATA2_MAX_RX_QUEUES 32 #define RTE_ETH_SZEDATA2_MAX_TX_QUEUES 32 @@ -68,9 +38,53 @@ #define SZEDATA2_DEV_PATH_FMT "/dev/szedataII%u" +/** + * Format string for suffix used to differentiate between Ethernet ports + * on the same PCI device. + */ +#define SZEDATA2_ETH_DEV_NAME_SUFFIX_FMT "-port%u" + +/** + * Maximum number of ports for one device. + */ +#define SZEDATA2_MAX_PORTS 2 + +/** + * Entry in list of PCI devices for this driver. + */ +struct pci_dev_list_entry; +struct pci_dev_list_entry { + LIST_ENTRY(pci_dev_list_entry) next; + struct rte_pci_device *pci_dev; + unsigned int port_count; +}; + +/* List of PCI devices with number of ports for this driver. 
*/ +LIST_HEAD(pci_dev_list, pci_dev_list_entry) szedata2_pci_dev_list = + LIST_HEAD_INITIALIZER(szedata2_pci_dev_list); + +struct port_info { + unsigned int rx_base_id; + unsigned int tx_base_id; + unsigned int rx_count; + unsigned int tx_count; + int numa_node; +}; + +struct pmd_internals { + struct rte_eth_dev *dev; + uint16_t max_rx_queues; + uint16_t max_tx_queues; + unsigned int rxq_base_id; + unsigned int txq_base_id; + char *sze_dev_path; +}; + struct szedata2_rx_queue { + struct pmd_internals *priv; struct szedata *sze; uint8_t rx_channel; + uint16_t qid; uint16_t in_port; struct rte_mempool *mb_pool; volatile uint64_t rx_pkts; @@ -79,21 +93,17 @@ struct szedata2_rx_queue { }; struct szedata2_tx_queue { + struct pmd_internals *priv; struct szedata *sze; uint8_t tx_channel; + uint16_t qid; volatile uint64_t tx_pkts; volatile uint64_t tx_bytes; volatile uint64_t err_pkts; }; -struct pmd_internals { - struct szedata2_rx_queue rx_queue[RTE_ETH_SZEDATA2_MAX_RX_QUEUES]; - struct szedata2_tx_queue tx_queue[RTE_ETH_SZEDATA2_MAX_TX_QUEUES]; - uint16_t max_rx_queues; - uint16_t max_tx_queues; - char sze_dev[PATH_MAX]; - struct rte_mem_resource *pci_rsc; -}; +int szedata2_logtype_init; +int szedata2_logtype_driver; static struct ether_addr eth_addr = { .addr_bytes = { 0x00, 0x11, 0x17, 0x00, 0x00, 0x00 } @@ -133,8 +143,10 @@ eth_szedata2_rx(void *queue, for (i = 0; i < nb_pkts; i++) { mbuf = rte_pktmbuf_alloc(sze_q->mb_pool); - if (unlikely(mbuf == NULL)) + if (unlikely(mbuf == NULL)) { + sze_q->priv->dev->data->rx_mbuf_alloc_failed++; break; + } /* get the next sze packet */ if (sze->ct_rx_lck != NULL && !sze->ct_rx_rem_bytes && @@ -318,10 +330,10 @@ eth_szedata2_rx(void *queue, * sze packet will not fit in one mbuf, * scattered mode is not enabled, drop packet */ - RTE_LOG(ERR, PMD, + PMD_DRV_LOG(ERR, "SZE segment %d bytes will not fit in one mbuf " "(%d bytes), scattered mode is not enabled, " - "drop packet!!\n", + "drop packet!!", packet_size, buf_size); rte_pktmbuf_free(mbuf); } @@ -354,6 +366,8 @@ eth_szedata2_rx_scattered(void *queue, uint16_t packet_len1 = 0; uint16_t packet_len2 = 0; uint16_t hw_data_align; + uint64_t *mbuf_failed_ptr = + &sze_q->priv->dev->data->rx_mbuf_alloc_failed; if (unlikely(sze_q->sze == NULL || nb_pkts == 0)) return 0; @@ -541,6 +555,7 @@ eth_szedata2_rx_scattered(void *queue, sze->ct_rx_lck = ct_rx_lck_backup; sze->ct_rx_rem_bytes = ct_rx_rem_bytes_backup; sze->ct_rx_cur_ptr = ct_rx_cur_ptr_backup; + sze_q->priv->dev->data->rx_mbuf_alloc_failed++; break; } @@ -590,6 +605,7 @@ eth_szedata2_rx_scattered(void *queue, ct_rx_rem_bytes_backup; sze->ct_rx_cur_ptr = ct_rx_cur_ptr_backup; + (*mbuf_failed_ptr)++; goto finish; } @@ -633,6 +649,7 @@ eth_szedata2_rx_scattered(void *queue, ct_rx_rem_bytes_backup; sze->ct_rx_cur_ptr = ct_rx_cur_ptr_backup; + (*mbuf_failed_ptr)++; goto finish; } @@ -889,7 +906,7 @@ eth_rx_queue_start(struct rte_eth_dev *dev, uint16_t rxq_id) if (rxq->sze == NULL) { uint32_t rx = 1 << rxq->rx_channel; uint32_t tx = 0; - rxq->sze = szedata_open(internals->sze_dev); + rxq->sze = szedata_open(internals->sze_dev_path); if (rxq->sze == NULL) return -EINVAL; ret = szedata_subscribe3(rxq->sze, &rx, &tx); @@ -934,7 +951,7 @@ eth_tx_queue_start(struct rte_eth_dev *dev, uint16_t txq_id) if (txq->sze == NULL) { uint32_t rx = 0; uint32_t tx = 1 << txq->tx_channel; - txq->sze = szedata_open(internals->sze_dev); + txq->sze = szedata_open(internals->sze_dev_path); if (txq->sze == NULL) return -EINVAL; ret = szedata_subscribe3(txq->sze, &rx, &tx); @@ 
-1017,7 +1034,7 @@ static int eth_dev_configure(struct rte_eth_dev *dev) { struct rte_eth_dev_data *data = dev->data; - if (data->dev_conf.rxmode.enable_scatter == 1) { + if (data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) { dev->rx_pkt_burst = eth_szedata2_rx_scattered; data->scattered_rx = 1; } else { @@ -1032,13 +1049,17 @@ eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) { struct pmd_internals *internals = dev->data->dev_private; - dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev); + dev_info->if_index = 0; dev_info->max_mac_addrs = 1; dev_info->max_rx_pktlen = (uint32_t)-1; dev_info->max_rx_queues = internals->max_rx_queues; dev_info->max_tx_queues = internals->max_tx_queues; dev_info->min_rx_bufsize = 0; + dev_info->rx_offload_capa = DEV_RX_OFFLOAD_SCATTER; + dev_info->tx_offload_capa = 0; + dev_info->rx_queue_offload_capa = 0; + dev_info->tx_queue_offload_capa = 0; dev_info->speed_capa = ETH_LINK_SPEED_100G; } @@ -1054,22 +1075,29 @@ eth_stats_get(struct rte_eth_dev *dev, uint64_t tx_err_total = 0; uint64_t rx_total_bytes = 0; uint64_t tx_total_bytes = 0; - const struct pmd_internals *internals = dev->data->dev_private; - for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS && i < nb_rx; i++) { - stats->q_ipackets[i] = internals->rx_queue[i].rx_pkts; - stats->q_ibytes[i] = internals->rx_queue[i].rx_bytes; - rx_total += stats->q_ipackets[i]; - rx_total_bytes += stats->q_ibytes[i]; + for (i = 0; i < nb_rx; i++) { + struct szedata2_rx_queue *rxq = dev->data->rx_queues[i]; + + if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) { + stats->q_ipackets[i] = rxq->rx_pkts; + stats->q_ibytes[i] = rxq->rx_bytes; + } + rx_total += rxq->rx_pkts; + rx_total_bytes += rxq->rx_bytes; } - for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS && i < nb_tx; i++) { - stats->q_opackets[i] = internals->tx_queue[i].tx_pkts; - stats->q_obytes[i] = internals->tx_queue[i].tx_bytes; - stats->q_errors[i] = internals->tx_queue[i].err_pkts; - tx_total += stats->q_opackets[i]; - tx_total_bytes += stats->q_obytes[i]; - tx_err_total += stats->q_errors[i]; + for (i = 0; i < nb_tx; i++) { + struct szedata2_tx_queue *txq = dev->data->tx_queues[i]; + + if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) { + stats->q_opackets[i] = txq->tx_pkts; + stats->q_obytes[i] = txq->tx_bytes; + stats->q_errors[i] = txq->err_pkts; + } + tx_total += txq->tx_pkts; + tx_total_bytes += txq->tx_bytes; + tx_err_total += txq->err_pkts; } stats->ipackets = rx_total; @@ -1077,6 +1105,7 @@ eth_stats_get(struct rte_eth_dev *dev, stats->ibytes = rx_total_bytes; stats->obytes = tx_total_bytes; stats->oerrors = tx_err_total; + stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed; return 0; } @@ -1087,17 +1116,18 @@ eth_stats_reset(struct rte_eth_dev *dev) uint16_t i; uint16_t nb_rx = dev->data->nb_rx_queues; uint16_t nb_tx = dev->data->nb_tx_queues; - struct pmd_internals *internals = dev->data->dev_private; for (i = 0; i < nb_rx; i++) { - internals->rx_queue[i].rx_pkts = 0; - internals->rx_queue[i].rx_bytes = 0; - internals->rx_queue[i].err_pkts = 0; + struct szedata2_rx_queue *rxq = dev->data->rx_queues[i]; + rxq->rx_pkts = 0; + rxq->rx_bytes = 0; + rxq->err_pkts = 0; } for (i = 0; i < nb_tx; i++) { - internals->tx_queue[i].tx_pkts = 0; - internals->tx_queue[i].tx_bytes = 0; - internals->tx_queue[i].err_pkts = 0; + struct szedata2_tx_queue *txq = dev->data->tx_queues[i]; + txq->tx_pkts = 0; + txq->tx_bytes = 0; + txq->err_pkts = 0; } } @@ -1105,9 +1135,11 @@ static void eth_rx_queue_release(void *q) { struct szedata2_rx_queue *rxq = (struct szedata2_rx_queue *)q; - if 
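With the per-queue counters above, the standard stats API reflects both the aggregate totals and the first RTE_ETHDEV_QUEUE_STAT_CNTRS queues, e.g. (assumes a started port_id):

struct rte_eth_stats stats;

if (rte_eth_stats_get(port_id, &stats) == 0)
	printf("rx=%" PRIu64 " nombuf=%" PRIu64 " q0_rx=%" PRIu64 "\n",
	       stats.ipackets, stats.rx_nombuf, stats.q_ipackets[0]);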
(rxq->sze != NULL) { - szedata_close(rxq->sze); - rxq->sze = NULL; + + if (rxq != NULL) { + if (rxq->sze != NULL) + szedata_close(rxq->sze); + rte_free(rxq); } } @@ -1115,9 +1147,11 @@ static void eth_tx_queue_release(void *q) { struct szedata2_tx_queue *txq = (struct szedata2_tx_queue *)q; - if (txq->sze != NULL) { - szedata_close(txq->sze); - txq->sze = NULL; + + if (txq != NULL) { + if (txq->sze != NULL) + szedata_close(txq->sze); + rte_free(txq); } } @@ -1142,111 +1176,34 @@ eth_dev_close(struct rte_eth_dev *dev) dev->data->nb_tx_queues = 0; } -/** - * Function takes value from first IBUF status register. - * Values in IBUF and OBUF should be same. - * - * @param internals - * Pointer to device private structure. - * @return - * Link speed constant. - */ -static inline enum szedata2_link_speed -get_link_speed(const struct pmd_internals *internals) -{ - const volatile struct szedata2_ibuf *ibuf = - ibuf_ptr_by_index(internals->pci_rsc, 0); - uint32_t speed = (szedata2_read32(&ibuf->ibuf_st) & 0x70) >> 4; - switch (speed) { - case 0x03: - return SZEDATA2_LINK_SPEED_10G; - case 0x04: - return SZEDATA2_LINK_SPEED_40G; - case 0x05: - return SZEDATA2_LINK_SPEED_100G; - default: - return SZEDATA2_LINK_SPEED_DEFAULT; - } -} - static int eth_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused) { struct rte_eth_link link; - struct rte_eth_link *link_ptr = &link; - struct rte_eth_link *dev_link = &dev->data->dev_link; - struct pmd_internals *internals = (struct pmd_internals *) - dev->data->dev_private; - const volatile struct szedata2_ibuf *ibuf; - uint32_t i; - bool link_is_up = false; - - switch (get_link_speed(internals)) { - case SZEDATA2_LINK_SPEED_10G: - link.link_speed = ETH_SPEED_NUM_10G; - break; - case SZEDATA2_LINK_SPEED_40G: - link.link_speed = ETH_SPEED_NUM_40G; - break; - case SZEDATA2_LINK_SPEED_100G: - link.link_speed = ETH_SPEED_NUM_100G; - break; - default: - link.link_speed = ETH_SPEED_NUM_10G; - break; - } - /* szedata2 uses only full duplex */ - link.link_duplex = ETH_LINK_FULL_DUPLEX; - - for (i = 0; i < szedata2_ibuf_count; i++) { - ibuf = ibuf_ptr_by_index(internals->pci_rsc, i); - /* - * Link is considered up if at least one ibuf is enabled - * and up. - */ - if (ibuf_is_enabled(ibuf) && ibuf_is_link_up(ibuf)) { - link_is_up = true; - break; - } - } - - link.link_status = (link_is_up) ? 
ETH_LINK_UP : ETH_LINK_DOWN; + memset(&link, 0, sizeof(link)); + link.link_speed = ETH_SPEED_NUM_100G; + link.link_duplex = ETH_LINK_FULL_DUPLEX; + link.link_status = ETH_LINK_UP; link.link_autoneg = ETH_LINK_FIXED; - rte_atomic64_cmpset((uint64_t *)dev_link, *(uint64_t *)dev_link, - *(uint64_t *)link_ptr); - + rte_eth_linkstatus_set(dev, &link); return 0; } static int -eth_dev_set_link_up(struct rte_eth_dev *dev) +eth_dev_set_link_up(struct rte_eth_dev *dev __rte_unused) { - struct pmd_internals *internals = (struct pmd_internals *) - dev->data->dev_private; - uint32_t i; - - for (i = 0; i < szedata2_ibuf_count; i++) - ibuf_enable(ibuf_ptr_by_index(internals->pci_rsc, i)); - for (i = 0; i < szedata2_obuf_count; i++) - obuf_enable(obuf_ptr_by_index(internals->pci_rsc, i)); + PMD_DRV_LOG(WARNING, "Setting link up is not supported."); return 0; } static int -eth_dev_set_link_down(struct rte_eth_dev *dev) +eth_dev_set_link_down(struct rte_eth_dev *dev __rte_unused) { - struct pmd_internals *internals = (struct pmd_internals *) - dev->data->dev_private; - uint32_t i; - - for (i = 0; i < szedata2_ibuf_count; i++) - ibuf_disable(ibuf_ptr_by_index(internals->pci_rsc, i)); - for (i = 0; i < szedata2_obuf_count; i++) - obuf_disable(obuf_ptr_by_index(internals->pci_rsc, i)); + PMD_DRV_LOG(WARNING, "Setting link down is not supported."); return 0; } @@ -1254,26 +1211,50 @@ static int eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id, uint16_t nb_rx_desc __rte_unused, - unsigned int socket_id __rte_unused, + unsigned int socket_id, const struct rte_eth_rxconf *rx_conf __rte_unused, struct rte_mempool *mb_pool) { - struct pmd_internals *internals = dev->data->dev_private; - struct szedata2_rx_queue *rxq = &internals->rx_queue[rx_queue_id]; + struct szedata2_rx_queue *rxq; int ret; - uint32_t rx = 1 << rx_queue_id; + struct pmd_internals *internals = dev->data->dev_private; + uint8_t rx_channel = internals->rxq_base_id + rx_queue_id; + uint32_t rx = 1 << rx_channel; uint32_t tx = 0; - rxq->sze = szedata_open(internals->sze_dev); - if (rxq->sze == NULL) + PMD_INIT_FUNC_TRACE(); + + if (dev->data->rx_queues[rx_queue_id] != NULL) { + eth_rx_queue_release(dev->data->rx_queues[rx_queue_id]); + dev->data->rx_queues[rx_queue_id] = NULL; + } + + rxq = rte_zmalloc_socket("szedata2 rx queue", + sizeof(struct szedata2_rx_queue), + RTE_CACHE_LINE_SIZE, socket_id); + if (rxq == NULL) { + PMD_INIT_LOG(ERR, "rte_zmalloc_socket() failed for rx queue id " + "%" PRIu16 "!", rx_queue_id); + return -ENOMEM; + } + + rxq->priv = internals; + rxq->sze = szedata_open(internals->sze_dev_path); + if (rxq->sze == NULL) { + PMD_INIT_LOG(ERR, "szedata_open() failed for rx queue id " + "%" PRIu16 "!", rx_queue_id); + eth_rx_queue_release(rxq); return -EINVAL; + } ret = szedata_subscribe3(rxq->sze, &rx, &tx); if (ret != 0 || rx == 0) { - szedata_close(rxq->sze); - rxq->sze = NULL; + PMD_INIT_LOG(ERR, "szedata_subscribe3() failed for rx queue id " + "%" PRIu16 "!", rx_queue_id); + eth_rx_queue_release(rxq); return -EINVAL; } - rxq->rx_channel = rx_queue_id; + rxq->rx_channel = rx_channel; + rxq->qid = rx_queue_id; rxq->in_port = dev->data->port_id; rxq->mb_pool = mb_pool; rxq->rx_pkts = 0; @@ -1281,6 +1262,11 @@ eth_rx_queue_setup(struct rte_eth_dev *dev, rxq->err_pkts = 0; dev->data->rx_queues[rx_queue_id] = rxq; + + PMD_INIT_LOG(DEBUG, "Configured rx queue id %" PRIu16 " on socket " + "%u (channel id %u).", rxq->qid, socket_id, + rxq->rx_channel); + return 0; } @@ -1288,89 +1274,93 @@ static int 
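The reworked eth_rx_queue_setup() above derives the absolute DMA channel from the port's base id plus the queue index, and requests it from libsze2 as a one-hot bitmask (the driver treats a non-zero return or a cleared mask from szedata_subscribe3() as failure). A sketch of just the mask arithmetic, with invented ids:

    #include <inttypes.h>
    #include <stdio.h>

    int
    main(void)
    {
        /* Invented values: a port whose RX channels start at id 16,
         * configuring its queue 3. */
        uint8_t rxq_base_id = 16;
        uint16_t rx_queue_id = 3;

        /* Absolute DMA channel = per-port base id + queue index. */
        uint8_t rx_channel = rxq_base_id + rx_queue_id;

        /* One-hot masks handed to szedata_subscribe3(); tx stays 0
         * when only an RX channel is requested. */
        uint32_t rx = (uint32_t)1 << rx_channel;
        uint32_t tx = 0;

        printf("rx mask 0x%08" PRIx32 ", tx mask 0x%08" PRIx32 "\n",
               rx, tx);
        return 0;
    }
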
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id, uint16_t nb_tx_desc __rte_unused, - unsigned int socket_id __rte_unused, + unsigned int socket_id, const struct rte_eth_txconf *tx_conf __rte_unused) { - struct pmd_internals *internals = dev->data->dev_private; - struct szedata2_tx_queue *txq = &internals->tx_queue[tx_queue_id]; + struct szedata2_tx_queue *txq; int ret; + struct pmd_internals *internals = dev->data->dev_private; + uint8_t tx_channel = internals->txq_base_id + tx_queue_id; uint32_t rx = 0; - uint32_t tx = 1 << tx_queue_id; + uint32_t tx = 1 << tx_channel; + + PMD_INIT_FUNC_TRACE(); - txq->sze = szedata_open(internals->sze_dev); - if (txq->sze == NULL) + if (dev->data->tx_queues[tx_queue_id] != NULL) { + eth_tx_queue_release(dev->data->tx_queues[tx_queue_id]); + dev->data->tx_queues[tx_queue_id] = NULL; + } + + txq = rte_zmalloc_socket("szedata2 tx queue", + sizeof(struct szedata2_tx_queue), + RTE_CACHE_LINE_SIZE, socket_id); + if (txq == NULL) { + PMD_INIT_LOG(ERR, "rte_zmalloc_socket() failed for tx queue id " + "%" PRIu16 "!", tx_queue_id); + return -ENOMEM; + } + + txq->priv = internals; + txq->sze = szedata_open(internals->sze_dev_path); + if (txq->sze == NULL) { + PMD_INIT_LOG(ERR, "szedata_open() failed for tx queue id " + "%" PRIu16 "!", tx_queue_id); + eth_tx_queue_release(txq); return -EINVAL; + } ret = szedata_subscribe3(txq->sze, &rx, &tx); if (ret != 0 || tx == 0) { - szedata_close(txq->sze); - txq->sze = NULL; + PMD_INIT_LOG(ERR, "szedata_subscribe3() failed for tx queue id " + "%" PRIu16 "!", tx_queue_id); + eth_tx_queue_release(txq); return -EINVAL; } - txq->tx_channel = tx_queue_id; + txq->tx_channel = tx_channel; + txq->qid = tx_queue_id; txq->tx_pkts = 0; txq->tx_bytes = 0; txq->err_pkts = 0; dev->data->tx_queues[tx_queue_id] = txq; + + PMD_INIT_LOG(DEBUG, "Configured tx queue id %" PRIu16 " on socket " + "%u (channel id %u).", txq->qid, socket_id, + txq->tx_channel); + return 0; } -static void +static int eth_mac_addr_set(struct rte_eth_dev *dev __rte_unused, struct ether_addr *mac_addr __rte_unused) { + return 0; } static void -eth_promiscuous_enable(struct rte_eth_dev *dev) +eth_promiscuous_enable(struct rte_eth_dev *dev __rte_unused) { - struct pmd_internals *internals = (struct pmd_internals *) - dev->data->dev_private; - uint32_t i; - - for (i = 0; i < szedata2_ibuf_count; i++) { - ibuf_mac_mode_write(ibuf_ptr_by_index(internals->pci_rsc, i), - SZEDATA2_MAC_CHMODE_PROMISC); - } + PMD_DRV_LOG(WARNING, "Enabling promiscuous mode is not supported. " + "The card is always in promiscuous mode."); } static void -eth_promiscuous_disable(struct rte_eth_dev *dev) +eth_promiscuous_disable(struct rte_eth_dev *dev __rte_unused) { - struct pmd_internals *internals = (struct pmd_internals *) - dev->data->dev_private; - uint32_t i; - - for (i = 0; i < szedata2_ibuf_count; i++) { - ibuf_mac_mode_write(ibuf_ptr_by_index(internals->pci_rsc, i), - SZEDATA2_MAC_CHMODE_ONLY_VALID); - } + PMD_DRV_LOG(WARNING, "Disabling promiscuous mode is not supported. 
" + "The card is always in promiscuous mode."); } static void -eth_allmulticast_enable(struct rte_eth_dev *dev) +eth_allmulticast_enable(struct rte_eth_dev *dev __rte_unused) { - struct pmd_internals *internals = (struct pmd_internals *) - dev->data->dev_private; - uint32_t i; - - for (i = 0; i < szedata2_ibuf_count; i++) { - ibuf_mac_mode_write(ibuf_ptr_by_index(internals->pci_rsc, i), - SZEDATA2_MAC_CHMODE_ALL_MULTICAST); - } + PMD_DRV_LOG(WARNING, "Enabling allmulticast mode is not supported."); } static void -eth_allmulticast_disable(struct rte_eth_dev *dev) +eth_allmulticast_disable(struct rte_eth_dev *dev __rte_unused) { - struct pmd_internals *internals = (struct pmd_internals *) - dev->data->dev_private; - uint32_t i; - - for (i = 0; i < szedata2_ibuf_count; i++) { - ibuf_mac_mode_write(ibuf_ptr_by_index(internals->pci_rsc, i), - SZEDATA2_MAC_CHMODE_ONLY_VALID); - } + PMD_DRV_LOG(WARNING, "Disabling allmulticast mode is not supported."); } static const struct eth_dev_ops ops = { @@ -1417,9 +1407,9 @@ get_szedata2_index(const struct rte_pci_addr *pcislot_addr, uint32_t *index) FILE *fd; char pcislot_path[PATH_MAX]; uint32_t domain; - uint32_t bus; - uint32_t devid; - uint32_t function; + uint8_t bus; + uint8_t devid; + uint8_t function; dir = opendir("/sys/class/combo"); if (dir == NULL) @@ -1444,7 +1434,7 @@ get_szedata2_index(const struct rte_pci_addr *pcislot_addr, uint32_t *index) if (fd == NULL) continue; - ret = fscanf(fd, "%4" PRIx16 ":%2" PRIx8 ":%2" PRIx8 ".%" PRIx8, + ret = fscanf(fd, "%8" SCNx32 ":%2" SCNx8 ":%2" SCNx8 ".%" SCNx8, &domain, &bus, &devid, &function); fclose(fd); if (ret != 4) @@ -1464,109 +1454,62 @@ get_szedata2_index(const struct rte_pci_addr *pcislot_addr, uint32_t *index) return -1; } +/** + * @brief Initializes rte_eth_dev device. + * @param dev Device to initialize. + * @param pi Structure with info about DMA queues. + * @return 0 on success, negative error code on error. + */ static int -rte_szedata2_eth_dev_init(struct rte_eth_dev *dev) +rte_szedata2_eth_dev_init(struct rte_eth_dev *dev, struct port_info *pi) { + int ret; + uint32_t szedata2_index; + char name[PATH_MAX]; struct rte_eth_dev_data *data = dev->data; struct pmd_internals *internals = (struct pmd_internals *) data->dev_private; - struct szedata *szedata_temp; - int ret; - uint32_t szedata2_index; struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); - struct rte_pci_addr *pci_addr = &pci_dev->addr; - struct rte_mem_resource *pci_rsc = - &pci_dev->mem_resource[PCI_RESOURCE_NUMBER]; - char rsc_filename[PATH_MAX]; - void *pci_resource_ptr = NULL; - int fd; - RTE_LOG(INFO, PMD, "Initializing szedata2 device (" PCI_PRI_FMT ")\n", - pci_addr->domain, pci_addr->bus, pci_addr->devid, - pci_addr->function); + PMD_INIT_FUNC_TRACE(); + PMD_INIT_LOG(INFO, "Initializing eth_dev %s (driver %s)", data->name, + dev->device->driver->name); + + /* Fill internal private structure. 
*/ + internals->dev = dev; /* Get index of szedata2 device file and create path to device file */ - ret = get_szedata2_index(pci_addr, &szedata2_index); + ret = get_szedata2_index(&pci_dev->addr, &szedata2_index); if (ret != 0) { - RTE_LOG(ERR, PMD, "Failed to get szedata2 device index!\n"); + PMD_INIT_LOG(ERR, "Failed to get szedata2 device index!"); return -ENODEV; } - snprintf(internals->sze_dev, PATH_MAX, SZEDATA2_DEV_PATH_FMT, - szedata2_index); - - RTE_LOG(INFO, PMD, "SZEDATA2 path: %s\n", internals->sze_dev); - - /* - * Get number of available DMA RX and TX channels, which is maximum - * number of queues that can be created and store it in private device - * data structure. - */ - szedata_temp = szedata_open(internals->sze_dev); - if (szedata_temp == NULL) { - RTE_LOG(ERR, PMD, "szedata_open(): failed to open %s", - internals->sze_dev); - return -EINVAL; + snprintf(name, PATH_MAX, SZEDATA2_DEV_PATH_FMT, szedata2_index); + internals->sze_dev_path = strdup(name); + if (internals->sze_dev_path == NULL) { + PMD_INIT_LOG(ERR, "strdup() failed!"); + return -ENOMEM; } - internals->max_rx_queues = szedata_ifaces_available(szedata_temp, - SZE2_DIR_RX); - internals->max_tx_queues = szedata_ifaces_available(szedata_temp, - SZE2_DIR_TX); - szedata_close(szedata_temp); - - RTE_LOG(INFO, PMD, "Available DMA channels RX: %u TX: %u\n", - internals->max_rx_queues, internals->max_tx_queues); + PMD_INIT_LOG(INFO, "SZEDATA2 path: %s", internals->sze_dev_path); + internals->max_rx_queues = pi->rx_count; + internals->max_tx_queues = pi->tx_count; + internals->rxq_base_id = pi->rx_base_id; + internals->txq_base_id = pi->tx_base_id; + PMD_INIT_LOG(INFO, "%u RX DMA channels from id %u", + internals->max_rx_queues, internals->rxq_base_id); + PMD_INIT_LOG(INFO, "%u TX DMA channels from id %u", + internals->max_tx_queues, internals->txq_base_id); /* Set rx, tx burst functions */ - if (data->dev_conf.rxmode.enable_scatter == 1 || - data->scattered_rx == 1) { + if (data->scattered_rx == 1) dev->rx_pkt_burst = eth_szedata2_rx_scattered; - data->scattered_rx = 1; - } else { + else dev->rx_pkt_burst = eth_szedata2_rx; - data->scattered_rx = 0; - } dev->tx_pkt_burst = eth_szedata2_tx; /* Set function callbacks for Ethernet API */ dev->dev_ops = &ops; - rte_eth_copy_pci_info(dev, pci_dev); - - /* mmap pci resource0 file to rte_mem_resource structure */ - if (pci_dev->mem_resource[PCI_RESOURCE_NUMBER].phys_addr == - 0) { - RTE_LOG(ERR, PMD, "Missing resource%u file\n", - PCI_RESOURCE_NUMBER); - return -EINVAL; - } - snprintf(rsc_filename, PATH_MAX, - "%s/" PCI_PRI_FMT "/resource%u", rte_pci_get_sysfs_path(), - pci_addr->domain, pci_addr->bus, - pci_addr->devid, pci_addr->function, PCI_RESOURCE_NUMBER); - fd = open(rsc_filename, O_RDWR); - if (fd < 0) { - RTE_LOG(ERR, PMD, "Could not open file %s\n", rsc_filename); - return -EINVAL; - } - - pci_resource_ptr = mmap(0, - pci_dev->mem_resource[PCI_RESOURCE_NUMBER].len, - PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); - close(fd); - if (pci_resource_ptr == MAP_FAILED) { - RTE_LOG(ERR, PMD, "Could not mmap file %s (fd = %d)\n", - rsc_filename, fd); - return -EINVAL; - } - pci_dev->mem_resource[PCI_RESOURCE_NUMBER].addr = pci_resource_ptr; - internals->pci_rsc = pci_rsc; - - RTE_LOG(DEBUG, PMD, "resource%u phys_addr = 0x%llx len = %llu " - "virt addr = %llx\n", PCI_RESOURCE_NUMBER, - (unsigned long long)pci_rsc->phys_addr, - (unsigned long long)pci_rsc->len, - (unsigned long long)pci_rsc->addr); - /* Get link state */ eth_link_update(dev, 0); @@ -1574,40 +1517,37 @@ 
rte_szedata2_eth_dev_init(struct rte_eth_dev *dev)
 data->mac_addrs = rte_zmalloc(data->name, sizeof(struct ether_addr),
 RTE_CACHE_LINE_SIZE);
 if (data->mac_addrs == NULL) {
- RTE_LOG(ERR, PMD, "Could not alloc space for MAC address!\n");
- munmap(pci_dev->mem_resource[PCI_RESOURCE_NUMBER].addr,
- pci_dev->mem_resource[PCI_RESOURCE_NUMBER].len);
- return -EINVAL;
+ PMD_INIT_LOG(ERR, "Could not alloc space for MAC address!");
+ free(internals->sze_dev_path);
+ return -ENOMEM;
 }
 ether_addr_copy(&eth_addr, data->mac_addrs);

- /* At initial state COMBO card is in promiscuous mode so disable it */
- eth_promiscuous_disable(dev);
-
- RTE_LOG(INFO, PMD, "szedata2 device ("
- PCI_PRI_FMT ") successfully initialized\n",
- pci_addr->domain, pci_addr->bus, pci_addr->devid,
- pci_addr->function);
+ PMD_INIT_LOG(INFO, "%s device %s successfully initialized",
+ dev->device->driver->name, data->name);

 return 0;
 }

+/**
+ * @brief Uninitializes rte_eth_dev device.
+ * @param dev Device to uninitialize.
+ * @return 0 on success, negative error code on error.
+ */
 static int
 rte_szedata2_eth_dev_uninit(struct rte_eth_dev *dev)
 {
- struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
- struct rte_pci_addr *pci_addr = &pci_dev->addr;
+ struct pmd_internals *internals = (struct pmd_internals *)
+ dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ free(internals->sze_dev_path);
 rte_free(dev->data->mac_addrs);
- dev->data->mac_addrs = NULL;
- munmap(pci_dev->mem_resource[PCI_RESOURCE_NUMBER].addr,
- pci_dev->mem_resource[PCI_RESOURCE_NUMBER].len);
- RTE_LOG(INFO, PMD, "szedata2 device ("
- PCI_PRI_FMT ") successfully uninitialized\n",
- pci_addr->domain, pci_addr->bus, pci_addr->devid,
- pci_addr->function);
+ PMD_DRV_LOG(INFO, "%s device %s successfully uninitialized",
+ dev->device->driver->name, dev->data->name);

 return 0;
 }

@@ -1625,22 +1565,350 @@ static const struct rte_pci_id rte_szedata2_pci_id_table[] = {
 RTE_PCI_DEVICE(PCI_VENDOR_ID_NETCOPE,
 PCI_DEVICE_ID_NETCOPE_COMBO100G2)
 },
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_NETCOPE,
+ PCI_DEVICE_ID_NETCOPE_NFB200G2QL)
+ },
 {
 .vendor_id = 0,
 }
 };

+/**
+ * @brief Gets info about DMA queues for ports.
+ * @param pci_dev PCI device structure.
+ * @param port_count Pointer to variable set with number of ports.
+ * @param pi Pointer to array of structures with info about DMA queues
+ * for ports.
+ * @param max_ports Maximum number of ports.
+ * @return 0 on success, negative error code on error.
+ */
+static int
+get_port_info(struct rte_pci_device *pci_dev, unsigned int *port_count,
+ struct port_info *pi, unsigned int max_ports)
+{
+ struct szedata *szedata_temp;
+ char sze_dev_path[PATH_MAX];
+ uint32_t szedata2_index;
+ int ret;
+ uint16_t max_rx_queues;
+ uint16_t max_tx_queues;
+
+ if (max_ports == 0)
+ return -EINVAL;
+
+ memset(pi, 0, max_ports * sizeof(struct port_info));
+ *port_count = 0;
+
+ /* Get index of szedata2 device file and create path to device file */
+ ret = get_szedata2_index(&pci_dev->addr, &szedata2_index);
+ if (ret != 0) {
+ PMD_INIT_LOG(ERR, "Failed to get szedata2 device index!");
+ return -ENODEV;
+ }
+ snprintf(sze_dev_path, PATH_MAX, SZEDATA2_DEV_PATH_FMT, szedata2_index);
+
+ /*
+ * Get number of available DMA RX and TX channels, which is maximum
+ * number of queues that can be created.
+ */ + szedata_temp = szedata_open(sze_dev_path); + if (szedata_temp == NULL) { + PMD_INIT_LOG(ERR, "szedata_open(%s) failed", sze_dev_path); + return -EINVAL; + } + max_rx_queues = szedata_ifaces_available(szedata_temp, SZE2_DIR_RX); + max_tx_queues = szedata_ifaces_available(szedata_temp, SZE2_DIR_TX); + PMD_INIT_LOG(INFO, "Available DMA channels RX: %u TX: %u", + max_rx_queues, max_tx_queues); + if (max_rx_queues > RTE_ETH_SZEDATA2_MAX_RX_QUEUES) { + PMD_INIT_LOG(ERR, "%u RX queues exceeds supported number %u", + max_rx_queues, RTE_ETH_SZEDATA2_MAX_RX_QUEUES); + szedata_close(szedata_temp); + return -EINVAL; + } + if (max_tx_queues > RTE_ETH_SZEDATA2_MAX_TX_QUEUES) { + PMD_INIT_LOG(ERR, "%u TX queues exceeds supported number %u", + max_tx_queues, RTE_ETH_SZEDATA2_MAX_TX_QUEUES); + szedata_close(szedata_temp); + return -EINVAL; + } + + if (pci_dev->id.device_id == PCI_DEVICE_ID_NETCOPE_NFB200G2QL) { + unsigned int i; + unsigned int rx_queues = max_rx_queues / max_ports; + unsigned int tx_queues = max_tx_queues / max_ports; + + /* + * Number of queues reported by szedata_ifaces_available() + * is the number of all queues from all DMA controllers which + * may reside at different numa locations. + * All queues from the same DMA controller have the same numa + * node. + * Numa node from the first queue of each DMA controller is + * retrieved. + * If the numa node differs from the numa node of the queues + * from the previous DMA controller the queues are assigned + * to the next port. + */ + + for (i = 0; i < max_ports; i++) { + int numa_rx = szedata_get_area_numa_node(szedata_temp, + SZE2_DIR_RX, rx_queues * i); + int numa_tx = szedata_get_area_numa_node(szedata_temp, + SZE2_DIR_TX, tx_queues * i); + unsigned int port_rx_queues = numa_rx != -1 ? + rx_queues : 0; + unsigned int port_tx_queues = numa_tx != -1 ? + tx_queues : 0; + PMD_INIT_LOG(DEBUG, "%u rx queues from id %u, numa %d", + rx_queues, rx_queues * i, numa_rx); + PMD_INIT_LOG(DEBUG, "%u tx queues from id %u, numa %d", + tx_queues, tx_queues * i, numa_tx); + + if (port_rx_queues != 0 && port_tx_queues != 0 && + numa_rx != numa_tx) { + PMD_INIT_LOG(ERR, "RX queue %u numa %d differs " + "from TX queue %u numa %d " + "unexpectedly", + rx_queues * i, numa_rx, + tx_queues * i, numa_tx); + szedata_close(szedata_temp); + return -EINVAL; + } else if (port_rx_queues == 0 && port_tx_queues == 0) { + continue; + } else { + unsigned int j; + unsigned int current = *port_count; + int port_numa = port_rx_queues != 0 ? + numa_rx : numa_tx; + + for (j = 0; j < *port_count; j++) { + if (pi[j].numa_node == + port_numa) { + current = j; + break; + } + } + if (pi[current].rx_count == 0 && + pi[current].tx_count == 0) { + pi[current].rx_base_id = rx_queues * i; + pi[current].tx_base_id = tx_queues * i; + (*port_count)++; + } else if ((rx_queues * i != + pi[current].rx_base_id + + pi[current].rx_count) || + (tx_queues * i != + pi[current].tx_base_id + + pi[current].tx_count)) { + PMD_INIT_LOG(ERR, "Queue ids does not " + "fulfill constraints"); + szedata_close(szedata_temp); + return -EINVAL; + } + pi[current].rx_count += port_rx_queues; + pi[current].tx_count += port_tx_queues; + pi[current].numa_node = port_numa; + } + } + } else { + pi[0].rx_count = max_rx_queues; + pi[0].tx_count = max_tx_queues; + pi[0].numa_node = pci_dev->device.numa_node; + *port_count = 1; + } + + szedata_close(szedata_temp); + return 0; +} + +/** + * @brief Allocates rte_eth_dev device. + * @param pci_dev Corresponding PCI device. 
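The NFB-200G2QL branch of get_port_info() above groups DMA channels into ports by NUMA node: each DMA controller owns a contiguous block of queues, the node of the block's first queue is sampled, and blocks on the same node are folded into one port. A standalone sketch of just that grouping rule (block sizes and node values are invented; the real code additionally cross-checks RX against TX nodes and rejects non-contiguous queue ids):

    #include <stdio.h>

    #define MAX_PORTS 4

    struct port {
        int rx_base_id;
        int rx_count;
        int numa_node;
    };

    int
    main(void)
    {
        /* NUMA node sampled from the first queue of each DMA
         * controller block (illustrative values only). */
        const int block_numa[] = { 0, 0, 1, 1 };
        const int nb_blocks = 4;
        const int queues_per_block = 8;
        struct port ports[MAX_PORTS] = { {0, 0, 0} };
        int port_count = 0;
        int i, j;

        for (i = 0; i < nb_blocks; i++) {
            int cur = port_count;

            /* Fold the block into an existing port on the same node. */
            for (j = 0; j < port_count; j++) {
                if (ports[j].numa_node == block_numa[i]) {
                    cur = j;
                    break;
                }
            }
            if (cur == port_count) {
                if (port_count == MAX_PORTS)
                    return 1; /* more nodes than supported ports */
                ports[cur].rx_base_id = queues_per_block * i;
                ports[cur].numa_node = block_numa[i];
                port_count++;
            }
            ports[cur].rx_count += queues_per_block;
        }

        for (i = 0; i < port_count; i++)
            printf("port %d: queues %d..%d, numa %d\n", i,
                   ports[i].rx_base_id,
                   ports[i].rx_base_id + ports[i].rx_count - 1,
                   ports[i].numa_node);
        return 0;
    }
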
+ * @param numa_node NUMA node on which device is allocated. + * @param port_no Id of rte_eth_device created on PCI device pci_dev. + * @return Pointer to allocated device or NULL on error. + */ +static struct rte_eth_dev * +szedata2_eth_dev_allocate(struct rte_pci_device *pci_dev, int numa_node, + unsigned int port_no) +{ + struct rte_eth_dev *eth_dev; + char name[RTE_ETH_NAME_MAX_LEN]; + + PMD_INIT_FUNC_TRACE(); + + snprintf(name, RTE_ETH_NAME_MAX_LEN, "%s" + SZEDATA2_ETH_DEV_NAME_SUFFIX_FMT, + pci_dev->device.name, port_no); + PMD_INIT_LOG(DEBUG, "Allocating eth_dev %s", name); + + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + eth_dev = rte_eth_dev_allocate(name); + if (!eth_dev) + return NULL; + + eth_dev->data->dev_private = rte_zmalloc_socket(name, + sizeof(struct pmd_internals), RTE_CACHE_LINE_SIZE, + numa_node); + if (!eth_dev->data->dev_private) { + rte_eth_dev_release_port(eth_dev); + return NULL; + } + } else { + eth_dev = rte_eth_dev_attach_secondary(name); + if (!eth_dev) + return NULL; + } + + eth_dev->device = &pci_dev->device; + rte_eth_copy_pci_info(eth_dev, pci_dev); + eth_dev->data->numa_node = numa_node; + return eth_dev; +} + +/** + * @brief Releases interval of rte_eth_dev devices from array. + * @param eth_devs Array of pointers to rte_eth_dev devices. + * @param from Index in array eth_devs to start with. + * @param to Index in array right after the last element to release. + * + * Used for releasing at failed initialization. + */ +static void +szedata2_eth_dev_release_interval(struct rte_eth_dev **eth_devs, + unsigned int from, unsigned int to) +{ + unsigned int i; + + PMD_INIT_FUNC_TRACE(); + + for (i = from; i < to; i++) { + rte_szedata2_eth_dev_uninit(eth_devs[i]); + rte_eth_dev_pci_release(eth_devs[i]); + } +} + +/** + * @brief Callback .probe for struct rte_pci_driver. + */ static int szedata2_eth_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, struct rte_pci_device *pci_dev) { - return rte_eth_dev_pci_generic_probe(pci_dev, - sizeof(struct pmd_internals), rte_szedata2_eth_dev_init); + struct port_info port_info[SZEDATA2_MAX_PORTS]; + unsigned int port_count; + int ret; + unsigned int i; + struct pci_dev_list_entry *list_entry; + struct rte_eth_dev *eth_devs[SZEDATA2_MAX_PORTS] = {NULL,}; + + PMD_INIT_FUNC_TRACE(); + + ret = get_port_info(pci_dev, &port_count, port_info, + SZEDATA2_MAX_PORTS); + if (ret != 0) + return ret; + + if (port_count == 0) { + PMD_INIT_LOG(ERR, "No available ports!"); + return -ENODEV; + } + + list_entry = rte_zmalloc(NULL, sizeof(struct pci_dev_list_entry), + RTE_CACHE_LINE_SIZE); + if (list_entry == NULL) { + PMD_INIT_LOG(ERR, "rte_zmalloc() failed!"); + return -ENOMEM; + } + + for (i = 0; i < port_count; i++) { + eth_devs[i] = szedata2_eth_dev_allocate(pci_dev, + port_info[i].numa_node, i); + if (eth_devs[i] == NULL) { + PMD_INIT_LOG(ERR, "Failed to alloc eth_dev for port %u", + i); + szedata2_eth_dev_release_interval(eth_devs, 0, i); + rte_free(list_entry); + return -ENOMEM; + } + + ret = rte_szedata2_eth_dev_init(eth_devs[i], &port_info[i]); + if (ret != 0) { + PMD_INIT_LOG(ERR, "Failed to init eth_dev for port %u", + i); + rte_eth_dev_pci_release(eth_devs[i]); + szedata2_eth_dev_release_interval(eth_devs, 0, i); + rte_free(list_entry); + return ret; + } + + rte_eth_dev_probing_finish(eth_devs[i]); + } + + /* + * Add pci_dev to list of PCI devices for this driver + * which is used at remove callback to release all created eth_devs. 
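szedata2_eth_dev_allocate() above uses the standard split for multi-process PMDs: only the primary process creates the ethdev and its private data (allocated on the port's NUMA socket), while a secondary process merely attaches to the existing port by name. A condensed sketch of that split; my_private is a hypothetical stand-in for the driver's pmd_internals:

    #include <rte_eal.h>
    #include <rte_ethdev_driver.h>
    #include <rte_malloc.h>
    #include <rte_memory.h>

    struct my_private { int placeholder; }; /* hypothetical */

    static struct rte_eth_dev *
    alloc_or_attach(const char *name, int numa_node)
    {
        struct rte_eth_dev *eth_dev;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
            eth_dev = rte_eth_dev_allocate(name);
            if (eth_dev == NULL)
                return NULL;
            /* Private data lives in rte_malloc'd (shared) memory so
             * secondary processes can reach it. */
            eth_dev->data->dev_private = rte_zmalloc_socket(name,
                    sizeof(struct my_private),
                    RTE_CACHE_LINE_SIZE, numa_node);
            if (eth_dev->data->dev_private == NULL) {
                rte_eth_dev_release_port(eth_dev);
                return NULL;
            }
        } else {
            /* Secondary: the port already exists, attach by name. */
            eth_dev = rte_eth_dev_attach_secondary(name);
        }
        return eth_dev;
    }
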
+ */
+ list_entry->pci_dev = pci_dev;
+ list_entry->port_count = port_count;
+ LIST_INSERT_HEAD(&szedata2_pci_dev_list, list_entry, next);
 return 0;
 }

+/**
+ * @brief Callback .remove for struct rte_pci_driver.
+ */
 static int
 szedata2_eth_pci_remove(struct rte_pci_device *pci_dev)
 {
- return rte_eth_dev_pci_generic_remove(pci_dev,
- rte_szedata2_eth_dev_uninit);
+ unsigned int i;
+ unsigned int port_count;
+ char name[RTE_ETH_NAME_MAX_LEN];
+ struct rte_eth_dev *eth_dev;
+ int ret;
+ int retval = 0;
+ bool found = false;
+ struct pci_dev_list_entry *list_entry = NULL;
+
+ PMD_INIT_FUNC_TRACE();
+
+ LIST_FOREACH(list_entry, &szedata2_pci_dev_list, next) {
+ if (list_entry->pci_dev == pci_dev) {
+ port_count = list_entry->port_count;
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ PMD_DRV_LOG(ERR, "PCI device " PCI_PRI_FMT " not found",
+ pci_dev->addr.domain, pci_dev->addr.bus,
+ pci_dev->addr.devid, pci_dev->addr.function);
+ return -ENODEV;
+ }
+
+ LIST_REMOVE(list_entry, next);
+ rte_free(list_entry);
+
+ for (i = 0; i < port_count; i++) {
+ snprintf(name, RTE_ETH_NAME_MAX_LEN, "%s"
+ SZEDATA2_ETH_DEV_NAME_SUFFIX_FMT,
+ pci_dev->device.name, i);
+ PMD_DRV_LOG(DEBUG, "Removing eth_dev %s", name);
+ eth_dev = rte_eth_dev_allocated(name);
+ if (!eth_dev) {
+ PMD_DRV_LOG(ERR, "eth_dev %s not found", name);
+ retval = retval ? retval : -ENODEV;
+ continue;
+ }
+
+ ret = rte_szedata2_eth_dev_uninit(eth_dev);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "eth_dev %s uninit failed", name);
+ retval = retval ? retval : ret;
+ }
+
+ rte_eth_dev_pci_release(eth_dev);
+ }
+
+ return retval;
 }

 static struct rte_pci_driver szedata2_eth_driver = {
@@ -1652,4 +1920,16 @@ static struct rte_pci_driver szedata2_eth_driver = {
 RTE_PMD_REGISTER_PCI(RTE_SZEDATA2_DRIVER_NAME, szedata2_eth_driver);
 RTE_PMD_REGISTER_PCI_TABLE(RTE_SZEDATA2_DRIVER_NAME, rte_szedata2_pci_id_table);
 RTE_PMD_REGISTER_KMOD_DEP(RTE_SZEDATA2_DRIVER_NAME,
- "* combo6core & combov3 & szedata2 & szedata2_cv3");
+ "* combo6core & combov3 & szedata2 & ( szedata2_cv3 | szedata2_cv3_fdt )");
+
+RTE_INIT(szedata2_init_log);
+static void
+szedata2_init_log(void)
+{
+ szedata2_logtype_init = rte_log_register("pmd.net.szedata2.init");
+ if (szedata2_logtype_init >= 0)
+ rte_log_set_level(szedata2_logtype_init, RTE_LOG_NOTICE);
+ szedata2_logtype_driver = rte_log_register("pmd.net.szedata2.driver");
+ if (szedata2_logtype_driver >= 0)
+ rte_log_set_level(szedata2_logtype_driver, RTE_LOG_NOTICE);
+}
diff --git a/drivers/net/szedata2/rte_eth_szedata2.h b/drivers/net/szedata2/rte_eth_szedata2.h
index f25d4c59..26a82b35 100644
--- a/drivers/net/szedata2/rte_eth_szedata2.h
+++ b/drivers/net/szedata2/rte_eth_szedata2.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright (c) 2015 - 2016 CESNET
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of CESNET nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2015 - 2016 CESNET */ #ifndef RTE_PMD_SZEDATA2_H_ @@ -47,9 +18,7 @@ #define PCI_DEVICE_ID_NETCOPE_COMBO80G 0xcb80 #define PCI_DEVICE_ID_NETCOPE_COMBO100G 0xc1c1 #define PCI_DEVICE_ID_NETCOPE_COMBO100G2 0xc2c1 - -/* number of PCI resource used by COMBO card */ -#define PCI_RESOURCE_NUMBER 0 +#define PCI_DEVICE_ID_NETCOPE_NFB200G2QL 0xc250 /* szedata2_packet header length == 4 bytes == 2B segment size + 2B hw size */ #define RTE_SZE2_PACKET_HEADER_SIZE 4 diff --git a/drivers/net/szedata2/szedata2_iobuf.c b/drivers/net/szedata2/szedata2_iobuf.c deleted file mode 100644 index 3b9a71fe..00000000 --- a/drivers/net/szedata2/szedata2_iobuf.c +++ /dev/null @@ -1,203 +0,0 @@ -/*- - * BSD LICENSE - * - * Copyright (c) 2017 CESNET - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of CESNET nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include - -#include - -#include "szedata2_iobuf.h" - -/* - * IBUFs and OBUFs can generally be located at different offsets in different - * firmwares (modes). - * This part defines base offsets of IBUFs and OBUFs for various cards - * and firmwares (modes). - * Type of firmware (mode) is set through configuration option - * CONFIG_RTE_LIBRTE_PMD_SZEDATA2_AS. 
- * Possible values are: - * 0 - for cards (modes): - * NFB-100G1 (100G1) - * - * 1 - for cards (modes): - * NFB-100G2Q (100G1) - * - * 2 - for cards (modes): - * NFB-40G2 (40G2) - * NFB-100G2C (100G2) - * NFB-100G2Q (40G2) - * - * 3 - for cards (modes): - * NFB-40G2 (10G8) - * NFB-100G2Q (10G8) - * - * 4 - for cards (modes): - * NFB-100G1 (10G10) - * - * 5 - for experimental firmwares and future use - */ -#if !defined(RTE_LIBRTE_PMD_SZEDATA2_AS) -#error "RTE_LIBRTE_PMD_SZEDATA2_AS has to be defined" -#elif RTE_LIBRTE_PMD_SZEDATA2_AS == 0 - -/* - * Cards (modes): - * NFB-100G1 (100G1) - */ - -const uint32_t szedata2_ibuf_base_table[] = { - 0x8000 -}; -const uint32_t szedata2_obuf_base_table[] = { - 0x9000 -}; - -#elif RTE_LIBRTE_PMD_SZEDATA2_AS == 1 - -/* - * Cards (modes): - * NFB-100G2Q (100G1) - */ - -const uint32_t szedata2_ibuf_base_table[] = { - 0x8800 -}; -const uint32_t szedata2_obuf_base_table[] = { - 0x9800 -}; - -#elif RTE_LIBRTE_PMD_SZEDATA2_AS == 2 - -/* - * Cards (modes): - * NFB-40G2 (40G2) - * NFB-100G2C (100G2) - * NFB-100G2Q (40G2) - */ - -const uint32_t szedata2_ibuf_base_table[] = { - 0x8000, - 0x8800 -}; -const uint32_t szedata2_obuf_base_table[] = { - 0x9000, - 0x9800 -}; - -#elif RTE_LIBRTE_PMD_SZEDATA2_AS == 3 - -/* - * Cards (modes): - * NFB-40G2 (10G8) - * NFB-100G2Q (10G8) - */ - -const uint32_t szedata2_ibuf_base_table[] = { - 0x8000, - 0x8200, - 0x8400, - 0x8600, - 0x8800, - 0x8A00, - 0x8C00, - 0x8E00 -}; -const uint32_t szedata2_obuf_base_table[] = { - 0x9000, - 0x9200, - 0x9400, - 0x9600, - 0x9800, - 0x9A00, - 0x9C00, - 0x9E00 -}; - -#elif RTE_LIBRTE_PMD_SZEDATA2_AS == 4 - -/* - * Cards (modes): - * NFB-100G1 (10G10) - */ - -const uint32_t szedata2_ibuf_base_table[] = { - 0x8000, - 0x8200, - 0x8400, - 0x8600, - 0x8800, - 0x8A00, - 0x8C00, - 0x8E00, - 0x9000, - 0x9200 -}; -const uint32_t szedata2_obuf_base_table[] = { - 0xA000, - 0xA200, - 0xA400, - 0xA600, - 0xA800, - 0xAA00, - 0xAC00, - 0xAE00, - 0xB000, - 0xB200 -}; - -#elif RTE_LIBRTE_PMD_SZEDATA2_AS == 5 - -/* - * Future use and experimental firmwares. - */ - -const uint32_t szedata2_ibuf_base_table[] = { - 0x8000, - 0x8200, - 0x8400, - 0x8600, - 0x8800 -}; -const uint32_t szedata2_obuf_base_table[] = { - 0x9000, - 0x9200, - 0x9400, - 0x9600, - 0x9800 -}; - -#else -#error "RTE_LIBRTE_PMD_SZEDATA2_AS has wrong value, see comments in config file" -#endif - -const uint32_t szedata2_ibuf_count = RTE_DIM(szedata2_ibuf_base_table); -const uint32_t szedata2_obuf_count = RTE_DIM(szedata2_obuf_base_table); diff --git a/drivers/net/szedata2/szedata2_iobuf.h b/drivers/net/szedata2/szedata2_iobuf.h deleted file mode 100644 index f1ccb3b2..00000000 --- a/drivers/net/szedata2/szedata2_iobuf.h +++ /dev/null @@ -1,356 +0,0 @@ -/*- - * BSD LICENSE - * - * Copyright (c) 2017 CESNET - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of CESNET nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef _SZEDATA2_IOBUF_H_ -#define _SZEDATA2_IOBUF_H_ - -#include -#include - -#include -#include -#include - -/* IBUF offsets from the beginning of the PCI resource address space. */ -extern const uint32_t szedata2_ibuf_base_table[]; -extern const uint32_t szedata2_ibuf_count; - -/* OBUF offsets from the beginning of the PCI resource address space. */ -extern const uint32_t szedata2_obuf_base_table[]; -extern const uint32_t szedata2_obuf_count; - -enum szedata2_link_speed { - SZEDATA2_LINK_SPEED_DEFAULT = 0, - SZEDATA2_LINK_SPEED_10G, - SZEDATA2_LINK_SPEED_40G, - SZEDATA2_LINK_SPEED_100G, -}; - -enum szedata2_mac_check_mode { - SZEDATA2_MAC_CHMODE_PROMISC = 0x0, - SZEDATA2_MAC_CHMODE_ONLY_VALID = 0x1, - SZEDATA2_MAC_CHMODE_ALL_BROADCAST = 0x2, - SZEDATA2_MAC_CHMODE_ALL_MULTICAST = 0x3, -}; - -/** - * Macro takes pointer to pci resource structure (rsc) - * and returns pointer to mapped resource memory at - * specified offset (offset) typecast to the type (type). - */ -#define SZEDATA2_PCI_RESOURCE_PTR(rsc, offset, type) \ - ((type)(((uint8_t *)(rsc)->addr) + (offset))) - -/** - * Maximum possible number of MAC addresses (limited by IBUF status - * register value MAC_COUNT which has 5 bits). - */ -#define SZEDATA2_IBUF_MAX_MAC_COUNT 32 - -/** - * Structure describes IBUF address space. 
- */ -struct szedata2_ibuf { - /** Total Received Frames Counter low part */ - uint32_t trfcl; /**< 0x00 */ - /** Correct Frames Counter low part */ - uint32_t cfcl; /**< 0x04 */ - /** Discarded Frames Counter low part */ - uint32_t dfcl; /**< 0x08 */ - /** Counter of frames discarded due to buffer overflow low part */ - uint32_t bodfcl; /**< 0x0C */ - /** Total Received Frames Counter high part */ - uint32_t trfch; /**< 0x10 */ - /** Correct Frames Counter high part */ - uint32_t cfch; /**< 0x14 */ - /** Discarded Frames Counter high part */ - uint32_t dfch; /**< 0x18 */ - /** Counter of frames discarded due to buffer overflow high part */ - uint32_t bodfch; /**< 0x1C */ - /** IBUF enable register */ - uint32_t ibuf_en; /**< 0x20 */ - /** Error mask register */ - uint32_t err_mask; /**< 0x24 */ - /** IBUF status register */ - uint32_t ibuf_st; /**< 0x28 */ - /** IBUF command register */ - uint32_t ibuf_cmd; /**< 0x2C */ - /** Minimum frame length allowed */ - uint32_t mfla; /**< 0x30 */ - /** Frame MTU */ - uint32_t mtu; /**< 0x34 */ - /** MAC address check mode */ - uint32_t mac_chmode; /**< 0x38 */ - /** Octets Received OK Counter low part */ - uint32_t orocl; /**< 0x3C */ - /** Octets Received OK Counter high part */ - uint32_t oroch; /**< 0x40 */ - /** reserved */ - uint8_t reserved[60]; /**< 0x4C */ - /** IBUF memory for MAC addresses */ - uint32_t mac_mem[2 * SZEDATA2_IBUF_MAX_MAC_COUNT]; /**< 0x80 */ -} __rte_packed; - -/** - * Structure describes OBUF address space. - */ -struct szedata2_obuf { - /** Total Sent Frames Counter low part */ - uint32_t tsfcl; /**< 0x00 */ - /** Octets Sent Counter low part */ - uint32_t oscl; /**< 0x04 */ - /** Total Discarded Frames Counter low part */ - uint32_t tdfcl; /**< 0x08 */ - /** reserved */ - uint32_t reserved1; /**< 0x0C */ - /** Total Sent Frames Counter high part */ - uint32_t tsfch; /**< 0x10 */ - /** Octets Sent Counter high part */ - uint32_t osch; /**< 0x14 */ - /** Total Discarded Frames Counter high part */ - uint32_t tdfch; /**< 0x18 */ - /** reserved */ - uint32_t reserved2; /**< 0x1C */ - /** OBUF enable register */ - uint32_t obuf_en; /**< 0x20 */ - /** reserved */ - uint64_t reserved3; /**< 0x24 */ - /** OBUF control register */ - uint32_t ctrl; /**< 0x2C */ - /** OBUF status register */ - uint32_t obuf_st; /**< 0x30 */ -} __rte_packed; - -/** - * Wrapper for reading 4 bytes from device memory in correct endianness. - * - * @param addr - * Address for reading. - * @return - * 4 B value. - */ -static inline uint32_t -szedata2_read32(const volatile void *addr) -{ - return rte_le_to_cpu_32(rte_read32(addr)); -} - -/** - * Wrapper for writing 4 bytes to device memory in correct endianness. - * - * @param value - * Value to write. - * @param addr - * Address for writing. - */ -static inline void -szedata2_write32(uint32_t value, volatile void *addr) -{ - rte_write32(rte_cpu_to_le_32(value), addr); -} - -/** - * Get pointer to IBUF structure according to specified index. - * - * @param rsc - * Pointer to base address of memory resource. - * @param index - * Index of IBUF. - * @return - * Pointer to IBUF structure. - */ -static inline struct szedata2_ibuf * -ibuf_ptr_by_index(struct rte_mem_resource *rsc, uint32_t index) -{ - if (index >= szedata2_ibuf_count) - index = szedata2_ibuf_count - 1; - return SZEDATA2_PCI_RESOURCE_PTR(rsc, szedata2_ibuf_base_table[index], - struct szedata2_ibuf *); -} - -/** - * Get pointer to OBUF structure according to specified idnex. - * - * @param rsc - * Pointer to base address of memory resource. 
- * @param index - * Index of OBUF. - * @return - * Pointer to OBUF structure. - */ -static inline struct szedata2_obuf * -obuf_ptr_by_index(struct rte_mem_resource *rsc, uint32_t index) -{ - if (index >= szedata2_obuf_count) - index = szedata2_obuf_count - 1; - return SZEDATA2_PCI_RESOURCE_PTR(rsc, szedata2_obuf_base_table[index], - struct szedata2_obuf *); -} - -/** - * Checks if IBUF is enabled. - * - * @param ibuf - * Pointer to IBUF structure. - * @return - * true if IBUF is enabled. - * false if IBUF is disabled. - */ -static inline bool -ibuf_is_enabled(const volatile struct szedata2_ibuf *ibuf) -{ - return ((szedata2_read32(&ibuf->ibuf_en) & 0x1) != 0) ? true : false; -} - -/** - * Enables IBUF. - * - * @param ibuf - * Pointer to IBUF structure. - */ -static inline void -ibuf_enable(volatile struct szedata2_ibuf *ibuf) -{ - szedata2_write32(szedata2_read32(&ibuf->ibuf_en) | 0x1, &ibuf->ibuf_en); -} - -/** - * Disables IBUF. - * - * @param ibuf - * Pointer to IBUF structure. - */ -static inline void -ibuf_disable(volatile struct szedata2_ibuf *ibuf) -{ - szedata2_write32(szedata2_read32(&ibuf->ibuf_en) & ~0x1, - &ibuf->ibuf_en); -} - -/** - * Checks if link is up. - * - * @param ibuf - * Pointer to IBUF structure. - * @return - * true if ibuf link is up. - * false if ibuf link is down. - */ -static inline bool -ibuf_is_link_up(const volatile struct szedata2_ibuf *ibuf) -{ - return ((szedata2_read32(&ibuf->ibuf_st) & 0x80) != 0) ? true : false; -} - -/** - * Get current MAC address check mode from IBUF. - * - * @param ibuf - * Pointer to IBUF structure. - * @return - * MAC address check mode constant. - */ -static inline enum szedata2_mac_check_mode -ibuf_mac_mode_read(const volatile struct szedata2_ibuf *ibuf) -{ - switch (szedata2_read32(&ibuf->mac_chmode) & 0x3) { - case 0x0: - return SZEDATA2_MAC_CHMODE_PROMISC; - case 0x1: - return SZEDATA2_MAC_CHMODE_ONLY_VALID; - case 0x2: - return SZEDATA2_MAC_CHMODE_ALL_BROADCAST; - case 0x3: - return SZEDATA2_MAC_CHMODE_ALL_MULTICAST; - default: - return SZEDATA2_MAC_CHMODE_PROMISC; - } -} - -/** - * Writes mode in MAC address check mode register in IBUF. - * - * @param ibuf - * Pointer to IBUF structure. - * @param mode - * MAC address check mode to set. - */ -static inline void -ibuf_mac_mode_write(volatile struct szedata2_ibuf *ibuf, - enum szedata2_mac_check_mode mode) -{ - szedata2_write32((szedata2_read32(&ibuf->mac_chmode) & ~0x3) | mode, - &ibuf->mac_chmode); -} - -/** - * Checks if obuf is enabled. - * - * @param obuf - * Pointer to OBUF structure. - * @return - * true if OBUF is enabled. - * false if OBUF is disabled. - */ -static inline bool -obuf_is_enabled(const volatile struct szedata2_obuf *obuf) -{ - return ((szedata2_read32(&obuf->obuf_en) & 0x1) != 0) ? true : false; -} - -/** - * Enables OBUF. - * - * @param obuf - * Pointer to OBUF structure. - */ -static inline void -obuf_enable(volatile struct szedata2_obuf *obuf) -{ - szedata2_write32(szedata2_read32(&obuf->obuf_en) | 0x1, &obuf->obuf_en); -} - -/** - * Disables OBUF. - * - * @param obuf - * Pointer to OBUF structure. 
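For context on the logging used throughout the new szedata2 code (see the szedata2_logs.h hunk below and the RTE_INIT block earlier in this diff): DPDK 18.05 dynamic logging registers a named logtype once at constructor time and routes every message through it, so levels such as pmd.net.szedata2.init can be tuned per component at runtime. A minimal sketch of the pattern for a hypothetical driver:

    #include <rte_common.h>
    #include <rte_log.h>

    static int mydrv_logtype;

    #define MYDRV_LOG(level, fmt, args...) \
        rte_log(RTE_LOG_ ## level, mydrv_logtype, \
            "%s(): " fmt "\n", __func__, ## args)

    /* Runs as a constructor before main(), like the patch's
     * szedata2_init_log(). */
    RTE_INIT(mydrv_init_log);
    static void
    mydrv_init_log(void)
    {
        mydrv_logtype = rte_log_register("pmd.net.mydrv");
        if (mydrv_logtype >= 0)
            rte_log_set_level(mydrv_logtype, RTE_LOG_NOTICE);
    }

    /* Example use:
     *   MYDRV_LOG(INFO, "probed port %u", 0);
     * Raise verbosity at runtime with:
     *   --log-level=pmd.net.mydrv:debug
     */
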
- */ -static inline void -obuf_disable(volatile struct szedata2_obuf *obuf) -{ - szedata2_write32(szedata2_read32(&obuf->obuf_en) & ~0x1, - &obuf->obuf_en); -} - -#endif /* _SZEDATA2_IOBUF_H_ */ diff --git a/drivers/net/szedata2/szedata2_logs.h b/drivers/net/szedata2/szedata2_logs.h new file mode 100644 index 00000000..8d06ffa3 --- /dev/null +++ b/drivers/net/szedata2/szedata2_logs.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 CESNET + */ + +#ifndef _SZEDATA2_LOGS_H_ +#define _SZEDATA2_LOGS_H_ + +#include + +extern int szedata2_logtype_init; +#define PMD_INIT_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, szedata2_logtype_init, \ + "%s(): " fmt "\n", __func__, ## args) + +#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>") + +extern int szedata2_logtype_driver; +#define PMD_DRV_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, szedata2_logtype_driver, \ + "%s(): " fmt "\n", __func__, ## args) + +#endif /* _SZEDATA2_LOGS_H_ */ diff --git a/drivers/net/tap/rte_eth_tap.c b/drivers/net/tap/rte_eth_tap.c index f09db0ea..5531fe9d 100644 --- a/drivers/net/tap/rte_eth_tap.c +++ b/drivers/net/tap/rte_eth_tap.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include @@ -34,6 +35,7 @@ #include #include +#include #include #include #include @@ -42,13 +44,19 @@ /* Linux based path to the TUN device */ #define TUN_TAP_DEV_PATH "/dev/net/tun" #define DEFAULT_TAP_NAME "dtap" +#define DEFAULT_TUN_NAME "dtun" #define ETH_TAP_IFACE_ARG "iface" #define ETH_TAP_REMOTE_ARG "remote" #define ETH_TAP_MAC_ARG "mac" #define ETH_TAP_MAC_FIXED "fixed" +#define ETH_TAP_USR_MAC_FMT "xx:xx:xx:xx:xx:xx" +#define ETH_TAP_CMP_MAC_FMT "0123456789ABCDEFabcdef" +#define ETH_TAP_MAC_ARG_FMT ETH_TAP_MAC_FIXED "|" ETH_TAP_USR_MAC_FMT + static struct rte_vdev_driver pmd_tap_drv; +static struct rte_vdev_driver pmd_tun_drv; static const char *valid_arguments[] = { ETH_TAP_IFACE_ARG, @@ -58,6 +66,9 @@ static const char *valid_arguments[] = { }; static int tap_unit; +static unsigned int tun_unit; + +static char tuntap_name[8]; static volatile uint32_t tap_trigger; /* Rx trigger */ @@ -65,7 +76,7 @@ static struct rte_eth_link pmd_link = { .link_speed = ETH_SPEED_NUM_10G, .link_duplex = ETH_LINK_FULL_DUPLEX, .link_status = ETH_LINK_DOWN, - .link_autoneg = ETH_LINK_AUTONEG + .link_autoneg = ETH_LINK_FIXED, }; static void @@ -84,13 +95,20 @@ enum ioctl_mode { static int tap_intr_handle_set(struct rte_eth_dev *dev, int set); -/* Tun/Tap allocation routine +/** + * Tun/Tap allocation routine + * + * @param[in] pmd + * Pointer to private structure. * - * name is the number of the interface to use, unless NULL to take the host - * supplied name. + * @param[in] is_keepalive + * Keepalive flag + * + * @return + * -1 on failure, fd on success */ static int -tun_alloc(struct pmd_internals *pmd) +tun_alloc(struct pmd_internals *pmd, int is_keepalive) { struct ifreq ifr; #ifdef IFF_MULTI_QUEUE @@ -104,51 +122,64 @@ tun_alloc(struct pmd_internals *pmd) * Do not set IFF_NO_PI as packet information header will be needed * to check if a received packet has been truncated. */ - ifr.ifr_flags = IFF_TAP; + ifr.ifr_flags = (pmd->type == ETH_TUNTAP_TYPE_TAP) ? 
+ IFF_TAP : IFF_TUN | IFF_POINTOPOINT; snprintf(ifr.ifr_name, IFNAMSIZ, "%s", pmd->name); - RTE_LOG(DEBUG, PMD, "ifr_name '%s'\n", ifr.ifr_name); + TAP_LOG(DEBUG, "ifr_name '%s'", ifr.ifr_name); fd = open(TUN_TAP_DEV_PATH, O_RDWR); if (fd < 0) { - RTE_LOG(ERR, PMD, "Unable to create TAP interface\n"); + TAP_LOG(ERR, "Unable to create %s interface", tuntap_name); goto error; } #ifdef IFF_MULTI_QUEUE /* Grab the TUN features to verify we can work multi-queue */ if (ioctl(fd, TUNGETFEATURES, &features) < 0) { - RTE_LOG(ERR, PMD, "TAP unable to get TUN/TAP features\n"); + TAP_LOG(ERR, "%s unable to get TUN/TAP features", + tuntap_name); goto error; } - RTE_LOG(DEBUG, PMD, " TAP Features %08x\n", features); + TAP_LOG(DEBUG, "%s Features %08x", tuntap_name, features); if (features & IFF_MULTI_QUEUE) { - RTE_LOG(DEBUG, PMD, " Multi-queue support for %d queues\n", + TAP_LOG(DEBUG, " Multi-queue support for %d queues", RTE_PMD_TAP_MAX_QUEUES); ifr.ifr_flags |= IFF_MULTI_QUEUE; } else #endif { ifr.ifr_flags |= IFF_ONE_QUEUE; - RTE_LOG(DEBUG, PMD, " Single queue only support\n"); + TAP_LOG(DEBUG, " Single queue only support"); } /* Set the TUN/TAP configuration and set the name if needed */ if (ioctl(fd, TUNSETIFF, (void *)&ifr) < 0) { - RTE_LOG(WARNING, PMD, - "Unable to set TUNSETIFF for %s\n", - ifr.ifr_name); - perror("TUNSETIFF"); + TAP_LOG(WARNING, "Unable to set TUNSETIFF for %s: %s", + ifr.ifr_name, strerror(errno)); goto error; } + if (is_keepalive) { + /* + * Detach the TUN/TAP keep-alive queue + * to avoid traffic through it + */ + ifr.ifr_flags = IFF_DETACH_QUEUE; + if (ioctl(fd, TUNSETQUEUE, (void *)&ifr) < 0) { + TAP_LOG(WARNING, + "Unable to detach keep-alive queue for %s: %s", + ifr.ifr_name, strerror(errno)); + goto error; + } + } + /* Always set the file descriptor to non-blocking */ if (fcntl(fd, F_SETFL, O_NONBLOCK) < 0) { - RTE_LOG(WARNING, PMD, - "Unable to set %s to nonblocking\n", - ifr.ifr_name); - perror("F_SETFL, NONBLOCK"); + TAP_LOG(WARNING, + "Unable to set %s to nonblocking: %s", + ifr.ifr_name, strerror(errno)); goto error; } @@ -182,10 +213,11 @@ tun_alloc(struct pmd_internals *pmd) fcntl(fd, F_SETFL, flags | O_ASYNC); fcntl(fd, F_SETOWN, getpid()); } while (0); + if (errno) { /* Disable trigger globally in case of error */ tap_trigger = 0; - RTE_LOG(WARNING, PMD, "Rx trigger disabled: %s\n", + TAP_LOG(WARNING, "Rx trigger disabled: %s", strerror(errno)); } @@ -255,14 +287,9 @@ static uint64_t tap_rx_offload_get_port_capa(void) { /* - * In order to support legacy apps, - * report capabilities also as port capabilities. + * No specific port Rx offload capabilities. */ - return DEV_RX_OFFLOAD_SCATTER | - DEV_RX_OFFLOAD_IPV4_CKSUM | - DEV_RX_OFFLOAD_UDP_CKSUM | - DEV_RX_OFFLOAD_TCP_CKSUM | - DEV_RX_OFFLOAD_CRC_STRIP; + return 0; } static uint64_t @@ -275,21 +302,6 @@ tap_rx_offload_get_queue_capa(void) DEV_RX_OFFLOAD_CRC_STRIP; } -static bool -tap_rxq_are_offloads_valid(struct rte_eth_dev *dev, uint64_t offloads) -{ - uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads; - uint64_t queue_supp_offloads = tap_rx_offload_get_queue_capa(); - uint64_t port_supp_offloads = tap_rx_offload_get_port_capa(); - - if ((offloads & (queue_supp_offloads | port_supp_offloads)) != - offloads) - return false; - if ((port_offloads ^ offloads) & port_supp_offloads) - return false; - return true; -} - /* Callback to handle the rx burst of packets to the correct interface and * file descriptor(s) in a multi-queue setup. 
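tun_alloc() above drives the whole device setup through /dev/net/tun ioctls: TUNSETIFF selects TAP or TUN (point-to-point) mode while deliberately leaving IFF_NO_PI clear so the tun_pi packet-information header is kept, and the new keep-alive path parks its queue with TUNSETQUEUE/IFF_DETACH_QUEUE so no traffic flows through it. A bare-bones sketch of the same ioctl sequence (Linux-only, error handling trimmed, interface name invented):

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/if.h>
    #include <linux/if_tun.h>

    /* Returns an fd for a TAP device named "ex0", or -1 on error. */
    static int
    tap_open_example(void)
    {
        struct ifreq ifr;
        int fd = open("/dev/net/tun", O_RDWR);

        if (fd < 0)
            return -1;
        memset(&ifr, 0, sizeof(ifr));
        /* IFF_NO_PI is intentionally NOT set: keep the tun_pi
         * header, as the PMD relies on it. */
        ifr.ifr_flags = IFF_TAP;
        snprintf(ifr.ifr_name, IFNAMSIZ, "%s", "ex0");
        if (ioctl(fd, TUNSETIFF, (void *)&ifr) < 0) {
            close(fd);
            return -1;
        }
        /* A keep-alive queue would now be parked with:
         *   ifr.ifr_flags = IFF_DETACH_QUEUE;
         *   ioctl(fd, TUNSETQUEUE, (void *)&ifr);
         */
        fcntl(fd, F_SETFL, O_NONBLOCK);
        return fd;
    }

    /* Usage (requires CAP_NET_ADMIN):
     *   int fd = tap_open_example();
     */
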
*/
@@ -389,13 +401,9 @@ static uint64_t
 tap_tx_offload_get_port_capa(void)
 {
 /*
- * In order to support legacy apps,
- * report capabilities also as port capabilities.
+ * No specific port Tx offload capabilities.
 */
- return DEV_TX_OFFLOAD_MULTI_SEGS |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM;
+ return 0;
 }

 static uint64_t
@@ -407,22 +415,6 @@ tap_tx_offload_get_queue_capa(void)
 DEV_TX_OFFLOAD_TCP_CKSUM;
 }

-static bool
-tap_txq_are_offloads_valid(struct rte_eth_dev *dev, uint64_t offloads)
-{
- uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
- uint64_t queue_supp_offloads = tap_tx_offload_get_queue_capa();
- uint64_t port_supp_offloads = tap_tx_offload_get_port_capa();
-
- if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
- offloads)
- return false;
- /* Verify we have no conflict with port offloads */
- if ((port_offloads ^ offloads) & port_supp_offloads)
- return false;
- return true;
-}
-
 static void
 tap_tx_offload(char *packet, uint64_t ol_flags, unsigned int l2_len,
 unsigned int l3_len)
@@ -491,7 +483,7 @@ pmd_tx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 for (i = 0; i < nb_pkts; i++) {
 struct rte_mbuf *mbuf = bufs[num_tx];
 struct iovec iovecs[mbuf->nb_segs + 1];
- struct tun_pi pi = { .flags = 0 };
+ struct tun_pi pi = { .flags = 0, .proto = 0x00 };
 struct rte_mbuf *seg = mbuf;
 char m_copy[mbuf->data_len];
 int n;
@@ -501,6 +493,23 @@ pmd_tx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 if (rte_pktmbuf_pkt_len(mbuf) > max_size)
 break;

+ if (txq->type == ETH_TUNTAP_TYPE_TUN) {
+ /*
+ * TUN and TAP are created with IFF_NO_PI disabled.
+ * For the TUN PMD this is mandatory, as the kernel's
+ * tun.c uses these fields to determine whether a
+ * received packet is IP or non-IP.
+ *
+ * The logic fetches the first byte of data from the
+ * mbuf and checks whether it is v4 or v6. If the high
+ * nibble is 4 or 6, the protocol field is updated.
+ */
+ char *buff_data = rte_pktmbuf_mtod(seg, void *);
+ j = (*buff_data & 0xf0);
+ pi.proto = (j == 0x40) ? rte_cpu_to_be_16(ETHER_TYPE_IPv4) :
+ (j == 0x60) ? rte_cpu_to_be_16(ETHER_TYPE_IPv6) : 0x00;
+ }
+
 iovecs[0].iov_base = &pi;
 iovecs[0].iov_len = sizeof(pi);
 for (j = 1; j <= mbuf->nb_segs; j++) {
@@ -593,7 +602,9 @@ apply:
 case SIOCSIFMTU:
 break;
 default:
- RTE_ASSERT(!"unsupported request type: must not happen");
+ RTE_LOG(WARNING, PMD, "%s: ioctl() called with wrong arg\n",
+ pmd->name);
+ return -EINVAL;
 }
 if (ioctl(pmd->ioctl_sock, request, ifr) < 0)
 goto error;
@@ -602,8 +613,8 @@ apply:
 return 0;

 error:
- RTE_LOG(DEBUG, PMD, "%s: %s(%s) failed: %s(%d)\n", ifr->ifr_name,
- __func__, tap_ioctl_req2str(request), strerror(errno), errno);
+ TAP_LOG(DEBUG, "%s(%s) failed: %s(%d)", ifr->ifr_name,
+ tap_ioctl_req2str(request), strerror(errno), errno);
 return -errno;
 }

@@ -650,39 +661,28 @@ tap_dev_stop(struct rte_eth_dev *dev)
 static int
 tap_dev_configure(struct rte_eth_dev *dev)
 {
- uint64_t supp_tx_offloads = tap_tx_offload_get_port_capa();
- uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
-
- if ((tx_offloads & supp_tx_offloads) != tx_offloads) {
- rte_errno = ENOTSUP;
- RTE_LOG(ERR, PMD,
- "Some Tx offloads are not supported "
- "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
- tx_offloads, supp_tx_offloads);
- return -rte_errno;
- }
 if (dev->data->nb_rx_queues > RTE_PMD_TAP_MAX_QUEUES) {
- RTE_LOG(ERR, PMD,
- "%s: number of rx queues %d exceeds max num of queues %d\n",
+ TAP_LOG(ERR,
+ "%s: number of rx queues %d exceeds max num of queues %d",
 dev->device->name,
 dev->data->nb_rx_queues,
 RTE_PMD_TAP_MAX_QUEUES);
 return -1;
 }
 if (dev->data->nb_tx_queues > RTE_PMD_TAP_MAX_QUEUES) {
- RTE_LOG(ERR, PMD,
- "%s: number of tx queues %d exceeds max num of queues %d\n",
+ TAP_LOG(ERR,
+ "%s: number of tx queues %d exceeds max num of queues %d",
 dev->device->name,
 dev->data->nb_tx_queues,
 RTE_PMD_TAP_MAX_QUEUES);
 return -1;
 }

- RTE_LOG(INFO, PMD, "%s: %p: TX configured queues number: %u\n",
- dev->device->name, (void *)dev, dev->data->nb_tx_queues);
+ TAP_LOG(INFO, "%s: %p: TX configured queues number: %u",
+ dev->device->name, (void *)dev, dev->data->nb_tx_queues);

- RTE_LOG(INFO, PMD, "%s: %p: RX configured queues number: %u\n",
- dev->device->name, (void *)dev, dev->data->nb_rx_queues);
+ TAP_LOG(INFO, "%s: %p: RX configured queues number: %u",
+ dev->device->name, (void *)dev, dev->data->nb_rx_queues);

 return 0;
 }
@@ -732,7 +732,6 @@ tap_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 dev_info->max_rx_queues = RTE_PMD_TAP_MAX_QUEUES;
 dev_info->max_tx_queues = RTE_PMD_TAP_MAX_QUEUES;
 dev_info->min_rx_bufsize = 0;
- dev_info->pci_dev = NULL;
 dev_info->speed_capa = tap_dev_speed_capa();
 dev_info->rx_queue_offload_capa = tap_rx_offload_get_queue_capa();
 dev_info->rx_offload_capa = tap_rx_offload_get_port_capa() |
@@ -740,6 +739,12 @@ tap_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 dev_info->tx_queue_offload_capa = tap_tx_offload_get_queue_capa();
 dev_info->tx_offload_capa = tap_tx_offload_get_port_capa() |
 dev_info->tx_queue_offload_capa;
+ dev_info->hash_key_size = TAP_RSS_HASH_KEY_SIZE;
+ /*
+ * limitation: TAP supports all of IP, UDP and TCP hash
+ * functions together and not in partial combinations
+ */
+ dev_info->flow_type_rss_offloads = ~TAP_RSS_HF_MASK;
 }

 static int
@@ -830,6 +835,15 @@ tap_dev_close(struct rte_eth_dev *dev)
 ioctl(internals->ioctl_sock, SIOCSIFFLAGS,
 &internals->remote_initial_flags);
 }
+
+ if (internals->ka_fd != -1) {
+ close(internals->ka_fd);
+ internals->ka_fd = -1;
+ }
+ /*
+ * Since the TUN device has no more open file descriptors,
+ * it will be removed from the kernel.
+ */
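The TUN transmit path above fills tun_pi.proto from the IP version nibble because, without IFF_NO_PI, the kernel expects that header on every frame. The classification step in isolation, with plain libc stand-ins (htons() in place of rte_cpu_to_be_16(), numeric EtherTypes in place of ETHER_TYPE_IPv4/IPv6):

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    #define TYPE_IPV4 0x0800 /* ETHER_TYPE_IPv4 */
    #define TYPE_IPV6 0x86DD /* ETHER_TYPE_IPv6 */

    /* Classify a raw L3 frame the way the TUN Tx path does: the high
     * nibble of the first byte is the IP version field. */
    static uint16_t
    tun_proto(const uint8_t *frame)
    {
        switch (frame[0] & 0xf0) {
        case 0x40:
            return htons(TYPE_IPV4);
        case 0x60:
            return htons(TYPE_IPV6);
        default:
            return 0; /* non-IP: leave tun_pi.proto zeroed */
        }
    }

    int
    main(void)
    {
        const uint8_t v4_frame[] = { 0x45, 0x00 }; /* IPv4, IHL 5 */
        const uint8_t v6_frame[] = { 0x60, 0x00 }; /* IPv6 */

        printf("v4 -> 0x%04x, v6 -> 0x%04x\n",
               ntohs(tun_proto(v4_frame)), ntohs(tun_proto(v6_frame)));
        return 0;
    }
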
 }

 static void
@@ -929,48 +943,64 @@ tap_allmulti_disable(struct rte_eth_dev *dev)
 tap_flow_implicit_destroy(pmd, TAP_REMOTE_ALLMULTI);
 }

-static void
+static int
 tap_mac_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
 {
 struct pmd_internals *pmd = dev->data->dev_private;
 enum ioctl_mode mode = LOCAL_ONLY;
 struct ifreq ifr;
+ int ret;
+
+ if (pmd->type == ETH_TUNTAP_TYPE_TUN) {
+ TAP_LOG(ERR, "%s: can't set MAC address for TUN",
+ dev->device->name);
+ return -ENOTSUP;
+ }

 if (is_zero_ether_addr(mac_addr)) {
- RTE_LOG(ERR, PMD, "%s: can't set an empty MAC address\n",
+ TAP_LOG(ERR, "%s: can't set an empty MAC address",
 dev->device->name);
- return;
+ return -EINVAL;
 }
 /* Check the actual current MAC address on the tap netdevice */
- if (tap_ioctl(pmd, SIOCGIFHWADDR, &ifr, 0, LOCAL_ONLY) < 0)
- return;
+ ret = tap_ioctl(pmd, SIOCGIFHWADDR, &ifr, 0, LOCAL_ONLY);
+ if (ret < 0)
+ return ret;
 if (is_same_ether_addr((struct ether_addr *)&ifr.ifr_hwaddr.sa_data,
 mac_addr))
- return;
+ return 0;
 /* Check the current MAC address on the remote */
- if (tap_ioctl(pmd, SIOCGIFHWADDR, &ifr, 0, REMOTE_ONLY) < 0)
- return;
+ ret = tap_ioctl(pmd, SIOCGIFHWADDR, &ifr, 0, REMOTE_ONLY);
+ if (ret < 0)
+ return ret;
 if (!is_same_ether_addr((struct ether_addr *)&ifr.ifr_hwaddr.sa_data,
 mac_addr))
 mode = LOCAL_AND_REMOTE;
 ifr.ifr_hwaddr.sa_family = AF_LOCAL;
 rte_memcpy(ifr.ifr_hwaddr.sa_data, mac_addr, ETHER_ADDR_LEN);
- if (tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 1, mode) < 0)
- return;
+ ret = tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 1, mode);
+ if (ret < 0)
+ return ret;
 rte_memcpy(&pmd->eth_addr, mac_addr, ETHER_ADDR_LEN);
 if (pmd->remote_if_index && !pmd->flow_isolate) {
 /* Replace MAC redirection rule after a MAC change */
- if (tap_flow_implicit_destroy(pmd, TAP_REMOTE_LOCAL_MAC) < 0) {
- RTE_LOG(ERR, PMD,
- "%s: Couldn't delete MAC redirection rule\n",
+ ret = tap_flow_implicit_destroy(pmd, TAP_REMOTE_LOCAL_MAC);
+ if (ret < 0) {
+ TAP_LOG(ERR,
+ "%s: Couldn't delete MAC redirection rule",
 dev->device->name);
- return;
+ return ret;
 }
- if (tap_flow_implicit_create(pmd, TAP_REMOTE_LOCAL_MAC) < 0)
- RTE_LOG(ERR, PMD,
- "%s: Couldn't add MAC redirection rule\n",
+ ret = tap_flow_implicit_create(pmd, TAP_REMOTE_LOCAL_MAC);
+ if (ret < 0) {
+ TAP_LOG(ERR,
+ "%s: Couldn't add MAC redirection rule",
 dev->device->name);
+ return ret;
+ }
 }
+
+ return 0;
 }

 static int
@@ -997,35 +1027,35 @@ tap_setup_queue(struct rte_eth_dev *dev,
 }
 if (*fd != -1) {
 /* fd for this queue already exists */
- RTE_LOG(DEBUG, PMD, "%s: fd %d for %s queue qid %d exists\n",
+ TAP_LOG(DEBUG, "%s: fd %d for %s queue qid %d exists",
 pmd->name, *fd, dir, qid);
 } else if (*other_fd != -1) {
 /* Only other_fd exists. dup it */
 *fd = dup(*other_fd);
 if (*fd < 0) {
 *fd = -1;
- RTE_LOG(ERR, PMD, "%s: dup() failed.\n",
- pmd->name);
+ TAP_LOG(ERR, "%s: dup() failed.", pmd->name);
 return -1;
 }
- RTE_LOG(DEBUG, PMD, "%s: dup fd %d for %s queue qid %d (%d)\n",
+ TAP_LOG(DEBUG, "%s: dup fd %d for %s queue qid %d (%d)",
 pmd->name, *other_fd, dir, qid, *fd);
 } else {
 /* Both RX and TX fds do not exist (equal -1).
Create fd */ - *fd = tun_alloc(pmd); + *fd = tun_alloc(pmd, 0); if (*fd < 0) { *fd = -1; /* restore original value */ - RTE_LOG(ERR, PMD, "%s: tun_alloc() failed.\n", - pmd->name); + TAP_LOG(ERR, "%s: tun_alloc() failed.", pmd->name); return -1; } - RTE_LOG(DEBUG, PMD, "%s: add %s queue for qid %d fd %d\n", + TAP_LOG(DEBUG, "%s: add %s queue for qid %d fd %d", pmd->name, dir, qid, *fd); } tx->mtu = &dev->data->mtu; rx->rxmode = &dev->data->dev_conf.rxmode; + tx->type = pmd->type; + return *fd; } @@ -1049,25 +1079,12 @@ tap_rx_queue_setup(struct rte_eth_dev *dev, int i; if (rx_queue_id >= dev->data->nb_rx_queues || !mp) { - RTE_LOG(WARNING, PMD, - "nb_rx_queues %d too small or mempool NULL\n", + TAP_LOG(WARNING, + "nb_rx_queues %d too small or mempool NULL", dev->data->nb_rx_queues); return -1; } - /* Verify application offloads are valid for our port and queue. */ - if (!tap_rxq_are_offloads_valid(dev, rx_conf->offloads)) { - rte_errno = ENOTSUP; - RTE_LOG(ERR, PMD, - "%p: Rx queue offloads 0x%" PRIx64 - " don't match port offloads 0x%" PRIx64 - " or supported offloads 0x%" PRIx64 "\n", - (void *)dev, rx_conf->offloads, - dev->data->dev_conf.rxmode.offloads, - (tap_rx_offload_get_port_capa() | - tap_rx_offload_get_queue_capa())); - return -rte_errno; - } rxq->mp = mp; rxq->trigger_seen = 1; /* force initial burst */ rxq->in_port = dev->data->port_id; @@ -1075,8 +1092,8 @@ tap_rx_queue_setup(struct rte_eth_dev *dev, iovecs = rte_zmalloc_socket(dev->device->name, sizeof(*iovecs), 0, socket_id); if (!iovecs) { - RTE_LOG(WARNING, PMD, - "%s: Couldn't allocate %d RX descriptors\n", + TAP_LOG(WARNING, + "%s: Couldn't allocate %d RX descriptors", dev->device->name, nb_desc); return -ENOMEM; } @@ -1095,8 +1112,8 @@ tap_rx_queue_setup(struct rte_eth_dev *dev, for (i = 1; i <= nb_desc; i++) { *tmp = rte_pktmbuf_alloc(rxq->mp); if (!*tmp) { - RTE_LOG(WARNING, PMD, - "%s: couldn't allocate memory for queue %d\n", + TAP_LOG(WARNING, + "%s: couldn't allocate memory for queue %d", dev->device->name, rx_queue_id); ret = -ENOMEM; goto error; @@ -1108,7 +1125,7 @@ tap_rx_queue_setup(struct rte_eth_dev *dev, tmp = &(*tmp)->next; } - RTE_LOG(DEBUG, PMD, " RX TAP device name %s, qid %d on fd %d\n", + TAP_LOG(DEBUG, " RX TUNTAP device name %s, qid %d on fd %d", internals->name, rx_queue_id, internals->rxq[rx_queue_id].fd); return 0; @@ -1131,39 +1148,24 @@ tap_tx_queue_setup(struct rte_eth_dev *dev, struct pmd_internals *internals = dev->data->dev_private; struct tx_queue *txq; int ret; + uint64_t offloads; if (tx_queue_id >= dev->data->nb_tx_queues) return -1; dev->data->tx_queues[tx_queue_id] = &internals->txq[tx_queue_id]; txq = dev->data->tx_queues[tx_queue_id]; - /* - * Don't verify port offloads for application which - * use the old API. 
- */ - if (tx_conf != NULL && - !!(tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE)) { - if (tap_txq_are_offloads_valid(dev, tx_conf->offloads)) { - txq->csum = !!(tx_conf->offloads & - (DEV_TX_OFFLOAD_IPV4_CKSUM | - DEV_TX_OFFLOAD_UDP_CKSUM | - DEV_TX_OFFLOAD_TCP_CKSUM)); - } else { - rte_errno = ENOTSUP; - RTE_LOG(ERR, PMD, - "%p: Tx queue offloads 0x%" PRIx64 - " don't match port offloads 0x%" PRIx64 - " or supported offloads 0x%" PRIx64, - (void *)dev, tx_conf->offloads, - dev->data->dev_conf.txmode.offloads, - tap_tx_offload_get_port_capa()); - return -rte_errno; - } - } + + offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads; + txq->csum = !!(offloads & + (DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM)); + ret = tap_setup_queue(dev, internals, tx_queue_id, 0); if (ret == -1) return -1; - RTE_LOG(DEBUG, PMD, - " TX TAP device name %s, qid %d on fd %d csum %s\n", + TAP_LOG(DEBUG, + " TX TUNTAP device name %s, qid %d on fd %d csum %s", internals->name, tx_queue_id, internals->txq[tx_queue_id].fd, txq->csum ? "on" : "off"); @@ -1307,6 +1309,39 @@ tap_flow_ctrl_set(struct rte_eth_dev *dev __rte_unused, return 0; } +/** + * DPDK callback to update the RSS hash configuration. + * + * @param dev + * Pointer to Ethernet device structure. + * @param[in] rss_conf + * RSS configuration data. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +tap_rss_hash_update(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + if (rss_conf->rss_hf & TAP_RSS_HF_MASK) { + rte_errno = EINVAL; + return -rte_errno; + } + if (rss_conf->rss_key && rss_conf->rss_key_len) { + /* + * Currently TAP RSS key is hard coded + * and cannot be updated + */ + TAP_LOG(ERR, + "port %u RSS key cannot be updated", + dev->data->port_id); + rte_errno = EINVAL; + return -rte_errno; + } + return 0; +} + static const struct eth_dev_ops ops = { .dev_start = tap_dev_start, .dev_stop = tap_dev_stop, @@ -1332,12 +1367,14 @@ static const struct eth_dev_ops ops = { .stats_get = tap_stats_get, .stats_reset = tap_stats_reset, .dev_supported_ptypes_get = tap_dev_supported_ptypes_get, + .rss_hash_update = tap_rss_hash_update, .filter_ctrl = tap_dev_filter_ctrl, }; static int eth_dev_tap_create(struct rte_vdev_device *vdev, char *tap_name, - char *remote_iface, int fixed_mac_type) + char *remote_iface, struct ether_addr *mac_addr, + enum rte_tuntap_type type) { int numa_node = rte_socket_id(); struct rte_eth_dev *dev; @@ -1346,34 +1383,31 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, char *tap_name, struct ifreq ifr; int i; - RTE_LOG(DEBUG, PMD, " TAP device on numa %u\n", rte_socket_id()); - - data = rte_zmalloc_socket(tap_name, sizeof(*data), 0, numa_node); - if (!data) { - RTE_LOG(ERR, PMD, "TAP Failed to allocate data\n"); - goto error_exit_nodev; - } + TAP_LOG(DEBUG, "%s device on numa %u", + tuntap_name, rte_socket_id()); dev = rte_eth_vdev_allocate(vdev, sizeof(*pmd)); if (!dev) { - RTE_LOG(ERR, PMD, "TAP Unable to allocate device struct\n"); + TAP_LOG(ERR, "%s Unable to allocate device struct", + tuntap_name); goto error_exit_nodev; } pmd = dev->data->dev_private; pmd->dev = dev; snprintf(pmd->name, sizeof(pmd->name), "%s", tap_name); + pmd->type = type; pmd->ioctl_sock = socket(AF_INET, SOCK_DGRAM, 0); if (pmd->ioctl_sock == -1) { - RTE_LOG(ERR, PMD, - "TAP Unable to get a socket for management: %s\n", - strerror(errno)); + TAP_LOG(ERR, + "%s Unable to get a socket for management: %s", + tuntap_name, 
strerror(errno)); goto error_exit; } /* Setup some default values */ - rte_memcpy(data, dev->data, sizeof(*data)); + data = dev->data; data->dev_private = pmd; data->dev_flags = RTE_ETH_DEV_INTR_LSC; data->numa_node = numa_node; @@ -1384,7 +1418,6 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, char *tap_name, data->nb_rx_queues = 0; data->nb_tx_queues = 0; - dev->data = data; dev->dev_ops = &ops; dev->rx_pkt_burst = pmd_rx_burst; dev->tx_pkt_burst = pmd_tx_burst; @@ -1394,39 +1427,43 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, char *tap_name, dev->intr_handle = &pmd->intr_handle; /* Presetup the fds to -1 as being not valid */ + pmd->ka_fd = -1; for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) { pmd->rxq[i].fd = -1; pmd->txq[i].fd = -1; } - if (fixed_mac_type) { - /* fixed mac = 00:64:74:61:70: */ - static int iface_idx; - char mac[ETHER_ADDR_LEN] = "\0dtap"; - - mac[ETHER_ADDR_LEN - 1] = iface_idx++; - rte_memcpy(&pmd->eth_addr, mac, ETHER_ADDR_LEN); - } else { - eth_random_addr((uint8_t *)&pmd->eth_addr); + if (pmd->type == ETH_TUNTAP_TYPE_TAP) { + if (is_zero_ether_addr(mac_addr)) + eth_random_addr((uint8_t *)&pmd->eth_addr); + else + rte_memcpy(&pmd->eth_addr, mac_addr, sizeof(*mac_addr)); } - /* Immediately create the netdevice (this will create the 1st queue). */ - /* rx queue */ - if (tap_setup_queue(dev, pmd, 0, 1) == -1) - goto error_exit; - /* tx queue */ - if (tap_setup_queue(dev, pmd, 0, 0) == -1) + /* + * Allocate a TUN device keep-alive file descriptor that will only be + * closed when the TUN device itself is closed or removed. + * This keep-alive file descriptor will guarantee that the TUN device + * exists even when all of its queues are closed + */ + pmd->ka_fd = tun_alloc(pmd, 1); + if (pmd->ka_fd == -1) { + TAP_LOG(ERR, "Unable to create %s interface", tuntap_name); goto error_exit; + } ifr.ifr_mtu = dev->data->mtu; if (tap_ioctl(pmd, SIOCSIFMTU, &ifr, 1, LOCAL_AND_REMOTE) < 0) goto error_exit; - memset(&ifr, 0, sizeof(struct ifreq)); - ifr.ifr_hwaddr.sa_family = AF_LOCAL; - rte_memcpy(ifr.ifr_hwaddr.sa_data, &pmd->eth_addr, ETHER_ADDR_LEN); - if (tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 0, LOCAL_ONLY) < 0) - goto error_exit; + if (pmd->type == ETH_TUNTAP_TYPE_TAP) { + memset(&ifr, 0, sizeof(struct ifreq)); + ifr.ifr_hwaddr.sa_family = AF_LOCAL; + rte_memcpy(ifr.ifr_hwaddr.sa_data, &pmd->eth_addr, + ETHER_ADDR_LEN); + if (tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 0, LOCAL_ONLY) < 0) + goto error_exit; + } /* * Set up everything related to rte_flow: @@ -1438,22 +1475,22 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, char *tap_name, */ pmd->nlsk_fd = tap_nl_init(0); if (pmd->nlsk_fd == -1) { - RTE_LOG(WARNING, PMD, "%s: failed to create netlink socket.\n", + TAP_LOG(WARNING, "%s: failed to create netlink socket.", pmd->name); goto disable_rte_flow; } pmd->if_index = if_nametoindex(pmd->name); if (!pmd->if_index) { - RTE_LOG(ERR, PMD, "%s: failed to get if_index.\n", pmd->name); + TAP_LOG(ERR, "%s: failed to get if_index.", pmd->name); goto disable_rte_flow; } if (qdisc_create_multiq(pmd->nlsk_fd, pmd->if_index) < 0) { - RTE_LOG(ERR, PMD, "%s: failed to create multiq qdisc.\n", + TAP_LOG(ERR, "%s: failed to create multiq qdisc.", pmd->name); goto disable_rte_flow; } if (qdisc_create_ingress(pmd->nlsk_fd, pmd->if_index) < 0) { - RTE_LOG(ERR, PMD, "%s: failed to create ingress qdisc.\n", + TAP_LOG(ERR, "%s: failed to create ingress qdisc.", pmd->name); goto disable_rte_flow; } @@ -1462,7 +1499,7 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, char *tap_name, if 
(strlen(remote_iface)) { pmd->remote_if_index = if_nametoindex(remote_iface); if (!pmd->remote_if_index) { - RTE_LOG(ERR, PMD, "%s: failed to get %s if_index.\n", + TAP_LOG(ERR, "%s: failed to get %s if_index.", pmd->name, remote_iface); goto error_remote; } @@ -1474,7 +1511,7 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, char *tap_name, /* Replicate remote MAC address */ if (tap_ioctl(pmd, SIOCGIFHWADDR, &ifr, 0, REMOTE_ONLY) < 0) { - RTE_LOG(ERR, PMD, "%s: failed to get %s MAC address.\n", + TAP_LOG(ERR, "%s: failed to get %s MAC address.", pmd->name, pmd->remote_iface); goto error_remote; } @@ -1482,7 +1519,7 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, char *tap_name, ETHER_ADDR_LEN); /* The desired MAC is already in ifreq after SIOCGIFHWADDR. */ if (tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 0, LOCAL_ONLY) < 0) { - RTE_LOG(ERR, PMD, "%s: failed to get %s MAC address.\n", + TAP_LOG(ERR, "%s: failed to get %s MAC address.", pmd->name, remote_iface); goto error_remote; } @@ -1495,7 +1532,7 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, char *tap_name, qdisc_flush(pmd->nlsk_fd, pmd->remote_if_index); if (qdisc_create_ingress(pmd->nlsk_fd, pmd->remote_if_index) < 0) { - RTE_LOG(ERR, PMD, "%s: failed to create ingress qdisc.\n", + TAP_LOG(ERR, "%s: failed to create ingress qdisc.", pmd->remote_iface); goto error_remote; } @@ -1504,26 +1541,27 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, char *tap_name, tap_flow_implicit_create(pmd, TAP_REMOTE_LOCAL_MAC) < 0 || tap_flow_implicit_create(pmd, TAP_REMOTE_BROADCAST) < 0 || tap_flow_implicit_create(pmd, TAP_REMOTE_BROADCASTV6) < 0) { - RTE_LOG(ERR, PMD, - "%s: failed to create implicit rules.\n", + TAP_LOG(ERR, + "%s: failed to create implicit rules.", pmd->name); goto error_remote; } } + rte_eth_dev_probing_finish(dev); return 0; disable_rte_flow: - RTE_LOG(ERR, PMD, " Disabling rte flow support: %s(%d)\n", + TAP_LOG(ERR, " Disabling rte flow support: %s(%d)", strerror(errno), errno); if (strlen(remote_iface)) { - RTE_LOG(ERR, PMD, "Remote feature requires flow support.\n"); + TAP_LOG(ERR, "Remote feature requires flow support."); goto error_exit; } return 0; error_remote: - RTE_LOG(ERR, PMD, " Can't set up remote feature: %s(%d)\n", + TAP_LOG(ERR, " Can't set up remote feature: %s(%d)", strerror(errno), errno); tap_flow_implicit_flush(pmd, NULL); @@ -1533,10 +1571,9 @@ error_exit: rte_eth_dev_release_port(dev); error_exit_nodev: - RTE_LOG(ERR, PMD, "TAP Unable to initialize %s\n", - rte_vdev_device_name(vdev)); + TAP_LOG(ERR, "%s Unable to initialize %s", + tuntap_name, rte_vdev_device_name(vdev)); - rte_free(data); return -EINVAL; } @@ -1548,7 +1585,7 @@ set_interface_name(const char *key __rte_unused, char *name = (char *)extra_args; if (value) - snprintf(name, RTE_ETH_NAME_MAX_LEN - 1, "%s", value); + strlcpy(name, value, RTE_ETH_NAME_MAX_LEN - 1); else snprintf(name, RTE_ETH_NAME_MAX_LEN - 1, "%s%d", DEFAULT_TAP_NAME, (tap_unit - 1)); @@ -1564,20 +1601,134 @@ set_remote_iface(const char *key __rte_unused, char *name = (char *)extra_args; if (value) - snprintf(name, RTE_ETH_NAME_MAX_LEN, "%s", value); + strlcpy(name, value, RTE_ETH_NAME_MAX_LEN); return 0; } +static int parse_user_mac(struct ether_addr *user_mac, + const char *value) +{ + unsigned int index = 0; + char mac_temp[strlen(ETH_TAP_USR_MAC_FMT) + 1], *mac_byte = NULL; + + if (user_mac == NULL || value == NULL) + return 0; + + strlcpy(mac_temp, value, sizeof(mac_temp)); + mac_byte = strtok(mac_temp, ":"); + + while ((mac_byte != NULL) && + (strlen(mac_byte) <= 2) 
&& + (strlen(mac_byte) == strspn(mac_byte, + ETH_TAP_CMP_MAC_FMT))) { + user_mac->addr_bytes[index++] = strtoul(mac_byte, NULL, 16); + mac_byte = strtok(NULL, ":"); + } + + return index; +} + static int set_mac_type(const char *key __rte_unused, const char *value, void *extra_args) { - if (value && - !strncasecmp(ETH_TAP_MAC_FIXED, value, strlen(ETH_TAP_MAC_FIXED))) - *(int *)extra_args = 1; + struct ether_addr *user_mac = extra_args; + + if (!value) + return 0; + + if (!strncasecmp(ETH_TAP_MAC_FIXED, value, strlen(ETH_TAP_MAC_FIXED))) { + static int iface_idx; + + /* fixed mac = 00:64:74:61:70: */ + memcpy((char *)user_mac->addr_bytes, "\0dtap", ETHER_ADDR_LEN); + user_mac->addr_bytes[ETHER_ADDR_LEN - 1] = iface_idx++ + '0'; + goto success; + } + + if (parse_user_mac(user_mac, value) != 6) + goto error; +success: + TAP_LOG(DEBUG, "TAP user MAC param (%s)", value); return 0; + +error: + TAP_LOG(ERR, "TAP user MAC (%s) is not in format (%s|%s)", + value, ETH_TAP_MAC_FIXED, ETH_TAP_USR_MAC_FMT); + return -1; +} + +/* + * Open a TUN interface device. TUN PMD + * 1) sets tap_type as false + * 2) intakes iface as argument. + * 3) as interface is virtual set speed to 10G + */ +static int +rte_pmd_tun_probe(struct rte_vdev_device *dev) +{ + const char *name, *params; + int ret; + struct rte_kvargs *kvlist = NULL; + char tun_name[RTE_ETH_NAME_MAX_LEN]; + char remote_iface[RTE_ETH_NAME_MAX_LEN]; + struct rte_eth_dev *eth_dev; + + strcpy(tuntap_name, "TUN"); + + name = rte_vdev_device_name(dev); + params = rte_vdev_device_args(dev); + memset(remote_iface, 0, RTE_ETH_NAME_MAX_LEN); + + if (rte_eal_process_type() == RTE_PROC_SECONDARY && + strlen(params) == 0) { + eth_dev = rte_eth_dev_attach_secondary(name); + if (!eth_dev) { + TAP_LOG(ERR, "Failed to probe %s", name); + return -1; + } + eth_dev->dev_ops = &ops; + return 0; + } + + snprintf(tun_name, sizeof(tun_name), "%s%u", + DEFAULT_TUN_NAME, tun_unit++); + + if (params && (params[0] != '\0')) { + TAP_LOG(DEBUG, "parameters (%s)", params); + + kvlist = rte_kvargs_parse(params, valid_arguments); + if (kvlist) { + if (rte_kvargs_count(kvlist, ETH_TAP_IFACE_ARG) == 1) { + ret = rte_kvargs_process(kvlist, + ETH_TAP_IFACE_ARG, + &set_interface_name, + tun_name); + + if (ret == -1) + goto leave; + } + } + } + pmd_link.link_speed = ETH_SPEED_NUM_10G; + + TAP_LOG(NOTICE, "Initializing pmd_tun for %s as %s", + name, tun_name); + + ret = eth_dev_tap_create(dev, tun_name, remote_iface, 0, + ETH_TUNTAP_TYPE_TUN); + +leave: + if (ret == -1) { + TAP_LOG(ERR, "Failed to create pmd for %s as %s", + name, tun_name); + tun_unit--; /* Restore the unit number */ + } + rte_kvargs_free(kvlist); + + return ret; } /* Open a TAP interface device. 
@@ -1591,18 +1742,34 @@ rte_pmd_tap_probe(struct rte_vdev_device *dev) int speed; char tap_name[RTE_ETH_NAME_MAX_LEN]; char remote_iface[RTE_ETH_NAME_MAX_LEN]; - int fixed_mac_type = 0; + struct ether_addr user_mac = { .addr_bytes = {0} }; + struct rte_eth_dev *eth_dev; + + strcpy(tuntap_name, "TAP"); name = rte_vdev_device_name(dev); params = rte_vdev_device_args(dev); + if (rte_eal_process_type() == RTE_PROC_SECONDARY && + strlen(params) == 0) { + eth_dev = rte_eth_dev_attach_secondary(name); + if (!eth_dev) { + TAP_LOG(ERR, "Failed to probe %s", name); + return -1; + } + /* TODO: request info from primary to set up Rx and Tx */ + eth_dev->dev_ops = &ops; + rte_eth_dev_probing_finish(eth_dev); + return 0; + } + speed = ETH_SPEED_NUM_10G; snprintf(tap_name, sizeof(tap_name), "%s%d", DEFAULT_TAP_NAME, tap_unit++); memset(remote_iface, 0, RTE_ETH_NAME_MAX_LEN); if (params && (params[0] != '\0')) { - RTE_LOG(DEBUG, PMD, "parameters (%s)\n", params); + TAP_LOG(DEBUG, "parameters (%s)", params); kvlist = rte_kvargs_parse(params, valid_arguments); if (kvlist) { @@ -1628,7 +1795,7 @@ rte_pmd_tap_probe(struct rte_vdev_device *dev) ret = rte_kvargs_process(kvlist, ETH_TAP_MAC_ARG, &set_mac_type, - &fixed_mac_type); + &user_mac); if (ret == -1) goto leave; } @@ -1636,14 +1803,15 @@ rte_pmd_tap_probe(struct rte_vdev_device *dev) } pmd_link.link_speed = speed; - RTE_LOG(NOTICE, PMD, "Initializing pmd_tap for %s as %s\n", + TAP_LOG(NOTICE, "Initializing pmd_tap for %s as %s", name, tap_name); - ret = eth_dev_tap_create(dev, tap_name, remote_iface, fixed_mac_type); + ret = eth_dev_tap_create(dev, tap_name, remote_iface, &user_mac, + ETH_TUNTAP_TYPE_TAP); leave: if (ret == -1) { - RTE_LOG(ERR, PMD, "Failed to create pmd for %s as %s\n", + TAP_LOG(ERR, "Failed to create pmd for %s as %s", name, tap_name); tap_unit--; /* Restore the unit number */ } @@ -1652,7 +1820,7 @@ leave: return ret; } -/* detach a TAP device. +/* detach a TUNTAP device. */ static int rte_pmd_tap_remove(struct rte_vdev_device *dev) @@ -1661,15 +1829,17 @@ rte_pmd_tap_remove(struct rte_vdev_device *dev) struct pmd_internals *internals; int i; - RTE_LOG(DEBUG, PMD, "Closing TUN/TAP Ethernet device on numa %u\n", - rte_socket_id()); - /* find the ethdev entry */ eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev)); if (!eth_dev) return 0; internals = eth_dev->data->dev_private; + + TAP_LOG(DEBUG, "Closing %s Ethernet device on numa %u", + (internals->type == ETH_TUNTAP_TYPE_TAP) ? 
"TAP" : "TUN", + rte_socket_id()); + if (internals->nlsk_fd) { tap_flow_flush(eth_dev, NULL); tap_flow_implicit_flush(internals, NULL); @@ -1688,20 +1858,41 @@ rte_pmd_tap_remove(struct rte_vdev_device *dev) close(internals->ioctl_sock); rte_free(eth_dev->data->dev_private); - rte_free(eth_dev->data); - rte_eth_dev_release_port(eth_dev); + if (internals->ka_fd != -1) { + close(internals->ka_fd); + internals->ka_fd = -1; + } return 0; } +static struct rte_vdev_driver pmd_tun_drv = { + .probe = rte_pmd_tun_probe, + .remove = rte_pmd_tap_remove, +}; + static struct rte_vdev_driver pmd_tap_drv = { .probe = rte_pmd_tap_probe, .remove = rte_pmd_tap_remove, }; + RTE_PMD_REGISTER_VDEV(net_tap, pmd_tap_drv); +RTE_PMD_REGISTER_VDEV(net_tun, pmd_tun_drv); RTE_PMD_REGISTER_ALIAS(net_tap, eth_tap); +RTE_PMD_REGISTER_PARAM_STRING(net_tun, + ETH_TAP_IFACE_ARG "= "); RTE_PMD_REGISTER_PARAM_STRING(net_tap, ETH_TAP_IFACE_ARG "= " - ETH_TAP_MAC_ARG "=" ETH_TAP_MAC_FIXED " " + ETH_TAP_MAC_ARG "=" ETH_TAP_MAC_ARG_FMT " " ETH_TAP_REMOTE_ARG "="); +int tap_logtype; + +RTE_INIT(tap_init_log); +static void +tap_init_log(void) +{ + tap_logtype = rte_log_register("pmd.net.tap"); + if (tap_logtype >= 0) + rte_log_set_level(tap_logtype, RTE_LOG_NOTICE); +} diff --git a/drivers/net/tap/rte_eth_tap.h b/drivers/net/tap/rte_eth_tap.h index 53a506ad..7b21d0d8 100644 --- a/drivers/net/tap/rte_eth_tap.h +++ b/drivers/net/tap/rte_eth_tap.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2017 6WIND S.A. - * Copyright 2017 Mellanox. + * Copyright 2017 Mellanox Technologies, Ltd */ #ifndef _RTE_ETH_TAP_H_ @@ -15,6 +15,7 @@ #include #include +#include "tap_log.h" #ifdef IFF_MULTI_QUEUE #define RTE_PMD_TAP_MAX_QUEUES TAP_MAX_QUEUES @@ -22,6 +23,13 @@ #define RTE_PMD_TAP_MAX_QUEUES 1 #endif +enum rte_tuntap_type { + ETH_TUNTAP_TYPE_UNKNOWN, + ETH_TUNTAP_TYPE_TUN, + ETH_TUNTAP_TYPE_TAP, + ETH_TUNTAP_TYPE_MAX, +}; + struct pkt_stats { uint64_t opackets; /* Number of output packets */ uint64_t ipackets; /* Number of input packets */ @@ -47,6 +55,7 @@ struct rx_queue { struct tx_queue { int fd; + int type; /* Type field - TUN|TAP */ uint16_t *mtu; /* Pointer to MTU from dev_data */ uint16_t csum:1; /* Enable checksum offloading */ struct pkt_stats stats; /* Stats for this TX queue */ @@ -56,6 +65,7 @@ struct pmd_internals { struct rte_eth_dev *dev; /* Ethernet device. */ char remote_iface[RTE_ETH_NAME_MAX_LEN]; /* Remote netdevice name */ char name[RTE_ETH_NAME_MAX_LEN]; /* Internal Tap device name */ + int type; /* Type field - TUN|TAP */ struct ether_addr eth_addr; /* Mac address of the device port */ struct ifreq remote_initial_flags; /* Remote netdevice flags on init */ int remote_if_index; /* remote netdevice IF_INDEX */ @@ -76,6 +86,7 @@ struct pmd_internals { struct rx_queue rxq[RTE_PMD_TAP_MAX_QUEUES]; /* List of RX queues */ struct tx_queue txq[RTE_PMD_TAP_MAX_QUEUES]; /* List of TX queues */ struct rte_intr_handle intr_handle; /* LSC interrupt handle. */ + int ka_fd; /* keep-alive file descriptor */ }; /* tap_intr.c */ diff --git a/drivers/net/tap/tap_bpf.h b/drivers/net/tap/tap_bpf.h index 1a70ffe2..9192686a 100644 --- a/drivers/net/tap/tap_bpf.h +++ b/drivers/net/tap/tap_bpf.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 - * Copyright 2017 Mellanox Technologies, Ltd. 
+ * Copyright 2017 Mellanox Technologies, Ltd */ #ifndef __TAP_BPF_H__ diff --git a/drivers/net/tap/tap_bpf_api.c b/drivers/net/tap/tap_bpf_api.c index 109a681e..98f6a760 100644 --- a/drivers/net/tap/tap_bpf_api.c +++ b/drivers/net/tap/tap_bpf_api.c @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause - * Copyright 2017 Mellanox Technologies, Ltd. + * Copyright 2017 Mellanox Technologies, Ltd */ #include diff --git a/drivers/net/tap/tap_bpf_insns.h b/drivers/net/tap/tap_bpf_insns.h index 89873b6d..79e3e66b 100644 --- a/drivers/net/tap/tap_bpf_insns.h +++ b/drivers/net/tap/tap_bpf_insns.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause - * Copyright 2017 Mellanox Technologies, Ltd. + * Copyright 2017 Mellanox Technologies, Ltd */ #include diff --git a/drivers/net/tap/tap_bpf_program.c b/drivers/net/tap/tap_bpf_program.c index 8abb3b76..1cb73822 100644 --- a/drivers/net/tap/tap_bpf_program.c +++ b/drivers/net/tap/tap_bpf_program.c @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 - * Copyright 2017 Mellanox Technologies, Ltd. + * Copyright 2017 Mellanox Technologies, Ltd */ #include @@ -84,7 +84,7 @@ struct ipv6_l3_l4_tuple { __u16 sport; } __attribute__((packed)); -static const __u8 def_rss_key[] = { +static const __u8 def_rss_key[TAP_RSS_HASH_KEY_SIZE] = { 0xd1, 0x81, 0xc6, 0x2c, 0xf7, 0xf4, 0xdb, 0x5b, 0x19, 0x83, 0xa2, 0xfc, diff --git a/drivers/net/tap/tap_flow.c b/drivers/net/tap/tap_flow.c index 551b2d83..6b60e6dc 100644 --- a/drivers/net/tap/tap_flow.c +++ b/drivers/net/tap/tap_flow.c @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2017 6WIND S.A. - * Copyright 2017 Mellanox. + * Copyright 2017 Mellanox Technologies, Ltd */ #include @@ -270,13 +270,13 @@ static const struct tap_flow_items tap_flow_items[] = { .items = ITEMS(RTE_FLOW_ITEM_TYPE_IPV4, RTE_FLOW_ITEM_TYPE_IPV6), .mask = &(const struct rte_flow_item_vlan){ - .tpid = -1, /* DEI matching is not supported */ #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN .tci = 0xffef, #else .tci = 0xefff, #endif + .inner_type = -1, }, .mask_sz = sizeof(struct rte_flow_item_vlan), .default_mask = &rte_flow_item_vlan_mask, @@ -578,13 +578,19 @@ tap_flow_create_vlan(const struct rte_flow_item *item, void *data) /* use default mask if none provided */ if (!mask) mask = tap_flow_items[RTE_FLOW_ITEM_TYPE_VLAN].default_mask; - /* TC does not support tpid masking. Only accept if exact match. */ - if (mask->tpid && mask->tpid != 0xffff) + /* Outer TPID cannot be matched. */ + if (info->eth_type) return -1; /* Double-tagging not supported. 
*/ - if (spec && mask->tpid && spec->tpid != htons(ETH_P_8021Q)) + if (info->vlan) return -1; info->vlan = 1; + if (mask->inner_type) { + /* TC does not support partial eth_type masking */ + if (mask->inner_type != RTE_BE16(0xffff)) + return -1; + info->eth_type = spec->inner_type; + } if (!flow) return 0; msg = &flow->msg; @@ -1033,6 +1039,12 @@ priv_flow_process(struct pmd_internals *pmd, }; int action = 0; /* Only one action authorized for now */ + if (attr->transfer) { + rte_flow_error_set( + error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + NULL, "transfer is not supported"); + return -rte_errno; + } if (attr->group > MAX_GROUP) { rte_flow_error_set( error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR_GROUP, @@ -1140,6 +1152,7 @@ priv_flow_process(struct pmd_internals *pmd, else goto end; } +actions: for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) { int err = 0; @@ -1214,7 +1227,7 @@ priv_flow_process(struct pmd_internals *pmd, if (err) goto exit_action_not_supported; } - if (flow && rss) + if (flow) err = rss_add_actions(flow, pmd, rss, error); } else { goto exit_action_not_supported; @@ -1222,6 +1235,16 @@ priv_flow_process(struct pmd_internals *pmd, if (err) goto exit_action_not_supported; } + /* When fate is unknown, drop traffic. */ + if (!action) { + static const struct rte_flow_action drop[] = { + { .type = RTE_FLOW_ACTION_TYPE_DROP, }, + { .type = RTE_FLOW_ACTION_TYPE_END, }, + }; + + actions = drop; + goto actions; + } end: if (flow) tap_nlattr_nested_finish(&flow->msg); /* nested TCA_OPTIONS */ @@ -1376,8 +1399,8 @@ tap_flow_create(struct rte_eth_dev *dev, } err = tap_nl_recv_ack(pmd->nlsk_fd); if (err < 0) { - RTE_LOG(ERR, PMD, - "Kernel refused TC filter rule creation (%d): %s\n", + TAP_LOG(ERR, + "Kernel refused TC filter rule creation (%d): %s", errno, strerror(errno)); rte_flow_error_set(error, EEXIST, RTE_FLOW_ERROR_TYPE_HANDLE, NULL, @@ -1421,8 +1444,8 @@ tap_flow_create(struct rte_eth_dev *dev, } err = tap_nl_recv_ack(pmd->nlsk_fd); if (err < 0) { - RTE_LOG(ERR, PMD, - "Kernel refused TC filter rule creation (%d): %s\n", + TAP_LOG(ERR, + "Kernel refused TC filter rule creation (%d): %s", errno, strerror(errno)); rte_flow_error_set( error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, @@ -1476,8 +1499,8 @@ tap_flow_destroy_pmd(struct pmd_internals *pmd, if (ret < 0 && errno == ENOENT) ret = 0; if (ret < 0) { - RTE_LOG(ERR, PMD, - "Kernel refused TC filter rule deletion (%d): %s\n", + TAP_LOG(ERR, + "Kernel refused TC filter rule deletion (%d): %s", errno, strerror(errno)); rte_flow_error_set( error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE, NULL, @@ -1500,8 +1523,8 @@ tap_flow_destroy_pmd(struct pmd_internals *pmd, if (ret < 0 && errno == ENOENT) ret = 0; if (ret < 0) { - RTE_LOG(ERR, PMD, - "Kernel refused TC filter rule deletion (%d): %s\n", + TAP_LOG(ERR, + "Kernel refused TC filter rule deletion (%d): %s", errno, strerror(errno)); rte_flow_error_set( error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, @@ -1545,10 +1568,14 @@ tap_flow_isolate(struct rte_eth_dev *dev, { struct pmd_internals *pmd = dev->data->dev_private; + /* normalize 'set' variable to contain 0 or 1 values */ if (set) - pmd->flow_isolate = 1; - else - pmd->flow_isolate = 0; + set = 1; + /* if already in the right isolation mode - nothing to do */ + if ((set ^ pmd->flow_isolate) == 0) + return 0; + /* mark the isolation mode for tap_flow_implicit_create() */ + pmd->flow_isolate = set; /* * If netdevice is there, setup appropriate flow rules immediately. 
* Otherwise it will be set when bringing up the netdevice (tun_alloc). @@ -1556,20 +1583,20 @@ tap_flow_isolate(struct rte_eth_dev *dev, if (!pmd->rxq[0].fd) return 0; if (set) { - struct rte_flow *flow; + struct rte_flow *remote_flow; while (1) { - flow = LIST_FIRST(&pmd->implicit_flows); - if (!flow) + remote_flow = LIST_FIRST(&pmd->implicit_flows); + if (!remote_flow) break; /* * Remove all implicit rules on the remote. * Keep the local rule to redirect packets on TX. * Keep also the last implicit local rule: ISOLATE. */ - if (flow->msg.t.tcm_ifindex == pmd->if_index) + if (remote_flow->msg.t.tcm_ifindex == pmd->if_index) break; - if (tap_flow_destroy_pmd(pmd, flow, NULL) < 0) + if (tap_flow_destroy_pmd(pmd, remote_flow, NULL) < 0) goto error; } /* Switch the TC rule according to pmd->flow_isolate */ @@ -1665,7 +1692,7 @@ int tap_flow_implicit_create(struct pmd_internals *pmd, remote_flow = rte_malloc(__func__, sizeof(struct rte_flow), 0); if (!remote_flow) { - RTE_LOG(ERR, PMD, "Cannot allocate memory for rte_flow\n"); + TAP_LOG(ERR, "Cannot allocate memory for rte_flow"); goto fail; } msg = &remote_flow->msg; @@ -1706,21 +1733,21 @@ int tap_flow_implicit_create(struct pmd_internals *pmd, tap_flow_set_handle(remote_flow); if (priv_flow_process(pmd, attr, items, actions, NULL, remote_flow, implicit_rte_flows[idx].mirred)) { - RTE_LOG(ERR, PMD, "rte flow rule validation failed\n"); + TAP_LOG(ERR, "rte flow rule validation failed"); goto fail; } err = tap_nl_send(pmd->nlsk_fd, &msg->nh); if (err < 0) { - RTE_LOG(ERR, PMD, "Failure sending nl request\n"); + TAP_LOG(ERR, "Failure sending nl request"); goto fail; } err = tap_nl_recv_ack(pmd->nlsk_fd); if (err < 0) { - /* Silently ignore re-entering remote promiscuous rule */ - if (errno == EEXIST && idx == TAP_REMOTE_PROMISC) + /* Silently ignore re-entering existing rule */ + if (errno == EEXIST) goto success; - RTE_LOG(ERR, PMD, - "Kernel refused TC filter rule creation (%d): %s\n", + TAP_LOG(ERR, + "Kernel refused TC filter rule creation (%d): %s", errno, strerror(errno)); goto fail; } @@ -1836,8 +1863,8 @@ static int rss_enable(struct pmd_internals *pmd, sizeof(struct rss_key), MAX_RSS_KEYS); if (pmd->map_fd < 0) { - RTE_LOG(ERR, PMD, - "Failed to create BPF map (%d): %s\n", + TAP_LOG(ERR, + "Failed to create BPF map (%d): %s", errno, strerror(errno)); rte_flow_error_set( error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE, NULL, @@ -1854,7 +1881,7 @@ static int rss_enable(struct pmd_internals *pmd, for (i = 0; i < pmd->dev->data->nb_rx_queues; i++) { pmd->bpf_fd[i] = tap_flow_bpf_cls_q(i); if (pmd->bpf_fd[i] < 0) { - RTE_LOG(ERR, PMD, + TAP_LOG(ERR, "Failed to load BPF section %s for queue %d", SEC_NAME_CLS_Q, i); rte_flow_error_set( @@ -1868,7 +1895,7 @@ static int rss_enable(struct pmd_internals *pmd, rss_flow = rte_malloc(__func__, sizeof(struct rte_flow), 0); if (!rss_flow) { - RTE_LOG(ERR, PMD, + TAP_LOG(ERR, "Cannot allocate memory for rte_flow"); return -1; } @@ -1911,8 +1938,8 @@ static int rss_enable(struct pmd_internals *pmd, return -1; err = tap_nl_recv_ack(pmd->nlsk_fd); if (err < 0) { - RTE_LOG(ERR, PMD, - "Kernel refused TC filter rule creation (%d): %s\n", + TAP_LOG(ERR, + "Kernel refused TC filter rule creation (%d): %s", errno, strerror(errno)); return err; } @@ -2039,11 +2066,21 @@ static int rss_add_actions(struct rte_flow *flow, struct pmd_internals *pmd, struct rte_flow_error *error) { /* 4096 is the maximum number of instructions for a BPF program */ - int i; + unsigned int i; int err; struct rss_key rss_entry = { 
.hash_fields = 0, .key_size = 0 }; + /* Check supported RSS features */ + if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "non-default RSS hash functions are not supported"); + if (rss->level) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "a nonzero RSS encapsulation level is not supported"); + /* Get a new map key for a new RSS rule */ err = bpf_rss_key(KEY_CMD_GET, &flow->key_idx); if (err < 0) { @@ -2055,8 +2092,8 @@ static int rss_add_actions(struct rte_flow *flow, struct pmd_internals *pmd, } /* Update RSS map entry with queues */ - rss_entry.nb_queues = rss->num; - for (i = 0; i < rss->num; i++) + rss_entry.nb_queues = rss->queue_num; + for (i = 0; i < rss->queue_num; i++) rss_entry.queues[i] = rss->queue[i]; rss_entry.hash_fields = (1 << HASH_FIELD_IPV4_L3_L4) | (1 << HASH_FIELD_IPV6_L3_L4); @@ -2066,8 +2103,8 @@ static int rss_add_actions(struct rte_flow *flow, struct pmd_internals *pmd, &flow->key_idx, &rss_entry); if (err) { - RTE_LOG(ERR, PMD, - "Failed to update BPF map entry #%u (%d): %s\n", + TAP_LOG(ERR, + "Failed to update BPF map entry #%u (%d): %s", flow->key_idx, errno, strerror(errno)); rte_flow_error_set( error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE, NULL, @@ -2085,8 +2122,8 @@ static int rss_add_actions(struct rte_flow *flow, struct pmd_internals *pmd, flow->bpf_fd[SEC_L3_L4] = tap_flow_bpf_calc_l3_l4_hash(flow->key_idx, pmd->map_fd); if (flow->bpf_fd[SEC_L3_L4] < 0) { - RTE_LOG(ERR, PMD, - "Failed to load BPF section %s (%d): %s\n", + TAP_LOG(ERR, + "Failed to load BPF section %s (%d): %s", sec_name[SEC_L3_L4], errno, strerror(errno)); rte_flow_error_set( error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE, NULL, @@ -2147,9 +2184,8 @@ tap_dev_filter_ctrl(struct rte_eth_dev *dev, *(const void **)arg = &tap_flow_ops; return 0; default: - RTE_LOG(ERR, PMD, "%p: filter type (%d) not supported\n", - (void *)dev, filter_type); + TAP_LOG(ERR, "%p: filter type (%d) not supported", + dev, filter_type); } return -EINVAL; } - diff --git a/drivers/net/tap/tap_flow.h b/drivers/net/tap/tap_flow.h index ac6a952d..ac60a9ae 100644 --- a/drivers/net/tap/tap_flow.h +++ b/drivers/net/tap/tap_flow.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2017 6WIND S.A. - * Copyright 2017 Mellanox. + * Copyright 2017 Mellanox Technologies, Ltd */ #ifndef _TAP_FLOW_H_ diff --git a/drivers/net/tap/tap_intr.c b/drivers/net/tap/tap_intr.c index b0e19914..fc590181 100644 --- a/drivers/net/tap/tap_intr.c +++ b/drivers/net/tap/tap_intr.c @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause - * Copyright 2018 Mellanox Technologies, Ltd. + * Copyright 2018 Mellanox Technologies, Ltd */ /** @@ -62,7 +62,7 @@ tap_rx_intr_vec_install(struct rte_eth_dev *dev) intr_handle->intr_vec = malloc(sizeof(intr_handle->intr_vec[rxqs_n])); if (intr_handle->intr_vec == NULL) { rte_errno = ENOMEM; - RTE_LOG(ERR, PMD, + TAP_LOG(ERR, "failed to allocate memory for interrupt vector," " Rx interrupts will not be supported"); return -rte_errno; diff --git a/drivers/net/tap/tap_log.h b/drivers/net/tap/tap_log.h new file mode 100644 index 00000000..fa06843a --- /dev/null +++ b/drivers/net/tap/tap_log.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2017 6WIND S.A. + * Copyright 2017 Mellanox Technologies, Ltd + */ + +extern int tap_logtype; + +#define TAP_LOG(level, fmt, args...) 
\ + rte_log(RTE_LOG_ ## level, tap_logtype, "%s(): " fmt "\n", \ + __func__, ## args) diff --git a/drivers/net/tap/tap_netlink.c b/drivers/net/tap/tap_netlink.c index 82c8dc0e..6cb51009 100644 --- a/drivers/net/tap/tap_netlink.c +++ b/drivers/net/tap/tap_netlink.c @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2017 6WIND S.A. - * Copyright 2017 Mellanox. + * Copyright 2017 Mellanox Technologies, Ltd */ #include @@ -13,6 +13,7 @@ #include #include #include +#include "tap_log.h" /* Must be quite large to support dumping a huge list of QDISC or filters. */ #define BUF_SIZE (32 * 1024) /* Size of the buffer to receive kernel messages */ @@ -45,19 +46,19 @@ tap_nl_init(uint32_t nl_groups) fd = socket(AF_NETLINK, SOCK_RAW | SOCK_CLOEXEC, NETLINK_ROUTE); if (fd < 0) { - RTE_LOG(ERR, PMD, "Unable to create a netlink socket\n"); + TAP_LOG(ERR, "Unable to create a netlink socket"); return -1; } if (setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &sndbuf_size, sizeof(int))) { - RTE_LOG(ERR, PMD, "Unable to set socket buffer send size\n"); + TAP_LOG(ERR, "Unable to set socket buffer send size"); return -1; } if (setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcvbuf_size, sizeof(int))) { - RTE_LOG(ERR, PMD, "Unable to set socket buffer receive size\n"); + TAP_LOG(ERR, "Unable to set socket buffer receive size"); return -1; } if (bind(fd, (struct sockaddr *)&local, sizeof(local)) < 0) { - RTE_LOG(ERR, PMD, "Unable to bind to the netlink socket\n"); + TAP_LOG(ERR, "Unable to bind to the netlink socket"); return -1; } return fd; @@ -76,7 +77,7 @@ int tap_nl_final(int nlsk_fd) { if (close(nlsk_fd)) { - RTE_LOG(ERR, PMD, "Failed to close netlink socket: %s (%d)\n", + TAP_LOG(ERR, "Failed to close netlink socket: %s (%d)", strerror(errno), errno); return -1; } @@ -117,7 +118,7 @@ tap_nl_send(int nlsk_fd, struct nlmsghdr *nh) nh->nlmsg_seq = (uint32_t)rte_rand(); send_bytes = sendmsg(nlsk_fd, &msg, 0); if (send_bytes < 0) { - RTE_LOG(ERR, PMD, "Failed to send netlink message: %s (%d)\n", + TAP_LOG(ERR, "Failed to send netlink message: %s (%d)", strerror(errno), errno); return -1; } @@ -300,9 +301,8 @@ tap_nlattr_nested_start(struct nlmsg *msg, uint16_t type) tail = rte_zmalloc(NULL, sizeof(struct nested_tail), 0); if (!tail) { - RTE_LOG(ERR, PMD, - "Couldn't allocate memory for nested netlink" - " attribute\n"); + TAP_LOG(ERR, + "Couldn't allocate memory for nested netlink attribute"); return -1; } diff --git a/drivers/net/tap/tap_netlink.h b/drivers/net/tap/tap_netlink.h index fafef840..faa73ba1 100644 --- a/drivers/net/tap/tap_netlink.h +++ b/drivers/net/tap/tap_netlink.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2017 6WIND S.A. - * Copyright 2017 Mellanox. + * Copyright 2017 Mellanox Technologies, Ltd */ #ifndef _TAP_NETLINK_H_ diff --git a/drivers/net/tap/tap_rss.h b/drivers/net/tap/tap_rss.h index 3bb0d140..17606b2d 100644 --- a/drivers/net/tap/tap_rss.h +++ b/drivers/net/tap/tap_rss.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause - * Copyright 2017 Mellanox Technologies, Ltd. + * Copyright 2017 Mellanox Technologies, Ltd */ #ifndef _TAP_RSS_H_ @@ -9,6 +9,12 @@ #define TAP_MAX_QUEUES 16 #endif +/* Fixed RSS hash key size in bytes. 
*/ +#define TAP_RSS_HASH_KEY_SIZE 40 + +/* Supported RSS */ +#define TAP_RSS_HF_MASK (~(ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP)) + /* hashed fields for RSS */ enum hash_field { HASH_FIELD_IPV4_L3, /* IPv4 src/dst addr */ diff --git a/drivers/net/tap/tap_tcmsgs.c b/drivers/net/tap/tap_tcmsgs.c index 954f13eb..3c9d0366 100644 --- a/drivers/net/tap/tap_tcmsgs.c +++ b/drivers/net/tap/tap_tcmsgs.c @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2017 6WIND S.A. - * Copyright 2017 Mellanox. + * Copyright 2017 Mellanox Technologies, Ltd */ #include @@ -10,6 +10,7 @@ #include #include +#include "tap_log.h" struct qdisc { uint32_t handle; @@ -81,8 +82,8 @@ qdisc_del(int nlsk_fd, uint16_t ifindex, struct qdisc *qinfo) if (!nlsk_fd) { fd = tap_nl_init(0); if (fd < 0) { - RTE_LOG(ERR, PMD, - "Could not delete QDISC: null netlink socket\n"); + TAP_LOG(ERR, + "Could not delete QDISC: null netlink socket"); return -1; } } else { @@ -261,7 +262,7 @@ qdisc_create_multiq(int nlsk_fd, uint16_t ifindex) err = qdisc_add_multiq(nlsk_fd, ifindex); if (err < 0 && errno != -EEXIST) { - RTE_LOG(ERR, PMD, "Could not add multiq qdisc (%d): %s\n", + TAP_LOG(ERR, "Could not add multiq qdisc (%d): %s", errno, strerror(errno)); return -1; } @@ -287,7 +288,7 @@ qdisc_create_ingress(int nlsk_fd, uint16_t ifindex) err = qdisc_add_ingress(nlsk_fd, ifindex); if (err < 0 && errno != -EEXIST) { - RTE_LOG(ERR, PMD, "Could not add ingress qdisc (%d): %s\n", + TAP_LOG(ERR, "Could not add ingress qdisc (%d): %s", errno, strerror(errno)); return -1; } diff --git a/drivers/net/tap/tap_tcmsgs.h b/drivers/net/tap/tap_tcmsgs.h index f72f8c5c..8cedea84 100644 --- a/drivers/net/tap/tap_tcmsgs.h +++ b/drivers/net/tap/tap_tcmsgs.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2017 6WIND S.A. - * Copyright 2017 Mellanox. + * Copyright 2017 Mellanox Technologies, Ltd */ #ifndef _TAP_TCMSGS_H_ diff --git a/drivers/net/thunderx/base/nicvf_hw_defs.h b/drivers/net/thunderx/base/nicvf_hw_defs.h index b13c21ff..b12c8ec5 100644 --- a/drivers/net/thunderx/base/nicvf_hw_defs.h +++ b/drivers/net/thunderx/base/nicvf_hw_defs.h @@ -171,7 +171,10 @@ /* Min/Max packet size */ #define NIC_HW_MIN_FRS (64) -#define NIC_HW_MAX_FRS (9200) /* 9216 max pkt including FCS */ +/* ETH_HLEN+ETH_FCS_LEN+2*VLAN_HLEN */ +#define NIC_HW_L2_OVERHEAD (26) +#define NIC_HW_MAX_MTU (9190) +#define NIC_HW_MAX_FRS (NIC_HW_MAX_MTU + NIC_HW_L2_OVERHEAD) #define NIC_HW_MAX_SEGS (12) /* Descriptor alignments */ diff --git a/drivers/net/thunderx/nicvf_ethdev.c b/drivers/net/thunderx/nicvf_ethdev.c index a65361fb..99fcd516 100644 --- a/drivers/net/thunderx/nicvf_ethdev.c +++ b/drivers/net/thunderx/nicvf_ethdev.c @@ -15,7 +15,6 @@ #include #include -#include #include #include #include @@ -69,25 +68,14 @@ nicvf_init_log(void) rte_log_set_level(nicvf_logtype_driver, RTE_LOG_NOTICE); } -static inline int -nicvf_atomic_write_link_status(struct rte_eth_dev *dev, - struct rte_eth_link *link) +static void +nicvf_link_status_update(struct nicvf *nic, + struct rte_eth_link *link) { - struct rte_eth_link *dst = &dev->data->dev_link; - struct rte_eth_link *src = link; + memset(link, 0, sizeof(*link)); - if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, - *(uint64_t *)src) == 0) - return -1; - - return 0; -} + link->link_status = nic->link_up ? 
ETH_LINK_UP : ETH_LINK_DOWN; -static inline void -nicvf_set_eth_link_status(struct nicvf *nic, struct rte_eth_link *link) -{ - link->link_status = nic->link_up; - link->link_duplex = ETH_LINK_AUTONEG; if (nic->duplex == NICVF_HALF_DUPLEX) link->link_duplex = ETH_LINK_HALF_DUPLEX; else if (nic->duplex == NICVF_FULL_DUPLEX) @@ -101,12 +89,17 @@ nicvf_interrupt(void *arg) { struct rte_eth_dev *dev = arg; struct nicvf *nic = nicvf_pmd_priv(dev); + struct rte_eth_link link; if (nicvf_reg_poll_interrupts(nic) == NIC_MBOX_MSG_BGX_LINK_CHANGE) { - if (dev->data->dev_conf.intr_conf.lsc) - nicvf_set_eth_link_status(nic, &dev->data->dev_link); - _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, - NULL); + if (dev->data->dev_conf.intr_conf.lsc) { + nicvf_link_status_update(nic, &link); + rte_eth_linkstatus_set(dev, &link); + + _rte_eth_dev_callback_process(dev, + RTE_ETH_EVENT_INTR_LSC, + NULL); + } } rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000, @@ -153,24 +146,23 @@ nicvf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete) if (wait_to_complete) { /* rte_eth_link_get() might need to wait up to 9 seconds */ for (i = 0; i < MAX_CHECK_TIME; i++) { - memset(&link, 0, sizeof(link)); - nicvf_set_eth_link_status(nic, &link); - if (link.link_status) + nicvf_link_status_update(nic, &link); + if (link.link_status == ETH_LINK_UP) break; rte_delay_ms(CHECK_INTERVAL); } } else { - memset(&link, 0, sizeof(link)); - nicvf_set_eth_link_status(nic, &link); + nicvf_link_status_update(nic, &link); } - return nicvf_atomic_write_link_status(dev, &link); + + return rte_eth_linkstatus_set(dev, &link); } static int nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) { struct nicvf *nic = nicvf_pmd_priv(dev); - uint32_t buffsz, frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; + uint32_t buffsz, frame_size = mtu + NIC_HW_L2_OVERHEAD; size_t i; struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode; @@ -188,7 +180,7 @@ nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) * Refuse mtu that requires the support of scattered packets * when this feature has not been enabled before. */ - if (!dev->data->scattered_rx && + if (dev->data->dev_started && !dev->data->scattered_rx && (frame_size + 2 * VLAN_TAG_SIZE > buffsz)) return -EINVAL; @@ -202,11 +194,11 @@ nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) else rxmode->offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME; - if (nicvf_mbox_update_hw_max_frs(nic, frame_size)) + if (nicvf_mbox_update_hw_max_frs(nic, mtu)) return -EINVAL; - /* Update max frame size */ - rxmode->max_rx_pkt_len = (uint32_t)frame_size; + /* Update max_rx_pkt_len */ + rxmode->max_rx_pkt_len = mtu + ETHER_HDR_LEN; nic->mtu = mtu; for (i = 0; i < nic->sqs_count; i++) @@ -939,7 +931,7 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx, bool is_single_pool; struct nicvf_txq *txq; struct nicvf *nic = nicvf_pmd_priv(dev); - uint64_t conf_offloads, offload_capa, unsupported_offloads; + uint64_t offloads; PMD_INIT_FUNC_TRACE(); @@ -953,17 +945,6 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx, PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d", socket_id, nic->node); - conf_offloads = tx_conf->offloads; - offload_capa = NICVF_TX_OFFLOAD_CAPA; - - unsupported_offloads = conf_offloads & ~offload_capa; - if (unsupported_offloads) { - PMD_INIT_LOG(ERR, "Tx offloads 0x%" PRIx64 " are not supported." 
- "Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n", - unsupported_offloads, conf_offloads, offload_capa); - return -ENOTSUP; - } - /* Tx deferred start is not supported */ if (tx_conf->tx_deferred_start) { PMD_INIT_LOG(ERR, "Tx deferred start not supported"); @@ -1015,9 +996,10 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx, txq->tx_free_thresh = tx_free_thresh; txq->sq_head = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_HEAD; txq->sq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_DOOR; - txq->offloads = conf_offloads; + offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads; + txq->offloads = offloads; - is_single_pool = !!(conf_offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE); + is_single_pool = !!(offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE); /* Choose optimum free threshold value for multipool case */ if (!is_single_pool) { @@ -1277,7 +1259,7 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx, uint16_t rx_free_thresh; struct nicvf_rxq *rxq; struct nicvf *nic = nicvf_pmd_priv(dev); - uint64_t conf_offloads, offload_capa, unsupported_offloads; + uint64_t offloads; PMD_INIT_FUNC_TRACE(); @@ -1291,24 +1273,6 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx, PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d", socket_id, nic->node); - - conf_offloads = rx_conf->offloads; - - if (conf_offloads & DEV_RX_OFFLOAD_CHECKSUM) { - PMD_INIT_LOG(NOTICE, "Rx checksum not supported"); - conf_offloads &= ~DEV_RX_OFFLOAD_CHECKSUM; - } - - offload_capa = NICVF_RX_OFFLOAD_CAPA; - unsupported_offloads = conf_offloads & ~offload_capa; - - if (unsupported_offloads) { - PMD_INIT_LOG(ERR, "Rx offloads 0x%" PRIx64 " are not supported. " - "Requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n", - unsupported_offloads, conf_offloads, offload_capa); - return -ENOTSUP; - } - /* Mempool memory must be contiguous, so must be one memory segment*/ if (mp->nb_mem_chunks != 1) { PMD_INIT_LOG(ERR, "Non-contiguous mempool, add more huge pages"); @@ -1316,7 +1280,7 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx, } /* Mempool memory must be physically contiguous */ - if (mp->flags & MEMPOOL_F_NO_PHYS_CONTIG) { + if (mp->flags & MEMPOOL_F_NO_IOVA_CONTIG) { PMD_INIT_LOG(ERR, "Mempool memory must be physically contiguous"); return -EINVAL; } @@ -1389,10 +1353,11 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx, nicvf_rx_queue_reset(rxq); + offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads; PMD_INIT_LOG(DEBUG, "[%d] rxq=%p pool=%s nb_desc=(%d/%d)" " phy=0x%" PRIx64 " offloads=0x%" PRIx64, nicvf_netdev_qidx(nic, qidx), rxq, mp->name, nb_desc, - rte_mempool_avail_count(mp), rxq->phys, conf_offloads); + rte_mempool_avail_count(mp), rxq->phys, offloads); dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = rxq; dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] = @@ -1408,8 +1373,6 @@ nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) PMD_INIT_FUNC_TRACE(); - dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev); - /* Autonegotiation may be disabled */ dev_info->speed_capa = ETH_LINK_SPEED_FIXED; dev_info->speed_capa |= ETH_LINK_SPEED_10M | ETH_LINK_SPEED_100M | @@ -1418,7 +1381,7 @@ nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->speed_capa |= ETH_LINK_SPEED_40G; dev_info->min_rx_bufsize = ETHER_MIN_MTU; - dev_info->max_rx_pktlen = NIC_HW_MAX_FRS; + dev_info->max_rx_pktlen = NIC_HW_MAX_MTU + ETHER_HDR_LEN; dev_info->max_rx_queues = 
(uint16_t)MAX_RCV_QUEUES_PER_QS * (MAX_SQS_PER_VF + 1); dev_info->max_tx_queues = @@ -1445,12 +1408,6 @@ nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->default_txconf = (struct rte_eth_txconf) { .tx_free_thresh = NICVF_DEFAULT_TX_FREE_THRESH, - .txq_flags = - ETH_TXQ_FLAGS_NOMULTSEGS | - ETH_TXQ_FLAGS_NOREFCOUNT | - ETH_TXQ_FLAGS_NOMULTMEMP | - ETH_TXQ_FLAGS_NOVLANOFFL | - ETH_TXQ_FLAGS_NOXSUMSCTP, .offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE | DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM | @@ -1751,8 +1708,7 @@ nicvf_dev_start(struct rte_eth_dev *dev) /* Setup MTU based on max_rx_pkt_len or default */ mtu = dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME ? dev->data->dev_conf.rxmode.max_rx_pkt_len - - ETHER_HDR_LEN - ETHER_CRC_LEN - : ETHER_MTU; + - ETHER_HDR_LEN : ETHER_MTU; if (nicvf_dev_set_mtu(dev, mtu)) { PMD_INIT_LOG(ERR, "Failed to set default mtu size"); @@ -1923,8 +1879,6 @@ nicvf_dev_configure(struct rte_eth_dev *dev) struct rte_eth_txmode *txmode = &conf->txmode; struct nicvf *nic = nicvf_pmd_priv(dev); uint8_t cqcount; - uint64_t conf_rx_offloads, rx_offload_capa; - uint64_t conf_tx_offloads, tx_offload_capa; PMD_INIT_FUNC_TRACE(); @@ -1933,32 +1887,7 @@ nicvf_dev_configure(struct rte_eth_dev *dev) return -EINVAL; } - conf_tx_offloads = dev->data->dev_conf.txmode.offloads; - tx_offload_capa = NICVF_TX_OFFLOAD_CAPA; - - if ((conf_tx_offloads & tx_offload_capa) != conf_tx_offloads) { - PMD_INIT_LOG(ERR, "Some Tx offloads are not supported " - "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n", - conf_tx_offloads, tx_offload_capa); - return -ENOTSUP; - } - - if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM) { - PMD_INIT_LOG(NOTICE, "Rx checksum not supported"); - rxmode->offloads &= ~DEV_RX_OFFLOAD_CHECKSUM; - } - - conf_rx_offloads = rxmode->offloads; - rx_offload_capa = NICVF_RX_OFFLOAD_CAPA; - - if ((conf_rx_offloads & rx_offload_capa) != conf_rx_offloads) { - PMD_INIT_LOG(ERR, "Some Rx offloads are not supported " - "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n", - conf_rx_offloads, rx_offload_capa); - return -ENOTSUP; - } - - if ((conf_rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP) == 0) { + if ((rxmode->offloads & DEV_RX_OFFLOAD_CRC_STRIP) == 0) { PMD_INIT_LOG(NOTICE, "Can't disable hw crc strip"); rxmode->offloads |= DEV_RX_OFFLOAD_CRC_STRIP; } diff --git a/drivers/net/vdev_netvsc/Makefile b/drivers/net/vdev_netvsc/Makefile index 7be17137..690cb8f8 100644 --- a/drivers/net/vdev_netvsc/Makefile +++ b/drivers/net/vdev_netvsc/Makefile @@ -1,6 +1,6 @@ # SPDX-License-Identifier: BSD-3-Clause # Copyright 2017 6WIND S.A. -# Copyright 2017 Mellanox Technologies, Ltd. +# Copyright 2017 Mellanox Technologies, Ltd include $(RTE_SDK)/mk/rte.vars.mk diff --git a/drivers/net/vdev_netvsc/vdev_netvsc.c b/drivers/net/vdev_netvsc/vdev_netvsc.c index cbf4d590..48717f2f 100644 --- a/drivers/net/vdev_netvsc/vdev_netvsc.c +++ b/drivers/net/vdev_netvsc/vdev_netvsc.c @@ -1,12 +1,14 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2017 6WIND S.A. - * Copyright 2017 Mellanox Technologies, Ltd. 
+ * Copyright 2017 Mellanox Technologies, Ltd */ #include #include #include #include +#include +#include #include #include #include @@ -33,9 +35,11 @@ #include #include #include +#include #define VDEV_NETVSC_DRIVER net_vdev_netvsc #define VDEV_NETVSC_DRIVER_NAME RTE_STR(VDEV_NETVSC_DRIVER) +#define VDEV_NETVSC_DRIVER_NAME_LEN 15 #define VDEV_NETVSC_ARG_IFACE "iface" #define VDEV_NETVSC_ARG_MAC "mac" #define VDEV_NETVSC_ARG_FORCE "force" @@ -95,6 +99,43 @@ vdev_netvsc_ctx_destroy(struct vdev_netvsc_ctx *ctx) free(ctx); } +/** + * Determine if a network interface is NetVSC. + * + * @param[in] iface + * Pointer to netdevice description structure (name and index). + * + * @return + * A nonzero value when interface is detected as NetVSC. In case of error, + * rte_errno is updated and 0 returned. + */ +static int +vdev_netvsc_iface_is_netvsc(const struct if_nameindex *iface) +{ + static const char temp[] = "/sys/class/net/%s/device/class_id"; + char path[sizeof(temp) + IF_NAMESIZE]; + FILE *f; + int ret; + int len = 0; + + ret = snprintf(path, sizeof(path), temp, iface->if_name); + if (ret == -1 || (size_t)ret >= sizeof(path)) { + rte_errno = ENOBUFS; + return 0; + } + f = fopen(path, "r"); + if (!f) { + rte_errno = errno; + return 0; + } + ret = fscanf(f, NETVSC_CLASS_ID "%n", &len); + if (ret == EOF) + rte_errno = errno; + ret = len == (int)strlen(NETVSC_CLASS_ID); + fclose(f); + return ret; +} + /** * Iterate over system network interfaces. * @@ -104,6 +145,8 @@ vdev_netvsc_ctx_destroy(struct vdev_netvsc_ctx *ctx) * @param func * Callback function pointer. List traversal is aborted when this function * returns a nonzero value. + * @param is_netvsc + * Indicates the device type to iterate - netvsc or non-netvsc. * @param ... * Variable parameter list passed as @p va_list to @p func. * @@ -115,7 +158,7 @@ vdev_netvsc_ctx_destroy(struct vdev_netvsc_ctx *ctx) static int vdev_netvsc_foreach_iface(int (*func)(const struct if_nameindex *iface, const struct ether_addr *eth_addr, - va_list ap), ...) + va_list ap), int is_netvsc, ...) { struct if_nameindex *iface = if_nameindex(); int s = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP); @@ -133,11 +176,15 @@ vdev_netvsc_foreach_iface(int (*func)(const struct if_nameindex *iface, goto error; } for (i = 0; iface[i].if_name; ++i) { + int is_netvsc_ret; struct ifreq req; struct ether_addr eth_addr; va_list ap; - strncpy(req.ifr_name, iface[i].if_name, sizeof(req.ifr_name)); + is_netvsc_ret = vdev_netvsc_iface_is_netvsc(&iface[i]) ? 1 : 0; + if (is_netvsc ^ is_netvsc_ret) + continue; + strlcpy(req.ifr_name, iface[i].if_name, sizeof(req.ifr_name)); if (ioctl(s, SIOCGIFHWADDR, &req) == -1) { DRV_LOG(WARNING, "cannot retrieve information about" " interface \"%s\": %s", @@ -151,7 +198,7 @@ vdev_netvsc_foreach_iface(int (*func)(const struct if_nameindex *iface, } memcpy(eth_addr.addr_bytes, req.ifr_hwaddr.sa_data, RTE_DIM(eth_addr.addr_bytes)); - va_start(ap, func); + va_start(ap, is_netvsc); ret = func(&iface[i], &eth_addr, ap); va_end(ap); if (ret) @@ -165,78 +212,101 @@ error: return ret; } -/** - * Determine if a network interface is NetVSC. - * - * @param[in] iface - * Pointer to netdevice description structure (name and index). - * - * @return - * A nonzero value when interface is detected as NetVSC. In case of error, - * rte_errno is updated and 0 returned.
- */ -static int -vdev_netvsc_iface_is_netvsc(const struct if_nameindex *iface) -{ - static const char temp[] = "/sys/class/net/%s/device/class_id"; - char path[sizeof(temp) + IF_NAMESIZE]; - FILE *f; - int ret; - int len = 0; - - ret = snprintf(path, sizeof(path), temp, iface->if_name); - if (ret == -1 || (size_t)ret >= sizeof(path)) { - rte_errno = ENOBUFS; - return 0; - } - f = fopen(path, "r"); - if (!f) { - rte_errno = errno; - return 0; - } - ret = fscanf(f, NETVSC_CLASS_ID "%n", &len); - if (ret == EOF) - rte_errno = errno; - ret = len == (int)strlen(NETVSC_CLASS_ID); - fclose(f); - return ret; -} - /** * Determine if a network interface has a route. * * @param[in] name * Network device name. + * @param[in] family + * Address family: AF_INET for IPv4 or AF_INET6 for IPv6. * * @return - * A nonzero value when interface has an route. In case of error, - * rte_errno is updated and 0 returned. + * 1 when interface has a route, negative errno value in case of error and + * 0 otherwise. */ static int -vdev_netvsc_has_route(const char *name) +vdev_netvsc_has_route(const struct if_nameindex *iface, + const unsigned char family) { - FILE *fp; + /* + * The implementation can be simpler by getifaddrs() function usage but + * it works for IPv6 only starting from glibc 2.3.3. + */ + char buf[4096]; + int len; int ret = 0; - char route[NETVSC_MAX_ROUTE_LINE_SIZE]; - char *netdev; - - fp = fopen("/proc/net/route", "r"); - if (!fp) { - rte_errno = errno; - return 0; + int res; + int sock; + struct nlmsghdr *retmsg = (struct nlmsghdr *)buf; + struct sockaddr_nl sa; + struct { + struct nlmsghdr nlhdr; + struct ifaddrmsg addrmsg; + } msg; + + if (!iface || (family != AF_INET && family != AF_INET6)) { + DRV_LOG(ERR, "%s", rte_strerror(EINVAL)); + return -EINVAL; } - while (fgets(route, NETVSC_MAX_ROUTE_LINE_SIZE, fp) != NULL) { - netdev = strtok(route, "\t"); - if (strcmp(netdev, name) == 0) { - ret = 1; - break; + sock = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE); + if (sock == -1) { + DRV_LOG(ERR, "cannot open socket: %s", rte_strerror(errno)); + return -errno; + } + memset(&sa, 0, sizeof(sa)); + sa.nl_family = AF_NETLINK; + sa.nl_groups = RTMGRP_LINK | RTMGRP_IPV4_IFADDR; + res = bind(sock, (struct sockaddr *)&sa, sizeof(sa)); + if (res == -1) { + ret = -errno; + DRV_LOG(ERR, "cannot bind socket: %s", rte_strerror(errno)); + goto close; + } + memset(&msg, 0, sizeof(msg)); + msg.nlhdr.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifaddrmsg)); + msg.nlhdr.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP; + msg.nlhdr.nlmsg_type = RTM_GETADDR; + msg.nlhdr.nlmsg_pid = getpid(); + msg.addrmsg.ifa_family = family; + msg.addrmsg.ifa_index = iface->if_index; + res = send(sock, &msg, msg.nlhdr.nlmsg_len, 0); + if (res == -1) { + ret = -errno; + DRV_LOG(ERR, "cannot send socket message: %s", + rte_strerror(errno)); + goto close; + } + memset(buf, 0, sizeof(buf)); + len = recv(sock, buf, sizeof(buf), 0); + if (len == -1) { + ret = -errno; + DRV_LOG(ERR, "cannot receive socket message: %s", + rte_strerror(errno)); + goto close; + } + while (NLMSG_OK(retmsg, (unsigned int)len)) { + struct ifaddrmsg *retaddr = + (struct ifaddrmsg *)NLMSG_DATA(retmsg); + + if (retaddr->ifa_family == family && + retaddr->ifa_index == iface->if_index) { + struct rtattr *retrta = IFA_RTA(retaddr); + int attlen = IFA_PAYLOAD(retmsg); + + while (RTA_OK(retrta, attlen)) { + if (retrta->rta_type == IFA_ADDRESS) { + ret = 1; + DRV_LOG(DEBUG, "interface %s has IP", + iface->if_name); + goto close; + } + retrta = RTA_NEXT(retrta, attlen); + } } - /* Move 
file pointer to the next line. */ - while (strchr(route, '\n') == NULL && - fgets(route, NETVSC_MAX_ROUTE_LINE_SIZE, fp) != NULL) - ; + retmsg = NLMSG_NEXT(retmsg, len); } - fclose(fp); +close: + close(sock); return ret; } @@ -259,12 +329,15 @@ static int vdev_netvsc_sysfs_readlink(char *buf, size_t size, const char *if_name, const char *relpath) { + struct vdev_netvsc_ctx *ctx; + char in[RTE_MAX(sizeof(ctx->yield), 256u)]; int ret; - ret = snprintf(buf, size, "/sys/class/net/%s/%s", if_name, relpath); - if (ret == -1 || (size_t)ret >= size) + ret = snprintf(in, sizeof(in) - 1, "/sys/class/net/%s/%s", + if_name, relpath); + if (ret == -1 || (size_t)ret >= sizeof(in)) return -ENOBUFS; - ret = readlink(buf, buf, size); + ret = readlink(in, buf, size); if (ret == -1) return -errno; if ((size_t)ret >= size - 1) @@ -314,11 +387,9 @@ vdev_netvsc_device_probe(const struct if_nameindex *iface, DRV_LOG(DEBUG, "NetVSC interface \"%s\" (index %u) renamed \"%s\"", ctx->if_name, ctx->if_index, iface->if_name); - strncpy(ctx->if_name, iface->if_name, sizeof(ctx->if_name)); + strlcpy(ctx->if_name, iface->if_name, sizeof(ctx->if_name)); return 0; } - if (vdev_netvsc_iface_is_netvsc(iface)) - return 0; if (!is_same_ether_addr(eth_addr, &ctx->if_addr)) return 0; /* Look for associated PCI device. */ @@ -387,7 +458,8 @@ vdev_netvsc_alarm(__rte_unused void *arg) int ret; LIST_FOREACH(ctx, &vdev_netvsc_ctx_list, entry) { - ret = vdev_netvsc_foreach_iface(vdev_netvsc_device_probe, ctx); + ret = vdev_netvsc_foreach_iface(vdev_netvsc_device_probe, 0, + ctx); if (ret < 0) break; } @@ -443,7 +515,6 @@ vdev_netvsc_netvsc_probe(const struct if_nameindex *iface, { const char *name = va_arg(ap, const char *); struct rte_kvargs *kvargs = va_arg(ap, struct rte_kvargs *); - int force = va_arg(ap, int); unsigned int specified = va_arg(ap, unsigned int); unsigned int *matched = va_arg(ap, unsigned int *); unsigned int i; @@ -497,18 +568,12 @@ vdev_netvsc_netvsc_probe(const struct if_nameindex *iface, iface->if_name, iface->if_index); return 0; } - if (!vdev_netvsc_iface_is_netvsc(iface)) { - if (!specified || !force) - return 0; - DRV_LOG(WARNING, - "using non-NetVSC interface \"%s\" (index %u)", - iface->if_name, iface->if_index); - } /* Routed NetVSC should not be probed. */ - if (vdev_netvsc_has_route(iface->if_name)) { - if (!specified || !force) + if (vdev_netvsc_has_route(iface, AF_INET) || + vdev_netvsc_has_route(iface, AF_INET6)) { + if (!specified) return 0; - DRV_LOG(WARNING, "using routed NetVSC interface \"%s\"" + DRV_LOG(WARNING, "probably using routed NetVSC interface \"%s\"" " (index %u)", iface->if_name, iface->if_index); } /* Create interface context. 
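/*
 * For reference: the simpler getifaddrs() variant alluded to in the
 * comment inside vdev_netvsc_has_route() above. This is a hedged,
 * illustrative sketch only -- iface_has_addr() is a hypothetical helper,
 * not part of this patch -- and it shows why the netlink RTM_GETADDR
 * dump is used instead: getifaddrs() handles IPv6 only from
 * glibc 2.3.3 onward.
 */
#include <errno.h>
#include <string.h>
#include <sys/socket.h>
#include <ifaddrs.h>

static int
iface_has_addr(const char *if_name, unsigned char family)
{
	struct ifaddrs *ifa;
	struct ifaddrs *cur;
	int ret = 0;

	if (getifaddrs(&ifa) == -1)
		return -errno;
	for (cur = ifa; cur != NULL; cur = cur->ifa_next) {
		if (cur->ifa_addr != NULL &&
		    cur->ifa_addr->sa_family == family &&
		    strcmp(cur->ifa_name, if_name) == 0) {
			ret = 1;
			break;
		}
	}
	freeifaddrs(ifa);
	return ret;
}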
*/ @@ -520,7 +585,7 @@ vdev_netvsc_netvsc_probe(const struct if_nameindex *iface, goto error; } ctx->id = vdev_netvsc_ctx_count; - strncpy(ctx->if_name, iface->if_name, sizeof(ctx->if_name)); + strlcpy(ctx->if_name, iface->if_name, sizeof(ctx->if_name)); ctx->if_index = iface->if_index; ctx->if_addr = *eth_addr; ctx->pipe[0] = -1; @@ -551,13 +616,13 @@ vdev_netvsc_netvsc_probe(const struct if_nameindex *iface, name, ctx->id); if (ret == -1 || (size_t)ret >= sizeof(ctx->name)) ++i; - ret = snprintf(ctx->devname, sizeof(ctx->devname), "net_failsafe_%s", - ctx->name); + ret = snprintf(ctx->devname, sizeof(ctx->devname), "net_failsafe_vsc%u", + ctx->id); if (ret == -1 || (size_t)ret >= sizeof(ctx->devname)) ++i; ret = snprintf(ctx->devargs, sizeof(ctx->devargs), - "fd(%d),dev(net_tap_%s,remote=%s)", - ctx->pipe[0], ctx->name, ctx->if_name); + "fd(%d),dev(net_tap_vsc%u,remote=%s)", + ctx->pipe[0], ctx->id, ctx->if_name); if (ret == -1 || (size_t)ret >= sizeof(ctx->devargs)) ++i; if (i) { @@ -569,7 +634,7 @@ vdev_netvsc_netvsc_probe(const struct if_nameindex *iface, /* Request virtual device generation. */ DRV_LOG(DEBUG, "generating virtual device \"%s\" with arguments \"%s\"", ctx->devname, ctx->devargs); - vdev_netvsc_foreach_iface(vdev_netvsc_device_probe, ctx); + vdev_netvsc_foreach_iface(vdev_netvsc_device_probe, 0, ctx); ret = rte_eal_hotplug_add("vdev", ctx->devname, ctx->devargs); if (ret) goto error; @@ -639,16 +704,32 @@ vdev_netvsc_vdev_probe(struct rte_vdev_device *dev) rte_kvargs_free(kvargs); return 0; } + if (specified > 1) { + DRV_LOG(ERR, "More than one way used to specify the netvsc" + " device."); + goto error; + } rte_eal_alarm_cancel(vdev_netvsc_alarm, NULL); /* Gather interfaces. */ - ret = vdev_netvsc_foreach_iface(vdev_netvsc_netvsc_probe, name, kvargs, - force, specified, &matched); + ret = vdev_netvsc_foreach_iface(vdev_netvsc_netvsc_probe, 1, name, + kvargs, specified, &matched); if (ret < 0) goto error; - if (matched < specified) - DRV_LOG(WARNING, - "some of the specified parameters did not match" - " recognized network interfaces"); + if (specified && matched < specified) { + if (!force) { + DRV_LOG(ERR, "Cannot find the specified netvsc device"); + goto error; + } + /* Try to force probing on non-netvsc specified device. 
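/*
 * The strncpy() -> strlcpy() conversions in this patch rely on the BSD
 * strlcpy() contract: the destination is always NUL-terminated and the
 * full source length is returned so truncation can be detected. Hedged
 * reference sketch below; my_strlcpy() is a hypothetical stand-in
 * (DPDK supplies an implementation via rte_string_fns.h when libc
 * lacks one).
 */
#include <string.h>

static size_t
my_strlcpy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size != 0) {
		size_t n = len >= size ? size - 1 : len;

		memcpy(dst, src, n);
		dst[n] = '\0';
	}
	return len;
}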
*/ + if (vdev_netvsc_foreach_iface(vdev_netvsc_netvsc_probe, 0, name, + kvargs, specified, &matched) < 0) + goto error; + if (matched < specified) { + DRV_LOG(ERR, "Cannot find the specified device"); + goto error; + } + DRV_LOG(WARNING, "non-netvsc device was probed as netvsc"); + } ret = rte_eal_alarm_set(VDEV_NETVSC_PROBE_MS * 1000, vdev_netvsc_alarm, NULL); if (ret < 0) { @@ -718,7 +799,8 @@ static int vdev_netvsc_cmp_rte_device(const struct rte_device *dev1, __rte_unused const void *_dev2) { - return strcmp(dev1->devargs->name, VDEV_NETVSC_DRIVER_NAME); + return strncmp(dev1->devargs->name, VDEV_NETVSC_DRIVER_NAME, + VDEV_NETVSC_DRIVER_NAME_LEN); } /** @@ -733,14 +815,15 @@ vdev_netvsc_scan_callback(__rte_unused void *arg) struct rte_devargs *devargs; struct rte_bus *vbus = rte_bus_find_by_name("vdev"); - TAILQ_FOREACH(devargs, &devargs_list, next) - if (!strcmp(devargs->name, VDEV_NETVSC_DRIVER_NAME)) + RTE_EAL_DEVARGS_FOREACH("vdev", devargs) + if (!strncmp(devargs->name, VDEV_NETVSC_DRIVER_NAME, + VDEV_NETVSC_DRIVER_NAME_LEN)) return; dev = (struct rte_vdev_device *)vbus->find_device(NULL, vdev_netvsc_cmp_rte_device, VDEV_NETVSC_DRIVER_NAME); if (dev) return; - if (rte_eal_devargs_add(RTE_DEVTYPE_VIRTUAL, VDEV_NETVSC_DRIVER_NAME)) + if (rte_devargs_add(RTE_DEVTYPE_VIRTUAL, VDEV_NETVSC_DRIVER_NAME)) DRV_LOG(ERR, "unable to add netvsc devargs."); } diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c index 3aae01c3..ba9d768a 100644 --- a/drivers/net/vhost/rte_eth_vhost.c +++ b/drivers/net/vhost/rte_eth_vhost.c @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright (c) 2016 IGEL Co., Ltd. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of IGEL Co.,Ltd. nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2016 IGEL Co., Ltd. + * Copyright(c) 2016-2018 Intel Corporation */ #include #include @@ -46,6 +18,11 @@ #include "rte_eth_vhost.h" +static int vhost_logtype; + +#define VHOST_LOG(level, ...) 
\ + rte_log(RTE_LOG_ ## level, vhost_logtype, __VA_ARGS__) + enum {VIRTIO_RXQ, VIRTIO_TXQ, VIRTIO_QNUM}; #define ETH_VHOST_IFACE_ARG "iface" @@ -117,7 +94,9 @@ struct pmd_internal { char *dev_name; char *iface_name; uint16_t max_queues; + int vid; rte_atomic32_t started; + uint8_t vlan_strip; }; struct internal_list { @@ -421,6 +400,11 @@ eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs) for (i = 0; likely(i < nb_rx); i++) { bufs[i]->port = r->port; + bufs[i]->vlan_tci = 0; + + if (r->internal->vlan_strip) + rte_vlan_strip(bufs[i]); + r->stats.bytes += bufs[i]->pkt_len; } @@ -437,7 +421,7 @@ eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs) { struct vhost_queue *r = q; uint16_t i, nb_tx = 0; - uint16_t nb_send = nb_bufs; + uint16_t nb_send = 0; if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0)) return 0; @@ -447,6 +431,22 @@ eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs) if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0)) goto out; + for (i = 0; i < nb_bufs; i++) { + struct rte_mbuf *m = bufs[i]; + + /* Do VLAN tag insertion */ + if (m->ol_flags & PKT_TX_VLAN_PKT) { + int error = rte_vlan_insert(&m); + if (unlikely(error)) { + rte_pktmbuf_free(m); + continue; + } + } + + bufs[nb_send] = m; + ++nb_send; + } + /* Enqueue packets to guest RX queue */ while (nb_send) { uint16_t nb_pkts; @@ -488,6 +488,11 @@ out: static int eth_dev_configure(struct rte_eth_dev *dev __rte_unused) { + struct pmd_internal *internal = dev->data->dev_private; + const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode; + + internal->vlan_strip = !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP); + return 0; } @@ -519,6 +524,136 @@ find_internal_resource(char *ifname) return list; } +static int +eth_rxq_intr_enable(struct rte_eth_dev *dev, uint16_t qid) +{ + struct rte_vhost_vring vring; + struct vhost_queue *vq; + int ret = 0; + + vq = dev->data->rx_queues[qid]; + if (!vq) { + VHOST_LOG(ERR, "rxq%d is not setup yet\n", qid); + return -1; + } + + ret = rte_vhost_get_vhost_vring(vq->vid, (qid << 1) + 1, &vring); + if (ret < 0) { + VHOST_LOG(ERR, "Failed to get rxq%d's vring\n", qid); + return ret; + } + VHOST_LOG(INFO, "Enable interrupt for rxq%d\n", qid); + rte_vhost_enable_guest_notification(vq->vid, (qid << 1) + 1, 1); + rte_wmb(); + + return ret; +} + +static int +eth_rxq_intr_disable(struct rte_eth_dev *dev, uint16_t qid) +{ + struct rte_vhost_vring vring; + struct vhost_queue *vq; + int ret = 0; + + vq = dev->data->rx_queues[qid]; + if (!vq) { + VHOST_LOG(ERR, "rxq%d is not setup yet\n", qid); + return -1; + } + + ret = rte_vhost_get_vhost_vring(vq->vid, (qid << 1) + 1, &vring); + if (ret < 0) { + VHOST_LOG(ERR, "Failed to get rxq%d's vring", qid); + return ret; + } + VHOST_LOG(INFO, "Disable interrupt for rxq%d\n", qid); + rte_vhost_enable_guest_notification(vq->vid, (qid << 1) + 1, 0); + rte_wmb(); + + return 0; +} + +static void +eth_vhost_uninstall_intr(struct rte_eth_dev *dev) +{ + struct rte_intr_handle *intr_handle = dev->intr_handle; + + if (intr_handle) { + if (intr_handle->intr_vec) + free(intr_handle->intr_vec); + free(intr_handle); + } + + dev->intr_handle = NULL; +} + +static int +eth_vhost_install_intr(struct rte_eth_dev *dev) +{ + struct rte_vhost_vring vring; + struct vhost_queue *vq; + int count = 0; + int nb_rxq = dev->data->nb_rx_queues; + int i; + int ret; + + /* uninstall firstly if we are reconnecting */ + if (dev->intr_handle) + eth_vhost_uninstall_intr(dev); + + dev->intr_handle = malloc(sizeof(*dev->intr_handle)); + if 
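/*
 * Hedged sketch of the software VLAN emulation added above: on Rx the
 * PMD pops an 802.1Q tag into mbuf->vlan_tci with rte_vlan_strip(), and
 * on Tx rte_vlan_insert() rebuilds the header when PKT_TX_VLAN_PKT is
 * set. Application-side view for illustration only (tci value made up,
 * process_tci() hypothetical):
 */
struct rte_mbuf *m = bufs[0];

if (rte_vlan_strip(m) == 0)		/* 0 means a tag was removed */
	process_tci(m->vlan_tci);

m->ol_flags |= PKT_TX_VLAN_PKT;		/* request re-insertion on Tx */
m->vlan_tci = 100;
if (rte_vlan_insert(&m) != 0)		/* may fail on shared/short mbufs */
	rte_pktmbuf_free(m);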
(!dev->intr_handle) { + VHOST_LOG(ERR, "Fail to allocate intr_handle\n"); + return -ENOMEM; + } + memset(dev->intr_handle, 0, sizeof(*dev->intr_handle)); + + dev->intr_handle->efd_counter_size = sizeof(uint64_t); + + dev->intr_handle->intr_vec = + malloc(nb_rxq * sizeof(dev->intr_handle->intr_vec[0])); + + if (!dev->intr_handle->intr_vec) { + VHOST_LOG(ERR, + "Failed to allocate memory for interrupt vector\n"); + free(dev->intr_handle); + return -ENOMEM; + } + + VHOST_LOG(INFO, "Prepare intr vec\n"); + for (i = 0; i < nb_rxq; i++) { + vq = dev->data->rx_queues[i]; + if (!vq) { + VHOST_LOG(INFO, "rxq-%d not setup yet, skip!\n", i); + continue; + } + + ret = rte_vhost_get_vhost_vring(vq->vid, (i << 1) + 1, &vring); + if (ret < 0) { + VHOST_LOG(INFO, + "Failed to get rxq-%d's vring, skip!\n", i); + continue; + } + + if (vring.kickfd < 0) { + VHOST_LOG(INFO, + "rxq-%d's kickfd is invalid, skip!\n", i); + continue; + } + dev->intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + i; + dev->intr_handle->efds[i] = vring.kickfd; + count++; + VHOST_LOG(INFO, "Installed intr vec for rxq-%d\n", i); + } + + dev->intr_handle->nb_efd = count; + dev->intr_handle->max_intr = count + 1; + dev->intr_handle->type = RTE_INTR_HANDLE_VDEV; + + return 0; +} + static void update_queuing_status(struct rte_eth_dev *dev) { @@ -527,6 +662,9 @@ update_queuing_status(struct rte_eth_dev *dev) unsigned int i; int allow_queuing = 1; + if (!dev->data->rx_queues || !dev->data->tx_queues) + return; + if (rte_atomic32_read(&internal->started) == 0 || rte_atomic32_read(&internal->dev_attached) == 0) allow_queuing = 0; @@ -551,13 +689,37 @@ update_queuing_status(struct rte_eth_dev *dev) } } +static void +queue_setup(struct rte_eth_dev *eth_dev, struct pmd_internal *internal) +{ + struct vhost_queue *vq; + int i; + + for (i = 0; i < eth_dev->data->nb_rx_queues; i++) { + vq = eth_dev->data->rx_queues[i]; + if (!vq) + continue; + vq->vid = internal->vid; + vq->internal = internal; + vq->port = eth_dev->data->port_id; + } + for (i = 0; i < eth_dev->data->nb_tx_queues; i++) { + vq = eth_dev->data->tx_queues[i]; + if (!vq) + continue; + vq->vid = internal->vid; + vq->internal = internal; + vq->port = eth_dev->data->port_id; + } +} + static int new_device(int vid) { struct rte_eth_dev *eth_dev; struct internal_list *list; struct pmd_internal *internal; - struct vhost_queue *vq; + struct rte_eth_conf *dev_conf; unsigned i; char ifname[PATH_MAX]; #ifdef RTE_LIBRTE_VHOST_NUMA @@ -567,12 +729,13 @@ new_device(int vid) rte_vhost_get_ifname(vid, ifname, sizeof(ifname)); list = find_internal_resource(ifname); if (list == NULL) { - RTE_LOG(INFO, PMD, "Invalid device name: %s\n", ifname); + VHOST_LOG(INFO, "Invalid device name: %s\n", ifname); return -1; } eth_dev = list->eth_dev; internal = eth_dev->data->dev_private; + dev_conf = ð_dev->data->dev_conf; #ifdef RTE_LIBRTE_VHOST_NUMA newnode = rte_vhost_get_numa_node(vid); @@ -580,21 +743,19 @@ new_device(int vid) eth_dev->data->numa_node = newnode; #endif - for (i = 0; i < eth_dev->data->nb_rx_queues; i++) { - vq = eth_dev->data->rx_queues[i]; - if (vq == NULL) - continue; - vq->vid = vid; - vq->internal = internal; - vq->port = eth_dev->data->port_id; - } - for (i = 0; i < eth_dev->data->nb_tx_queues; i++) { - vq = eth_dev->data->tx_queues[i]; - if (vq == NULL) - continue; - vq->vid = vid; - vq->internal = internal; - vq->port = eth_dev->data->port_id; + internal->vid = vid; + if (rte_atomic32_read(&internal->started) == 1) { + queue_setup(eth_dev, internal); + + if (dev_conf->intr_conf.rxq) { 
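/*
 * Illustrative application-side use of the Rx interrupt plumbing set up
 * above (not part of this patch): because each Rx queue's efd is the
 * vring kickfd, an lcore can sleep in rte_epoll_wait() instead of
 * busy-polling. Assumes the port was configured with
 * intr_conf.rxq = 1; port_id/qid are placeholders.
 */
struct rte_epoll_event ev;
uint16_t port_id = 0, qid = 0;

rte_eth_dev_rx_intr_ctl_q(port_id, qid, RTE_EPOLL_PER_THREAD,
			  RTE_INTR_EVENT_ADD, NULL);
rte_eth_dev_rx_intr_enable(port_id, qid);
rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);	/* block for a kick */
rte_eth_dev_rx_intr_disable(port_id, qid);
/* ...then drain the queue with rte_eth_rx_burst() as usual... */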
+ if (eth_vhost_install_intr(eth_dev) < 0) { + VHOST_LOG(INFO, + "Failed to install interrupt handler."); + return -1; + } + } + } else { + VHOST_LOG(INFO, "RX/TX queues not exist yet\n"); } for (i = 0; i < rte_vhost_get_vring_num(vid); i++) @@ -607,7 +768,7 @@ new_device(int vid) rte_atomic32_set(&internal->dev_attached, 1); update_queuing_status(eth_dev); - RTE_LOG(INFO, PMD, "Vhost device %d created\n", vid); + VHOST_LOG(INFO, "Vhost device %d created\n", vid); _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL); @@ -628,7 +789,7 @@ destroy_device(int vid) rte_vhost_get_ifname(vid, ifname, sizeof(ifname)); list = find_internal_resource(ifname); if (list == NULL) { - RTE_LOG(ERR, PMD, "Invalid interface name: %s\n", ifname); + VHOST_LOG(ERR, "Invalid interface name: %s\n", ifname); return; } eth_dev = list->eth_dev; @@ -639,17 +800,19 @@ destroy_device(int vid) eth_dev->data->dev_link.link_status = ETH_LINK_DOWN; - for (i = 0; i < eth_dev->data->nb_rx_queues; i++) { - vq = eth_dev->data->rx_queues[i]; - if (vq == NULL) - continue; - vq->vid = -1; - } - for (i = 0; i < eth_dev->data->nb_tx_queues; i++) { - vq = eth_dev->data->tx_queues[i]; - if (vq == NULL) - continue; - vq->vid = -1; + if (eth_dev->data->rx_queues && eth_dev->data->tx_queues) { + for (i = 0; i < eth_dev->data->nb_rx_queues; i++) { + vq = eth_dev->data->rx_queues[i]; + if (!vq) + continue; + vq->vid = -1; + } + for (i = 0; i < eth_dev->data->nb_tx_queues; i++) { + vq = eth_dev->data->tx_queues[i]; + if (!vq) + continue; + vq->vid = -1; + } } state = vring_states[eth_dev->data->port_id]; @@ -661,7 +824,8 @@ destroy_device(int vid) state->max_vring = 0; rte_spinlock_unlock(&state->lock); - RTE_LOG(INFO, PMD, "Vhost device %d destroyed\n", vid); + VHOST_LOG(INFO, "Vhost device %d destroyed\n", vid); + eth_vhost_uninstall_intr(eth_dev); _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL); } @@ -677,7 +841,7 @@ vring_state_changed(int vid, uint16_t vring, int enable) rte_vhost_get_ifname(vid, ifname, sizeof(ifname)); list = find_internal_resource(ifname); if (list == NULL) { - RTE_LOG(ERR, PMD, "Invalid interface name: %s\n", ifname); + VHOST_LOG(ERR, "Invalid interface name: %s\n", ifname); return -1; } @@ -689,7 +853,7 @@ vring_state_changed(int vid, uint16_t vring, int enable) state->max_vring = RTE_MAX(vring, state->max_vring); rte_spinlock_unlock(&state->lock); - RTE_LOG(INFO, PMD, "vring%u is %s\n", + VHOST_LOG(INFO, "vring%u is %s\n", vring, enable ? 
"enabled" : "disabled"); _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_QUEUE_STATE, NULL); @@ -712,13 +876,13 @@ rte_eth_vhost_get_queue_event(uint16_t port_id, int idx; if (port_id >= RTE_MAX_ETHPORTS) { - RTE_LOG(ERR, PMD, "Invalid port id\n"); + VHOST_LOG(ERR, "Invalid port id\n"); return -1; } state = vring_states[port_id]; if (!state) { - RTE_LOG(ERR, PMD, "Unused port\n"); + VHOST_LOG(ERR, "Unused port\n"); return -1; } @@ -770,12 +934,25 @@ rte_eth_vhost_get_vid_from_port_id(uint16_t port_id) } static int -eth_dev_start(struct rte_eth_dev *dev) +eth_dev_start(struct rte_eth_dev *eth_dev) { - struct pmd_internal *internal = dev->data->dev_private; + struct pmd_internal *internal = eth_dev->data->dev_private; + struct rte_eth_conf *dev_conf = ð_dev->data->dev_conf; + + queue_setup(eth_dev, internal); + + if (rte_atomic32_read(&internal->dev_attached) == 1) { + if (dev_conf->intr_conf.rxq) { + if (eth_vhost_install_intr(eth_dev) < 0) { + VHOST_LOG(INFO, + "Failed to install interrupt handler."); + return -1; + } + } + } rte_atomic32_set(&internal->started, 1); - update_queuing_status(dev); + update_queuing_status(eth_dev); return 0; } @@ -813,10 +990,13 @@ eth_dev_close(struct rte_eth_dev *dev) pthread_mutex_unlock(&internal_list_lock); rte_free(list); - for (i = 0; i < dev->data->nb_rx_queues; i++) - rte_free(dev->data->rx_queues[i]); - for (i = 0; i < dev->data->nb_tx_queues; i++) - rte_free(dev->data->tx_queues[i]); + if (dev->data->rx_queues) + for (i = 0; i < dev->data->nb_rx_queues; i++) + rte_free(dev->data->rx_queues[i]); + + if (dev->data->tx_queues) + for (i = 0; i < dev->data->nb_tx_queues; i++) + rte_free(dev->data->tx_queues[i]); rte_free(dev->data->mac_addrs); free(internal->dev_name); @@ -838,7 +1018,7 @@ eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id, vq = rte_zmalloc_socket(NULL, sizeof(struct vhost_queue), RTE_CACHE_LINE_SIZE, socket_id); if (vq == NULL) { - RTE_LOG(ERR, PMD, "Failed to allocate memory for rx queue\n"); + VHOST_LOG(ERR, "Failed to allocate memory for rx queue\n"); return -ENOMEM; } @@ -860,7 +1040,7 @@ eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id, vq = rte_zmalloc_socket(NULL, sizeof(struct vhost_queue), RTE_CACHE_LINE_SIZE, socket_id); if (vq == NULL) { - RTE_LOG(ERR, PMD, "Failed to allocate memory for tx queue\n"); + VHOST_LOG(ERR, "Failed to allocate memory for tx queue\n"); return -ENOMEM; } @@ -878,7 +1058,7 @@ eth_dev_info(struct rte_eth_dev *dev, internal = dev->data->dev_private; if (internal == NULL) { - RTE_LOG(ERR, PMD, "Invalid device specified\n"); + VHOST_LOG(ERR, "Invalid device specified\n"); return; } @@ -887,6 +1067,10 @@ eth_dev_info(struct rte_eth_dev *dev, dev_info->max_rx_queues = internal->max_queues; dev_info->max_tx_queues = internal->max_queues; dev_info->min_rx_bufsize = 0; + + dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS | + DEV_TX_OFFLOAD_VLAN_INSERT; + dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP; } static int @@ -1007,6 +1191,8 @@ static const struct eth_dev_ops ops = { .xstats_reset = vhost_dev_xstats_reset, .xstats_get = vhost_dev_xstats_get, .xstats_get_names = vhost_dev_xstats_get_names, + .rx_queue_intr_enable = eth_rxq_intr_enable, + .rx_queue_intr_disable = eth_rxq_intr_disable, }; static struct rte_vdev_driver pmd_vhost_drv; @@ -1016,23 +1202,16 @@ eth_dev_vhost_create(struct rte_vdev_device *dev, char *iface_name, int16_t queues, const unsigned int numa_node, uint64_t flags) { const char *name = rte_vdev_device_name(dev); - struct rte_eth_dev_data 
*data = NULL; + struct rte_eth_dev_data *data; struct pmd_internal *internal = NULL; struct rte_eth_dev *eth_dev = NULL; struct ether_addr *eth_addr = NULL; struct rte_vhost_vring_state *vring_state = NULL; struct internal_list *list = NULL; - RTE_LOG(INFO, PMD, "Creating VHOST-USER backend on numa socket %u\n", + VHOST_LOG(INFO, "Creating VHOST-USER backend on numa socket %u\n", numa_node); - /* now do all data allocation - for eth_dev structure and internal - * (private) data - */ - data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node); - if (data == NULL) - goto error; - list = rte_zmalloc_socket(name, sizeof(*list), 0, numa_node); if (list == NULL) goto error; @@ -1074,15 +1253,11 @@ eth_dev_vhost_create(struct rte_vdev_device *dev, char *iface_name, rte_spinlock_init(&vring_state->lock); vring_states[eth_dev->data->port_id] = vring_state; - /* We'll replace the 'data' originally allocated by eth_dev. So the - * vhost PMD resources won't be shared between multi processes. - */ - rte_memcpy(data, eth_dev->data, sizeof(*data)); - eth_dev->data = data; - + data = eth_dev->data; data->nb_rx_queues = queues; data->nb_tx_queues = queues; internal->max_queues = queues; + internal->vid = -1; data->dev_link = pmd_link; data->mac_addrs = eth_addr; data->dev_flags = RTE_ETH_DEV_INTR_LSC; @@ -1097,16 +1272,17 @@ eth_dev_vhost_create(struct rte_vdev_device *dev, char *iface_name, goto error; if (rte_vhost_driver_callback_register(iface_name, &vhost_ops) < 0) { - RTE_LOG(ERR, PMD, "Can't register callbacks\n"); + VHOST_LOG(ERR, "Can't register callbacks\n"); goto error; } if (rte_vhost_driver_start(iface_name) < 0) { - RTE_LOG(ERR, PMD, "Failed to start driver for %s\n", + VHOST_LOG(ERR, "Failed to start driver for %s\n", iface_name); goto error; } + rte_eth_dev_probing_finish(eth_dev); return data->port_id; error: @@ -1120,7 +1296,6 @@ error: rte_eth_dev_release_port(eth_dev); rte_free(internal); rte_free(list); - rte_free(data); return -1; } @@ -1164,9 +1339,23 @@ rte_pmd_vhost_probe(struct rte_vdev_device *dev) int client_mode = 0; int dequeue_zero_copy = 0; int iommu_support = 0; + struct rte_eth_dev *eth_dev; + const char *name = rte_vdev_device_name(dev); - RTE_LOG(INFO, PMD, "Initializing pmd_vhost for %s\n", - rte_vdev_device_name(dev)); + VHOST_LOG(INFO, "Initializing pmd_vhost for %s\n", name); + + if (rte_eal_process_type() == RTE_PROC_SECONDARY && + strlen(rte_vdev_device_args(dev)) == 0) { + eth_dev = rte_eth_dev_attach_secondary(name); + if (!eth_dev) { + VHOST_LOG(ERR, "Failed to probe %s\n", name); + return -1; + } + /* TODO: request info from primary to set up Rx and Tx */ + eth_dev->dev_ops = &ops; + rte_eth_dev_probing_finish(eth_dev); + return 0; + } kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments); if (kvlist == NULL) @@ -1239,7 +1428,7 @@ rte_pmd_vhost_remove(struct rte_vdev_device *dev) struct rte_eth_dev *eth_dev = NULL; name = rte_vdev_device_name(dev); - RTE_LOG(INFO, PMD, "Un-Initializing pmd_vhost for %s\n", name); + VHOST_LOG(INFO, "Un-Initializing pmd_vhost for %s\n", name); /* find an ethdev entry */ eth_dev = rte_eth_dev_allocated(name); @@ -1251,8 +1440,6 @@ rte_pmd_vhost_remove(struct rte_vdev_device *dev) rte_free(vring_states[eth_dev->data->port_id]); vring_states[eth_dev->data->port_id] = NULL; - rte_free(eth_dev->data); - rte_eth_dev_release_port(eth_dev); return 0; @@ -1268,3 +1455,12 @@ RTE_PMD_REGISTER_ALIAS(net_vhost, eth_vhost); RTE_PMD_REGISTER_PARAM_STRING(net_vhost, "iface= " "queues="); + +RTE_INIT(vhost_init_log); +static 
void +vhost_init_log(void) +{ + vhost_logtype = rte_log_register("pmd.net.vhost"); + if (vhost_logtype >= 0) + rte_log_set_level(vhost_logtype, RTE_LOG_NOTICE); +} diff --git a/drivers/net/vhost/rte_eth_vhost.h b/drivers/net/vhost/rte_eth_vhost.h index 948f3c81..0e68b9f6 100644 --- a/drivers/net/vhost/rte_eth_vhost.h +++ b/drivers/net/vhost/rte_eth_vhost.h @@ -1,36 +1,7 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2016 IGEL Co., Ltd. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of IGEL Co., Ltd. nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2016 IGEL Co., Ltd. 
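/*
 * vhost_init_log() above registers the dynamic logtype "pmd.net.vhost"
 * with a default level of NOTICE. For reference (typical usage, an
 * assumption rather than part of the patch), an application can raise
 * it at runtime; rte_log_register() returns the existing id when the
 * name is already registered:
 */
int lt = rte_log_register("pmd.net.vhost");

if (lt >= 0)
	rte_log_set_level(lt, RTE_LOG_DEBUG);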
+ * Copyright(c) 2016-2018 Intel Corporation */ - #ifndef _RTE_ETH_VHOST_H_ #define _RTE_ETH_VHOST_H_ diff --git a/drivers/net/virtio/meson.build b/drivers/net/virtio/meson.build new file mode 100644 index 00000000..e43ce6bb --- /dev/null +++ b/drivers/net/virtio/meson.build @@ -0,0 +1,27 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Intel Corporation + +allow_experimental_apis = true +sources += files('virtio_ethdev.c', + 'virtio_pci.c', + 'virtio_rxtx.c', + 'virtio_rxtx_simple.c', + 'virtqueue.c') +deps += ['kvargs', 'bus_pci'] + +if arch_subdir == 'x86' + sources += files('virtio_rxtx_simple_sse.c') +elif arch_subdir == 'arm' and host_machine.cpu_family().startswith('aarch64') + sources += files('virtio_rxtx_simple_neon.c') +endif + +if host_machine.system() == 'linux' + dpdk_conf.set('RTE_VIRTIO_USER', 1) + + sources += files('virtio_user_ethdev.c', + 'virtio_user/vhost_kernel.c', + 'virtio_user/vhost_kernel_tap.c', + 'virtio_user/vhost_user.c', + 'virtio_user/virtio_user_dev.c') + deps += ['bus_vdev'] +endif diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c index 884f74ad..df50a571 100644 --- a/drivers/net/virtio/virtio_ethdev.c +++ b/drivers/net/virtio/virtio_ethdev.c @@ -14,7 +14,6 @@ #include #include #include -#include #include #include #include @@ -29,6 +28,7 @@ #include #include #include +#include #include "virtio_ethdev.h" #include "virtio_pci.h" @@ -68,7 +68,7 @@ static int virtio_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr, uint32_t index, uint32_t vmdq); static void virtio_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index); -static void virtio_mac_addr_set(struct rte_eth_dev *dev, +static int virtio_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr); static int virtio_intr_enable(struct rte_eth_dev *dev); @@ -392,8 +392,8 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx) size, vq->vq_ring_size); mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size, - SOCKET_ID_ANY, - 0, VIRTIO_PCI_VRING_ALIGN); + SOCKET_ID_ANY, RTE_MEMZONE_IOVA_CONTIG, + VIRTIO_PCI_VRING_ALIGN); if (mz == NULL) { if (rte_errno == EEXIST) mz = rte_memzone_lookup(vq_name); @@ -418,8 +418,8 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx) snprintf(vq_hdr_name, sizeof(vq_hdr_name), "port%d_vq%d_hdr", dev->data->port_id, vtpci_queue_idx); hdr_mz = rte_memzone_reserve_aligned(vq_hdr_name, sz_hdr_mz, - SOCKET_ID_ANY, 0, - RTE_CACHE_LINE_SIZE); + SOCKET_ID_ANY, RTE_MEMZONE_IOVA_CONTIG, + RTE_CACHE_LINE_SIZE); if (hdr_mz == NULL) { if (rte_errno == EEXIST) hdr_mz = rte_memzone_lookup(vq_hdr_name); @@ -774,46 +774,6 @@ static const struct eth_dev_ops virtio_eth_dev_ops = { .mac_addr_set = virtio_mac_addr_set, }; -static inline int -virtio_dev_atomic_read_link_status(struct rte_eth_dev *dev, - struct rte_eth_link *link) -{ - struct rte_eth_link *dst = link; - struct rte_eth_link *src = &(dev->data->dev_link); - - if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, - *(uint64_t *)src) == 0) - return -1; - - return 0; -} - -/** - * Atomically writes the link status information into global - * structure rte_eth_dev. - * - * @param dev - * - Pointer to the structure rte_eth_dev to read from. - * - Pointer to the buffer to be saved with the link status. - * - * @return - * - On success, zero. - * - On failure, negative value. 
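/*
 * RTE_MEMZONE_IOVA_CONTIG, passed in the two reservations above, is new
 * in 18.05: memzones are no longer IOVA-contiguous by default, so DMA
 * ring memory must request it explicitly. Hedged sketch (zone name and
 * size are made up):
 */
const struct rte_memzone *mz;

mz = rte_memzone_reserve_aligned("example_vq", ring_size, SOCKET_ID_ANY,
				 RTE_MEMZONE_IOVA_CONTIG,
				 VIRTIO_PCI_VRING_ALIGN);
if (mz == NULL && rte_errno == EEXIST)
	mz = rte_memzone_lookup("example_vq");	/* already reserved */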
- */ -static inline int -virtio_dev_atomic_write_link_status(struct rte_eth_dev *dev, - struct rte_eth_link *link) -{ - struct rte_eth_link *dst = &(dev->data->dev_link); - struct rte_eth_link *src = link; - - if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, - *(uint64_t *)src) == 0) - return -1; - - return 0; -} - static void virtio_update_stats(struct rte_eth_dev *dev, struct rte_eth_stats *stats) { @@ -1097,7 +1057,7 @@ virtio_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index) virtio_mac_table_set(hw, uc, mc); } -static void +static int virtio_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr) { struct virtio_hw *hw = dev->data->dev_private; @@ -1113,9 +1073,14 @@ virtio_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr) ctrl.hdr.cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET; memcpy(ctrl.data, mac_addr, ETHER_ADDR_LEN); - virtio_send_command(hw->cvq, &ctrl, &len, 1); - } else if (vtpci_with_feature(hw, VIRTIO_NET_F_MAC)) - virtio_set_hwaddr(hw); + return virtio_send_command(hw->cvq, &ctrl, &len, 1); + } + + if (!vtpci_with_feature(hw, VIRTIO_NET_F_MAC)) + return -ENOTSUP; + + virtio_set_hwaddr(hw); + return 0; } static int @@ -1273,9 +1238,16 @@ static void virtio_notify_peers(struct rte_eth_dev *dev) { struct virtio_hw *hw = dev->data->dev_private; - struct virtnet_rx *rxvq = dev->data->rx_queues[0]; + struct virtnet_rx *rxvq; struct rte_mbuf *rarp_mbuf; + if (!dev->data->rx_queues) + return; + + rxvq = dev->data->rx_queues[0]; + if (!rxvq) + return; + rarp_mbuf = rte_net_make_rarp_packet(rxvq->mpool, (struct ether_addr *)hw->mac_addr); if (rarp_mbuf == NULL) { @@ -1333,7 +1305,8 @@ virtio_interrupt_handler(void *param) if (isr & VIRTIO_NET_S_ANNOUNCE) { virtio_notify_peers(dev); - virtio_ack_link_announce(dev); + if (hw->cvq) + virtio_ack_link_announce(dev); } } @@ -1744,9 +1717,51 @@ eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev) return 0; } +static int vdpa_check_handler(__rte_unused const char *key, + const char *value, __rte_unused void *opaque) +{ + if (strcmp(value, "1")) + return -1; + + return 0; +} + +static int +vdpa_mode_selected(struct rte_devargs *devargs) +{ + struct rte_kvargs *kvlist; + const char *key = "vdpa"; + int ret = 0; + + if (devargs == NULL) + return 0; + + kvlist = rte_kvargs_parse(devargs->args, NULL); + if (kvlist == NULL) + return 0; + + if (!rte_kvargs_count(kvlist, key)) + goto exit; + + /* vdpa mode selected when there's a key-value pair: vdpa=1 */ + if (rte_kvargs_process(kvlist, key, + vdpa_check_handler, NULL) < 0) { + goto exit; + } + ret = 1; + +exit: + rte_kvargs_free(kvlist); + return ret; +} + static int eth_virtio_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, struct rte_pci_device *pci_dev) { + /* virtio pmd skips probe if device needs to work in vdpa mode */ + if (vdpa_mode_selected(pci_dev->device.devargs)) + return 1; + return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct virtio_hw), eth_virtio_dev_init); } @@ -1787,6 +1802,7 @@ virtio_dev_configure(struct rte_eth_dev *dev) { const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode; struct virtio_hw *hw = dev->data->dev_private; + uint64_t rx_offloads = rxmode->offloads; uint64_t req_features; int ret; @@ -1799,14 +1815,11 @@ virtio_dev_configure(struct rte_eth_dev *dev) return ret; } - /* The name hw_ip_checksum is a bit confusing since it can be - * set by the application to request L3 and/or L4 checksums. In - * case of virtio, only L4 checksum is supported. 
- */ - if (rxmode->hw_ip_checksum) + if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM)) req_features |= (1ULL << VIRTIO_NET_F_GUEST_CSUM); - if (rxmode->enable_lro) + if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) req_features |= (1ULL << VIRTIO_NET_F_GUEST_TSO4) | (1ULL << VIRTIO_NET_F_GUEST_TSO6); @@ -1818,14 +1831,15 @@ virtio_dev_configure(struct rte_eth_dev *dev) return ret; } - if (rxmode->hw_ip_checksum && + if ((rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM)) && !vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM)) { PMD_DRV_LOG(ERR, "rx checksum not available on this host"); return -ENOTSUP; } - if (rxmode->enable_lro && + if ((rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) && (!vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) || !vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6))) { PMD_DRV_LOG(ERR, @@ -1837,9 +1851,10 @@ virtio_dev_configure(struct rte_eth_dev *dev) if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VQ)) virtio_dev_cq_start(dev); - hw->vlan_strip = rxmode->hw_vlan_strip; + if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP) + hw->vlan_strip = 1; - if (rxmode->hw_vlan_filter + if ((rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) && !vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) { PMD_DRV_LOG(ERR, "vlan filtering not available on this host"); @@ -1870,7 +1885,8 @@ virtio_dev_configure(struct rte_eth_dev *dev) hw->use_simple_tx = 0; } - if (rxmode->hw_ip_checksum) + if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM)) hw->use_simple_rx = 0; return 0; @@ -2028,21 +2044,21 @@ virtio_dev_stop(struct rte_eth_dev *dev) hw->started = 0; memset(&link, 0, sizeof(link)); - virtio_dev_atomic_write_link_status(dev, &link); + rte_eth_linkstatus_set(dev, &link); rte_spinlock_unlock(&hw->state_lock); } static int virtio_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete) { - struct rte_eth_link link, old; + struct rte_eth_link link; uint16_t status; struct virtio_hw *hw = dev->data->dev_private; + memset(&link, 0, sizeof(link)); - virtio_dev_atomic_read_link_status(dev, &link); - old = link; link.link_duplex = ETH_LINK_FULL_DUPLEX; link.link_speed = ETH_SPEED_NUM_10G; + link.link_autoneg = ETH_LINK_FIXED; if (hw->started == 0) { link.link_status = ETH_LINK_DOWN; @@ -2063,9 +2079,8 @@ virtio_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complet } else { link.link_status = ETH_LINK_UP; } - virtio_dev_atomic_write_link_status(dev, &link); - return (old.link_status == link.link_status) ? -1 : 0; + return rte_eth_linkstatus_set(dev, &link); } static int @@ -2073,9 +2088,10 @@ virtio_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask) { const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode; struct virtio_hw *hw = dev->data->dev_private; + uint64_t offloads = rxmode->offloads; if (mask & ETH_VLAN_FILTER_MASK) { - if (rxmode->hw_vlan_filter && + if ((offloads & DEV_RX_OFFLOAD_VLAN_FILTER) && !vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) { PMD_DRV_LOG(NOTICE, @@ -2086,7 +2102,7 @@ virtio_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask) } if (mask & ETH_VLAN_STRIP_MASK) - hw->vlan_strip = rxmode->hw_vlan_strip; + hw->vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP); return 0; } @@ -2099,7 +2115,6 @@ virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->speed_capa = ETH_LINK_SPEED_10G; /* fake value */ - dev_info->pci_dev = dev->device ? 
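/*
 * The open-coded rte_atomic64_cmpset() helpers removed above are
 * superseded by the ethdev-layer rte_eth_linkstatus_set()/get() pair
 * introduced in 18.05. Illustrative shape of a link_update callback
 * using it (field values mirror the virtio defaults):
 */
struct rte_eth_link link;

memset(&link, 0, sizeof(link));
link.link_duplex = ETH_LINK_FULL_DUPLEX;
link.link_speed = ETH_SPEED_NUM_10G;
link.link_autoneg = ETH_LINK_FIXED;
link.link_status = ETH_LINK_UP;
return rte_eth_linkstatus_set(dev, &link);	/* reports status change */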
RTE_ETH_DEV_TO_PCI(dev) : NULL; dev_info->max_rx_queues = RTE_MIN(hw->max_queue_pairs, VIRTIO_MAX_RX_QUEUES); dev_info->max_tx_queues = @@ -2112,18 +2127,21 @@ virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) }; host_features = VTPCI_OPS(hw)->get_features(hw); - dev_info->rx_offload_capa = 0; + dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP; if (host_features & (1ULL << VIRTIO_NET_F_GUEST_CSUM)) { dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM; } + if (host_features & (1ULL << VIRTIO_NET_F_CTRL_VLAN)) + dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_VLAN_FILTER; tso_mask = (1ULL << VIRTIO_NET_F_GUEST_TSO4) | (1ULL << VIRTIO_NET_F_GUEST_TSO6); if ((host_features & tso_mask) == tso_mask) dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO; - dev_info->tx_offload_capa = 0; + dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS | + DEV_TX_OFFLOAD_VLAN_INSERT; if (hw->guest_features & (1ULL << VIRTIO_NET_F_CSUM)) { dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_UDP_CKSUM | diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h index 4539d2e4..bb40064e 100644 --- a/drivers/net/virtio/virtio_ethdev.h +++ b/drivers/net/virtio/virtio_ethdev.h @@ -43,6 +43,14 @@ 1u << VIRTIO_NET_F_GUEST_CSUM | \ 1u << VIRTIO_NET_F_GUEST_TSO4 | \ 1u << VIRTIO_NET_F_GUEST_TSO6) + +#define VIRTIO_PMD_PER_DEVICE_RX_OFFLOADS \ + (DEV_RX_OFFLOAD_TCP_CKSUM | \ + DEV_RX_OFFLOAD_UDP_CKSUM | \ + DEV_RX_OFFLOAD_TCP_LRO | \ + DEV_RX_OFFLOAD_VLAN_FILTER | \ + DEV_RX_OFFLOAD_VLAN_STRIP) + /* * CQ function prototype */ diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c index 8dbf2a30..92fab217 100644 --- a/drivers/net/virtio/virtio_rxtx.c +++ b/drivers/net/virtio/virtio_rxtx.c @@ -38,10 +38,6 @@ #define VIRTIO_DUMP_PACKET(m, len) do { } while (0) #endif - -#define VIRTIO_SIMPLE_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \ - ETH_TXQ_FLAGS_NOOFFLOADS) - int virtio_dev_rx_queue_done(void *rxq, uint16_t offset) { @@ -389,7 +385,7 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, uint16_t nb_desc, unsigned int socket_id __rte_unused, - __rte_unused const struct rte_eth_rxconf *rx_conf, + const struct rte_eth_rxconf *rx_conf __rte_unused, struct rte_mempool *mp) { uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX; @@ -410,6 +406,7 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev, rte_exit(EXIT_FAILURE, "Cannot allocate mbufs for rx virtqueue"); } + dev->data->rx_queues[queue_idx] = rxvq; return 0; @@ -502,7 +499,7 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev, PMD_INIT_FUNC_TRACE(); /* cannot use simple rxtx funcs with multisegs or offloads */ - if ((tx_conf->txq_flags & VIRTIO_SIMPLE_FLAGS) != VIRTIO_SIMPLE_FLAGS) + if (dev->data->dev_conf.txmode.offloads) hw->use_simple_tx = 0; if (nb_desc == 0 || nb_desc > vq->vq_nentries) diff --git a/drivers/net/virtio/virtio_user/vhost_kernel.c b/drivers/net/virtio/virtio_user/vhost_kernel.c index 8d0a1ab2..b2444096 100644 --- a/drivers/net/virtio/virtio_user/vhost_kernel.c +++ b/drivers/net/virtio/virtio_user/vhost_kernel.c @@ -70,6 +70,32 @@ static uint64_t vhost_req_user_to_kernel[] = { [VHOST_USER_SET_MEM_TABLE] = VHOST_SET_MEM_TABLE, }; +struct walk_arg { + struct vhost_memory_kernel *vm; + uint32_t region_nr; +}; +static int +add_memory_region(const struct rte_memseg_list *msl __rte_unused, + const struct rte_memseg *ms, size_t len, void *arg) +{ + struct walk_arg *wa = arg; + struct vhost_memory_region *mr; + void 
*start_addr; + + if (wa->region_nr >= max_regions) + return -1; + + mr = &wa->vm->regions[wa->region_nr++]; + start_addr = ms->addr; + + mr->guest_phys_addr = (uint64_t)(uintptr_t)start_addr; + mr->userspace_addr = (uint64_t)(uintptr_t)start_addr; + mr->memory_size = len; + mr->mmap_offset = 0; + + return 0; +} + /* By default, vhost kernel module allows 64 regions, but DPDK allows * 256 segments. As a relief, below function merges those virtually * adjacent memsegs into one region. @@ -77,63 +103,24 @@ static uint64_t vhost_req_user_to_kernel[] = { static struct vhost_memory_kernel * prepare_vhost_memory_kernel(void) { - uint32_t i, j, k = 0; - struct rte_memseg *seg; - struct vhost_memory_region *mr; struct vhost_memory_kernel *vm; + struct walk_arg wa; vm = malloc(sizeof(struct vhost_memory_kernel) + - max_regions * - sizeof(struct vhost_memory_region)); + max_regions * + sizeof(struct vhost_memory_region)); if (!vm) return NULL; - for (i = 0; i < RTE_MAX_MEMSEG; ++i) { - seg = &rte_eal_get_configuration()->mem_config->memseg[i]; - if (!seg->addr) - break; - - int new_region = 1; - - for (j = 0; j < k; ++j) { - mr = &vm->regions[j]; - - if (mr->userspace_addr + mr->memory_size == - (uint64_t)(uintptr_t)seg->addr) { - mr->memory_size += seg->len; - new_region = 0; - break; - } - - if ((uint64_t)(uintptr_t)seg->addr + seg->len == - mr->userspace_addr) { - mr->guest_phys_addr = - (uint64_t)(uintptr_t)seg->addr; - mr->userspace_addr = - (uint64_t)(uintptr_t)seg->addr; - mr->memory_size += seg->len; - new_region = 0; - break; - } - } - - if (new_region == 0) - continue; - - mr = &vm->regions[k++]; - /* use vaddr here! */ - mr->guest_phys_addr = (uint64_t)(uintptr_t)seg->addr; - mr->userspace_addr = (uint64_t)(uintptr_t)seg->addr; - mr->memory_size = seg->len; - mr->mmap_offset = 0; + wa.region_nr = 0; + wa.vm = vm; - if (k >= max_regions) { - free(vm); - return NULL; - } + if (rte_memseg_contig_walk(add_memory_region, &wa) < 0) { + free(vm); + return NULL; } - vm->nregions = k; + vm->nregions = wa.region_nr; vm->padding = 0; return vm; } @@ -351,7 +338,8 @@ vhost_kernel_enable_queue_pair(struct virtio_user_dev *dev, else hdr_size = sizeof(struct virtio_net_hdr); - tapfd = vhost_kernel_open_tap(&dev->ifname, hdr_size, req_mq); + tapfd = vhost_kernel_open_tap(&dev->ifname, hdr_size, req_mq, + (char *)dev->mac_addr); if (tapfd < 0) { PMD_DRV_LOG(ERR, "fail to open tap for vhost kernel"); return -1; diff --git a/drivers/net/virtio/virtio_user/vhost_kernel_tap.c b/drivers/net/virtio/virtio_user/vhost_kernel_tap.c index 1a47a348..9ea7ade7 100644 --- a/drivers/net/virtio/virtio_user/vhost_kernel_tap.c +++ b/drivers/net/virtio/virtio_user/vhost_kernel_tap.c @@ -7,15 +7,19 @@ #include #include #include +#include #include #include #include +#include + #include "vhost_kernel_tap.h" #include "../virtio_logs.h" int -vhost_kernel_open_tap(char **p_ifname, int hdr_size, int req_mq) +vhost_kernel_open_tap(char **p_ifname, int hdr_size, int req_mq, + const char *mac) { unsigned int tap_features; int sndbuf = INT_MAX; @@ -94,6 +98,14 @@ vhost_kernel_open_tap(char **p_ifname, int hdr_size, int req_mq) PMD_DRV_LOG(ERR, "TUNSETOFFLOAD ioctl() failed: %s", strerror(errno)); + memset(&ifr, 0, sizeof(ifr)); + ifr.ifr_hwaddr.sa_family = ARPHRD_ETHER; + memcpy(ifr.ifr_hwaddr.sa_data, mac, ETHER_ADDR_LEN); + if (ioctl(tapfd, SIOCSIFHWADDR, (void *)&ifr) == -1) { + PMD_DRV_LOG(ERR, "SIOCSIFHWADDR failed: %s", strerror(errno)); + goto error; + } + if (!(*p_ifname)) *p_ifname = strdup(ifr.ifr_name); diff --git 
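/*
 * rte_memseg_contig_walk(), used in prepare_vhost_memory_kernel()
 * above, comes from the 18.05 dynamic-memory rework: the walker invokes
 * the callback once per contiguous run of memsegs, which is what lets
 * the manual merge loop be deleted. Minimal sketch of the callback
 * contract (count_regions() is hypothetical):
 */
static int
count_regions(const struct rte_memseg_list *msl __rte_unused,
	      const struct rte_memseg *ms __rte_unused,
	      size_t len __rte_unused, void *arg)
{
	unsigned int *n = arg;

	(*n)++;
	return 0;	/* 0 continues the walk, negative aborts it */
}

/* usage: unsigned int n = 0; rte_memseg_contig_walk(count_regions, &n); */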
a/drivers/net/virtio/virtio_user/vhost_kernel_tap.h b/drivers/net/virtio/virtio_user/vhost_kernel_tap.h index 7d52e6b7..01a026f5 100644 --- a/drivers/net/virtio/virtio_user/vhost_kernel_tap.h +++ b/drivers/net/virtio/virtio_user/vhost_kernel_tap.h @@ -35,4 +35,5 @@ /* Constants */ #define PATH_NET_TUN "/dev/net/tun" -int vhost_kernel_open_tap(char **p_ifname, int hdr_size, int req_mq); +int vhost_kernel_open_tap(char **p_ifname, int hdr_size, int req_mq, + const char *mac); diff --git a/drivers/net/virtio/virtio_user/vhost_user.c b/drivers/net/virtio/virtio_user/vhost_user.c index 91c6449b..ef6e43df 100644 --- a/drivers/net/virtio/virtio_user/vhost_user.c +++ b/drivers/net/virtio/virtio_user/vhost_user.c @@ -138,12 +138,13 @@ struct hugepage_file_info { static int get_hugepage_file_info(struct hugepage_file_info huges[], int max) { - int idx; + int idx, k, exist; FILE *f; char buf[BUFSIZ], *tmp, *tail; char *str_underline, *str_start; int huge_index; uint64_t v_start, v_end; + struct stat stats; f = fopen("/proc/self/maps", "r"); if (!f) { @@ -183,16 +184,39 @@ get_hugepage_file_info(struct hugepage_file_info huges[], int max) if (sscanf(str_start, "map_%d", &huge_index) != 1) continue; + /* skip duplicated file which is mapped to different regions */ + for (k = 0, exist = -1; k < idx; ++k) { + if (!strcmp(huges[k].path, tmp)) { + exist = k; + break; + } + } + if (exist >= 0) + continue; + if (idx >= max) { PMD_DRV_LOG(ERR, "Exceed maximum of %d", max); goto error; } + huges[idx].addr = v_start; - huges[idx].size = v_end - v_start; + huges[idx].size = v_end - v_start; /* To be corrected later */ snprintf(huges[idx].path, PATH_MAX, "%s", tmp); idx++; } + /* correct the size for files who have many regions */ + for (k = 0; k < idx; ++k) { + if (stat(huges[k].path, &stats) < 0) { + PMD_DRV_LOG(ERR, "Failed to stat %s, %s\n", + huges[k].path, strerror(errno)); + continue; + } + huges[k].size = stats.st_size; + PMD_DRV_LOG(INFO, "file %s, size %zx\n", + huges[k].path, huges[k].size); + } + fclose(f); return idx; @@ -263,6 +287,9 @@ vhost_user_sock(struct virtio_user_dev *dev, PMD_DRV_LOG(INFO, "%s", vhost_msg_strings[req]); + if (dev->is_server && vhostfd < 0) + return -1; + msg.request = req; msg.flags = VHOST_USER_VERSION; msg.size = 0; @@ -378,6 +405,30 @@ vhost_user_sock(struct virtio_user_dev *dev, return 0; } +#define MAX_VIRTIO_USER_BACKLOG 1 +static int +virtio_user_start_server(struct virtio_user_dev *dev, struct sockaddr_un *un) +{ + int ret; + int flag; + int fd = dev->listenfd; + + ret = bind(fd, (struct sockaddr *)un, sizeof(*un)); + if (ret < 0) { + PMD_DRV_LOG(ERR, "failed to bind to %s: %s; remove it and try again\n", + dev->path, strerror(errno)); + return -1; + } + ret = listen(fd, MAX_VIRTIO_USER_BACKLOG); + if (ret < 0) + return -1; + + flag = fcntl(fd, F_GETFL); + fcntl(fd, F_SETFL, flag | O_NONBLOCK); + + return 0; +} + /** * Set up environment to talk with a vhost user backend. 
* @@ -405,13 +456,24 @@ vhost_user_setup(struct virtio_user_dev *dev) memset(&un, 0, sizeof(un)); un.sun_family = AF_UNIX; snprintf(un.sun_path, sizeof(un.sun_path), "%s", dev->path); - if (connect(fd, (struct sockaddr *)&un, sizeof(un)) < 0) { - PMD_DRV_LOG(ERR, "connect error, %s", strerror(errno)); - close(fd); - return -1; + + if (dev->is_server) { + dev->listenfd = fd; + if (virtio_user_start_server(dev, &un) < 0) { + PMD_DRV_LOG(ERR, "virtio-user startup fails in server mode"); + close(fd); + return -1; + } + dev->vhostfd = -1; + } else { + if (connect(fd, (struct sockaddr *)&un, sizeof(un)) < 0) { + PMD_DRV_LOG(ERR, "connect error, %s", strerror(errno)); + close(fd); + return -1; + } + dev->vhostfd = fd; } - dev->vhostfd = fd; return 0; } diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c index f90fee9e..4322527f 100644 --- a/drivers/net/virtio/virtio_user/virtio_user_dev.c +++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c @@ -17,6 +17,8 @@ #include "virtio_user_dev.h" #include "../virtio_ethdev.h" +#define VIRTIO_USER_MEM_EVENT_CLB_NAME "virtio_user_mem_event_clb" + static int virtio_user_create_queue(struct virtio_user_dev *dev, uint32_t queue_sel) { @@ -93,12 +95,28 @@ virtio_user_queue_setup(struct virtio_user_dev *dev, return 0; } +int +is_vhost_user_by_type(const char *path) +{ + struct stat sb; + + if (stat(path, &sb) == -1) + return 0; + + return S_ISSOCK(sb.st_mode); +} + int virtio_user_start_device(struct virtio_user_dev *dev) { uint64_t features; int ret; + pthread_mutex_lock(&dev->mutex); + + if (is_vhost_user_by_type(dev->path) && dev->vhostfd < 0) + goto error; + /* Do not check return as already done in init, or reset in stop */ dev->ops->send_request(dev, VHOST_USER_SET_OWNER, NULL); @@ -132,8 +150,12 @@ virtio_user_start_device(struct virtio_user_dev *dev) */ dev->ops->enable_qp(dev, 0, 1); + dev->started = true; + pthread_mutex_unlock(&dev->mutex); + return 0; error: + pthread_mutex_unlock(&dev->mutex); /* TODO: free resource here or caller to check */ return -1; } @@ -142,13 +164,17 @@ int virtio_user_stop_device(struct virtio_user_dev *dev) { uint32_t i; + pthread_mutex_lock(&dev->mutex); for (i = 0; i < dev->max_queue_pairs; ++i) dev->ops->enable_qp(dev, i, 0); if (dev->ops->send_request(dev, VHOST_USER_RESET_OWNER, NULL) < 0) { PMD_DRV_LOG(INFO, "Failed to reset the device\n"); + pthread_mutex_unlock(&dev->mutex); return -1; } + dev->started = false; + pthread_mutex_unlock(&dev->mutex); return 0; } @@ -174,17 +200,6 @@ parse_mac(struct virtio_user_dev *dev, const char *mac) } } -int -is_vhost_user_by_type(const char *path) -{ - struct stat sb; - - if (stat(path, &sb) == -1) - return 0; - - return S_ISSOCK(sb.st_mode); -} - static int virtio_user_dev_init_notify(struct virtio_user_dev *dev) { @@ -254,10 +269,41 @@ virtio_user_fill_intr_handle(struct virtio_user_dev *dev) eth_dev->intr_handle->fd = -1; if (dev->vhostfd >= 0) eth_dev->intr_handle->fd = dev->vhostfd; + else if (dev->is_server) + eth_dev->intr_handle->fd = dev->listenfd; return 0; } +static void +virtio_user_mem_event_cb(enum rte_mem_event type __rte_unused, + const void *addr __rte_unused, + size_t len __rte_unused, + void *arg) +{ + struct virtio_user_dev *dev = arg; + uint16_t i; + + pthread_mutex_lock(&dev->mutex); + + if (dev->started == false) + goto exit; + + /* Step 1: pause the active queues */ + for (i = 0; i < dev->queue_pairs; i++) + dev->ops->enable_qp(dev, i, 0); + + /* Step 2: update memory regions */ + 
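/*
 * Server-mode recap (hedged; the vdev string is an example, exact
 * invocation depends on the application): with something like
 * "virtio_user0,path=/tmp/vhost.sock,server=1", virtio-user owns the
 * unix socket, bind()s, listen()s and marks the fd non-blocking so the
 * port can come up later when a backend connects. Sketch of the accept
 * side used on (re)connect; fds are placeholders:
 */
int connfd = accept(listenfd, NULL, NULL);	/* -1 until a peer arrives */

if (connfd >= 0) {
	int fl = fcntl(connfd, F_GETFL);

	fcntl(connfd, F_SETFL, fl | O_NONBLOCK);
	/* hand connfd to the vhost-user message path */
}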
dev->ops->send_request(dev, VHOST_USER_SET_MEM_TABLE, NULL); + + /* Step 3: resume the active queues */ + for (i = 0; i < dev->queue_pairs; i++) + dev->ops->enable_qp(dev, i, 1); + +exit: + pthread_mutex_unlock(&dev->mutex); +} + static int virtio_user_dev_setup(struct virtio_user_dev *dev) { @@ -267,21 +313,32 @@ virtio_user_dev_setup(struct virtio_user_dev *dev) dev->vhostfds = NULL; dev->tapfds = NULL; - if (is_vhost_user_by_type(dev->path)) { - dev->ops = &ops_user; - } else { - dev->ops = &ops_kernel; - - dev->vhostfds = malloc(dev->max_queue_pairs * sizeof(int)); - dev->tapfds = malloc(dev->max_queue_pairs * sizeof(int)); - if (!dev->vhostfds || !dev->tapfds) { - PMD_INIT_LOG(ERR, "Failed to malloc"); + if (dev->is_server) { + if (access(dev->path, F_OK) == 0 && + !is_vhost_user_by_type(dev->path)) { + PMD_DRV_LOG(ERR, "Server mode doesn't support vhost-kernel!"); return -1; } - - for (q = 0; q < dev->max_queue_pairs; ++q) { - dev->vhostfds[q] = -1; - dev->tapfds[q] = -1; + dev->ops = &ops_user; + } else { + if (is_vhost_user_by_type(dev->path)) { + dev->ops = &ops_user; + } else { + dev->ops = &ops_kernel; + + dev->vhostfds = malloc(dev->max_queue_pairs * + sizeof(int)); + dev->tapfds = malloc(dev->max_queue_pairs * + sizeof(int)); + if (!dev->vhostfds || !dev->tapfds) { + PMD_INIT_LOG(ERR, "Failed to malloc"); + return -1; + } + + for (q = 0; q < dev->max_queue_pairs; ++q) { + dev->vhostfds[q] = -1; + dev->tapfds[q] = -1; + } } } @@ -320,7 +377,9 @@ int virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues, int cq, int queue_size, const char *mac, char **ifname) { + pthread_mutex_init(&dev->mutex, NULL); snprintf(dev->path, PATH_MAX, "%s", path); + dev->started = 0; dev->max_queue_pairs = queues; dev->queue_pairs = 1; /* mq disabled by default */ dev->queue_size = queue_size; @@ -337,18 +396,33 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues, return -1; } - if (dev->ops->send_request(dev, VHOST_USER_SET_OWNER, NULL) < 0) { - PMD_INIT_LOG(ERR, "set_owner fails: %s", strerror(errno)); - return -1; - } + if (!dev->is_server) { + if (dev->ops->send_request(dev, VHOST_USER_SET_OWNER, + NULL) < 0) { + PMD_INIT_LOG(ERR, "set_owner fails: %s", + strerror(errno)); + return -1; + } - if (dev->ops->send_request(dev, VHOST_USER_GET_FEATURES, - &dev->device_features) < 0) { - PMD_INIT_LOG(ERR, "get_features failed: %s", strerror(errno)); - return -1; + if (dev->ops->send_request(dev, VHOST_USER_GET_FEATURES, + &dev->device_features) < 0) { + PMD_INIT_LOG(ERR, "get_features failed: %s", + strerror(errno)); + return -1; + } + } else { + /* We just pretend vhost-user can support all these features. + * Note that this could be problematic that if some feature is + * negotiated but not supported by the vhost-user which comes + * later. 
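/*
 * Contract of the rte_mem_event_callback_register() call used above
 * (hedged sketch; mem_event_cb() here is illustrative): the callback
 * fires on RTE_MEM_EVENT_ALLOC/RTE_MEM_EVENT_FREE, and virtio-user
 * reacts with the pause / SET_MEM_TABLE / resume sequence shown in
 * Steps 1-3.
 */
static void
mem_event_cb(enum rte_mem_event type __rte_unused,
	     const void *addr __rte_unused, size_t len __rte_unused,
	     void *arg)
{
	struct virtio_user_dev *dev = arg;

	/* pause queues, re-send memory table, resume queues */
	RTE_SET_USED(dev);
}

/* usage: rte_mem_event_callback_register("example_clb", mem_event_cb, dev); */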
+ */ + dev->device_features = VIRTIO_USER_SUPPORTED_FEATURES; } + if (dev->mac_specified) dev->device_features |= (1ull << VIRTIO_NET_F_MAC); + else + dev->device_features &= ~(1ull << VIRTIO_NET_F_MAC); if (cq) { /* device does not really need to know anything about CQ, @@ -371,6 +445,15 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues, dev->device_features &= VIRTIO_USER_SUPPORTED_FEATURES; + if (rte_mem_event_callback_register(VIRTIO_USER_MEM_EVENT_CLB_NAME, + virtio_user_mem_event_cb, dev)) { + if (rte_errno != ENOTSUP) { + PMD_INIT_LOG(ERR, "Failed to register mem event" + " callback\n"); + return -1; + } + } + return 0; } @@ -381,6 +464,8 @@ virtio_user_dev_uninit(struct virtio_user_dev *dev) virtio_user_stop_device(dev); + rte_mem_event_callback_unregister(VIRTIO_USER_MEM_EVENT_CLB_NAME, dev); + for (i = 0; i < dev->max_queue_pairs * 2; ++i) { close(dev->callfds[i]); close(dev->kickfds[i]); @@ -388,6 +473,11 @@ virtio_user_dev_uninit(struct virtio_user_dev *dev) close(dev->vhostfd); + if (dev->is_server && dev->listenfd >= 0) { + close(dev->listenfd); + dev->listenfd = -1; + } + if (dev->vhostfds) { for (i = 0; i < dev->max_queue_pairs; ++i) close(dev->vhostfds[i]); @@ -396,9 +486,12 @@ virtio_user_dev_uninit(struct virtio_user_dev *dev) } free(dev->ifname); + + if (dev->is_server) + unlink(dev->path); } -static uint8_t +uint8_t virtio_user_handle_mq(struct virtio_user_dev *dev, uint16_t q_pairs) { uint16_t i; @@ -410,11 +503,17 @@ virtio_user_handle_mq(struct virtio_user_dev *dev, uint16_t q_pairs) return -1; } - for (i = 0; i < q_pairs; ++i) - ret |= dev->ops->enable_qp(dev, i, 1); - for (i = q_pairs; i < dev->max_queue_pairs; ++i) - ret |= dev->ops->enable_qp(dev, i, 0); - + /* Server mode can't enable queue pairs if vhostfd is invalid, + * always return 0 in this case. 
+ */ + if (dev->vhostfd >= 0) { + for (i = 0; i < q_pairs; ++i) + ret |= dev->ops->enable_qp(dev, i, 1); + for (i = q_pairs; i < dev->max_queue_pairs; ++i) + ret |= dev->ops->enable_qp(dev, i, 0); + } else if (!dev->is_server) { + ret = ~0; + } dev->queue_pairs = q_pairs; return ret; diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.h b/drivers/net/virtio/virtio_user/virtio_user_dev.h index 64467b4f..d2d4cb82 100644 --- a/drivers/net/virtio/virtio_user/virtio_user_dev.h +++ b/drivers/net/virtio/virtio_user/virtio_user_dev.h @@ -6,6 +6,7 @@ #define _VIRTIO_USER_DEV_H #include +#include #include "../virtio_pci.h" #include "../virtio_ring.h" #include "vhost.h" @@ -13,6 +14,8 @@ struct virtio_user_dev { /* for vhost_user backend */ int vhostfd; + int listenfd; /* listening fd */ + bool is_server; /* server or client mode */ /* for vhost_kernel backend */ char *ifname; @@ -31,11 +34,13 @@ struct virtio_user_dev { */ uint64_t device_features; /* supported features by device */ uint8_t status; - uint8_t port_id; + uint16_t port_id; uint8_t mac_addr[ETHER_ADDR_LEN]; char path[PATH_MAX]; struct vring vrings[VIRTIO_MAX_VIRTQUEUES]; struct virtio_user_backend_ops *ops; + pthread_mutex_t mutex; + bool started; }; int is_vhost_user_by_type(const char *path); @@ -45,4 +50,5 @@ int virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues, int cq, int queue_size, const char *mac, char **ifname); void virtio_user_dev_uninit(struct virtio_user_dev *dev); void virtio_user_handle_cq(struct virtio_user_dev *dev, uint16_t queue_idx); +uint8_t virtio_user_handle_mq(struct virtio_user_dev *dev, uint16_t q_pairs); #endif diff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c index 26364900..1c102ca7 100644 --- a/drivers/net/virtio/virtio_user_ethdev.c +++ b/drivers/net/virtio/virtio_user_ethdev.c @@ -24,15 +24,100 @@ #define virtio_user_get_dev(hw) \ ((struct virtio_user_dev *)(hw)->virtio_user_dev) +static int +virtio_user_server_reconnect(struct virtio_user_dev *dev) +{ + int ret; + int flag; + int connectfd; + uint64_t features = dev->device_features; + struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->port_id]; + + connectfd = accept(dev->listenfd, NULL, NULL); + if (connectfd < 0) + return -1; + + dev->vhostfd = connectfd; + if (dev->ops->send_request(dev, VHOST_USER_GET_FEATURES, + &dev->device_features) < 0) { + PMD_INIT_LOG(ERR, "get_features failed: %s", + strerror(errno)); + return -1; + } + + features &= ~dev->device_features; + /* For following bits, vhost-user doesn't really need to know */ + features &= ~(1ull << VIRTIO_NET_F_MAC); + features &= ~(1ull << VIRTIO_NET_F_CTRL_VLAN); + features &= ~(1ull << VIRTIO_NET_F_CTRL_MAC_ADDR); + features &= ~(1ull << VIRTIO_NET_F_STATUS); + if (features) + PMD_INIT_LOG(ERR, "WARNING: Some features 0x%" PRIx64 " are not supported by vhost-user!", + features); + + dev->features &= dev->device_features; + + flag = fcntl(connectfd, F_GETFD); + fcntl(connectfd, F_SETFL, flag | O_NONBLOCK); + + ret = virtio_user_start_device(dev); + if (ret < 0) + return -1; + + if (dev->queue_pairs > 1) { + ret = virtio_user_handle_mq(dev, dev->queue_pairs); + if (ret != 0) { + PMD_INIT_LOG(ERR, "Fails to enable multi-queue pairs!"); + return -1; + } + } + if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) { + if (rte_intr_disable(eth_dev->intr_handle) < 0) { + PMD_DRV_LOG(ERR, "interrupt disable failed"); + return -1; + } + rte_intr_callback_unregister(eth_dev->intr_handle, + virtio_interrupt_handler, + eth_dev); 
+ eth_dev->intr_handle->fd = connectfd; + rte_intr_callback_register(eth_dev->intr_handle, + virtio_interrupt_handler, eth_dev); + + if (rte_intr_enable(eth_dev->intr_handle) < 0) { + PMD_DRV_LOG(ERR, "interrupt enable failed"); + return -1; + } + } + PMD_INIT_LOG(NOTICE, "server mode virtio-user reconnection succeeded!"); + return 0; +} + static void virtio_user_delayed_handler(void *param) { struct virtio_hw *hw = (struct virtio_hw *)param; - struct rte_eth_dev *dev = &rte_eth_devices[hw->port_id]; + struct rte_eth_dev *eth_dev = &rte_eth_devices[hw->port_id]; + struct virtio_user_dev *dev = virtio_user_get_dev(hw); - rte_intr_callback_unregister(dev->intr_handle, - virtio_interrupt_handler, - dev); + if (rte_intr_disable(eth_dev->intr_handle) < 0) { + PMD_DRV_LOG(ERR, "interrupt disable failed"); + return; + } + rte_intr_callback_unregister(eth_dev->intr_handle, + virtio_interrupt_handler, eth_dev); + if (dev->is_server) { + if (dev->vhostfd >= 0) { + close(dev->vhostfd); + dev->vhostfd = -1; + } + eth_dev->intr_handle->fd = dev->listenfd; + rte_intr_callback_register(eth_dev->intr_handle, + virtio_interrupt_handler, eth_dev); + if (rte_intr_enable(eth_dev->intr_handle) < 0) { + PMD_DRV_LOG(ERR, "interrupt enable failed"); + return; + } + } } static void @@ -67,12 +152,10 @@ virtio_user_read_dev_config(struct virtio_hw *hw, size_t offset, dev->status &= (~VIRTIO_NET_S_LINK_UP); PMD_DRV_LOG(ERR, "virtio-user port %u is down", hw->port_id); - /* Only client mode is available now. Once the - * connection is broken, it can never be up - * again. Besides, this function could be called - * in the process of interrupt handling, - * callback cannot be unregistered here, set an - * alarm to do it. + + /* This function could be called in the process + * of interrupt handling, where the callback cannot + * be unregistered; set an alarm to do it.
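/* A short sketch (not part of this patch) of the deferral pattern used
 * here: work that cannot run inside an interrupt callback is pushed to
 * the EAL alarm thread with rte_eal_alarm_set(). The helper names are
 * hypothetical; the 1 us delay mirrors the call below.
 */
#include <rte_alarm.h>
#include <rte_log.h>

static void
deferred_teardown(void *param)
{
    /* Runs later in the interrupt thread, outside the callback that
     * scheduled it, so unregistering that callback is now legal. */
    RTE_LOG(INFO, PMD, "deferred teardown for %p\n", param);
}

static void
schedule_teardown(void *ctx)
{
    /* One-shot timer: fires once, 1 us from now. */
    if (rte_eal_alarm_set(1, deferred_teardown, ctx) < 0)
        RTE_LOG(ERR, PMD, "failed to arm teardown alarm\n");
}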
*/ rte_eal_alarm_set(1, virtio_user_delayed_handler, (void *)hw); @@ -85,7 +168,12 @@ virtio_user_read_dev_config(struct virtio_hw *hw, size_t offset, PMD_DRV_LOG(ERR, "error clearing O_NONBLOCK flag"); return; } + } else if (dev->is_server) { + dev->status &= (~VIRTIO_NET_S_LINK_UP); + if (virtio_user_server_reconnect(dev) >= 0) + dev->status |= VIRTIO_NET_S_LINK_UP; } + *(uint16_t *)dst = dev->status; } @@ -278,12 +366,15 @@ static const char *valid_args[] = { VIRTIO_USER_ARG_QUEUE_SIZE, #define VIRTIO_USER_ARG_INTERFACE_NAME "iface" VIRTIO_USER_ARG_INTERFACE_NAME, +#define VIRTIO_USER_ARG_SERVER_MODE "server" + VIRTIO_USER_ARG_SERVER_MODE, NULL }; #define VIRTIO_USER_DEF_CQ_EN 0 #define VIRTIO_USER_DEF_Q_NUM 1 #define VIRTIO_USER_DEF_Q_SZ 256 +#define VIRTIO_USER_DEF_SERVER_MODE 0 static int get_string_arg(const char *key __rte_unused, @@ -378,6 +469,7 @@ virtio_user_pmd_probe(struct rte_vdev_device *dev) uint64_t queues = VIRTIO_USER_DEF_Q_NUM; uint64_t cq = VIRTIO_USER_DEF_CQ_EN; uint64_t queue_size = VIRTIO_USER_DEF_Q_SZ; + uint64_t server_mode = VIRTIO_USER_DEF_SERVER_MODE; char *path = NULL; char *ifname = NULL; char *mac_addr = NULL; @@ -445,6 +537,15 @@ virtio_user_pmd_probe(struct rte_vdev_device *dev) } } + if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_SERVER_MODE) == 1) { + if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_SERVER_MODE, + &get_integer_arg, &server_mode) < 0) { + PMD_INIT_LOG(ERR, "failed to parse %s", + VIRTIO_USER_ARG_SERVER_MODE); + goto end; + } + } + if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_CQ_NUM) == 1) { if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_CQ_NUM, &get_integer_arg, &cq) < 0) { @@ -469,6 +570,8 @@ virtio_user_pmd_probe(struct rte_vdev_device *dev) } if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + struct virtio_user_dev *vu_dev; + eth_dev = virtio_user_eth_dev_alloc(dev); if (!eth_dev) { PMD_INIT_LOG(ERR, "virtio_user fails to alloc device"); goto end; @@ -476,12 +579,18 @@ virtio_user_pmd_probe(struct rte_vdev_device *dev) } hw = eth_dev->data->dev_private; + vu_dev = virtio_user_get_dev(hw); + if (server_mode == 1) + vu_dev->is_server = true; + else + vu_dev->is_server = false; if (virtio_user_dev_init(hw->virtio_user_dev, path, queues, cq, queue_size, mac_addr, &ifname) < 0) { PMD_INIT_LOG(ERR, "virtio_user_dev_init fails"); virtio_user_eth_dev_free(eth_dev); goto end; } + } else { eth_dev = rte_eth_dev_attach_secondary(rte_vdev_device_name(dev)); if (!eth_dev) goto end; @@ -494,6 +603,8 @@ virtio_user_pmd_probe(struct rte_vdev_device *dev) virtio_user_eth_dev_free(eth_dev); goto end; } + + rte_eth_dev_probing_finish(eth_dev); ret = 0; end: diff --git a/drivers/net/vmxnet3/Makefile b/drivers/net/vmxnet3/Makefile index 6bfbf019..f1141da6 100644 --- a/drivers/net/vmxnet3/Makefile +++ b/drivers/net/vmxnet3/Makefile @@ -8,6 +8,7 @@ include $(RTE_SDK)/mk/rte.vars.mk # LIB = librte_pmd_vmxnet3_uio.a +CFLAGS += -DALLOW_EXPERIMENTAL_API CFLAGS += -O3 CFLAGS += $(WERROR_FLAGS) @@ -15,7 +16,8 @@ ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y) # # CFLAGS for icc # -CFLAGS_BASE_DRIVER = -wd174 -wd593 -wd869 -wd981 -wd2259 +CFLAGS_BASE_DRIVER = -diag-disable 174 -diag-disable 593 -diag-disable 869 +CFLAGS_BASE_DRIVER += -diag-disable 981 -diag-disable 2259 else ifeq ($(CONFIG_RTE_TOOLCHAIN_CLANG),y) # diff --git a/drivers/net/vmxnet3/base/upt1_defs.h b/drivers/net/vmxnet3/base/upt1_defs.h index cf9141b2..5fd7a397 100644 --- a/drivers/net/vmxnet3/base/upt1_defs.h +++ b/drivers/net/vmxnet3/base/upt1_defs.h @@ -1,9 +1,6 @@ -/********************************************************* +/*
SPDX-License-Identifier: BSD-3-Clause * Copyright (C) 2007 VMware, Inc. All rights reserved. - * - * SPDX-License-Identifier: BSD-3-Clause - * - *********************************************************/ + */ /* upt1_defs.h * diff --git a/drivers/net/vmxnet3/base/vmxnet3_defs.h b/drivers/net/vmxnet3/base/vmxnet3_defs.h index a455e270..bbec708c 100644 --- a/drivers/net/vmxnet3/base/vmxnet3_defs.h +++ b/drivers/net/vmxnet3/base/vmxnet3_defs.h @@ -1,9 +1,6 @@ -/********************************************************* +/* SPDX-License-Identifier: BSD-3-Clause * Copyright (C) 2007 VMware, Inc. All rights reserved. - * - * SPDX-License-Identifier: BSD-3-Clause - * - *********************************************************/ + */ /* * vmxnet3_defs.h -- @@ -327,7 +324,32 @@ struct Vmxnet3_RxCompDescExt { uint8 segCnt; /* Number of aggregated packets */ uint8 dupAckCnt; /* Number of duplicate Acks */ __le16 tsDelta; /* TCP timestamp difference */ - __le32 dword2[2]; + __le32 dword2; +#ifdef __BIG_ENDIAN_BITFIELD + uint32 gen : 1; /* generation bit */ + uint32 type : 7; /* completion type */ + uint32 fcs : 1; /* Frame CRC correct */ + uint32 frg : 1; /* IP Fragment */ + uint32 v4 : 1; /* IPv4 */ + uint32 v6 : 1; /* IPv6 */ + uint32 ipc : 1; /* IP Checksum Correct */ + uint32 tcp : 1; /* TCP packet */ + uint32 udp : 1; /* UDP packet */ + uint32 tuc : 1; /* TCP/UDP Checksum Correct */ + uint32 mss : 16; +#else + uint32 mss : 16; + uint32 tuc : 1; /* TCP/UDP Checksum Correct */ + uint32 udp : 1; /* UDP packet */ + uint32 tcp : 1; /* TCP packet */ + uint32 ipc : 1; /* IP Checksum Correct */ + uint32 v6 : 1; /* IPv6 */ + uint32 v4 : 1; /* IPv4 */ + uint32 frg : 1; /* IP Fragment */ + uint32 fcs : 1; /* Frame CRC correct */ + uint32 type : 7; /* completion type */ + uint32 gen : 1; /* generation bit */ +#endif /* __BIG_ENDIAN_BITFIELD */ } #include "vmware_pack_end.h" Vmxnet3_RxCompDescExt; diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.c b/drivers/net/vmxnet3/vmxnet3_ethdev.c index 4e68aae6..ba932ff2 100644 --- a/drivers/net/vmxnet3/vmxnet3_ethdev.c +++ b/drivers/net/vmxnet3/vmxnet3_ethdev.c @@ -20,7 +20,6 @@ #include #include #include -#include #include #include #include @@ -43,6 +42,23 @@ #define VMXNET3_TX_MAX_SEG UINT8_MAX +#define VMXNET3_TX_OFFLOAD_CAP \ + (DEV_TX_OFFLOAD_VLAN_INSERT | \ + DEV_TX_OFFLOAD_IPV4_CKSUM | \ + DEV_TX_OFFLOAD_TCP_CKSUM | \ + DEV_TX_OFFLOAD_UDP_CKSUM | \ + DEV_TX_OFFLOAD_TCP_TSO | \ + DEV_TX_OFFLOAD_MULTI_SEGS) + +#define VMXNET3_RX_OFFLOAD_CAP \ + (DEV_RX_OFFLOAD_VLAN_STRIP | \ + DEV_RX_OFFLOAD_SCATTER | \ + DEV_RX_OFFLOAD_IPV4_CKSUM | \ + DEV_RX_OFFLOAD_UDP_CKSUM | \ + DEV_RX_OFFLOAD_TCP_CKSUM | \ + DEV_RX_OFFLOAD_TCP_LRO | \ + DEV_RX_OFFLOAD_JUMBO_FRAME) + static int eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev); static int eth_vmxnet3_dev_uninit(struct rte_eth_dev *eth_dev); static int vmxnet3_dev_configure(struct rte_eth_dev *dev); @@ -73,7 +89,7 @@ vmxnet3_dev_supported_ptypes_get(struct rte_eth_dev *dev); static int vmxnet3_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vid, int on); static int vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask); -static void vmxnet3_mac_addr_set(struct rte_eth_dev *dev, +static int vmxnet3_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr); static void vmxnet3_interrupt_handler(void *param); @@ -151,66 +167,14 @@ gpa_zone_reserve(struct rte_eth_dev *dev, uint32_t size, if (mz) rte_memzone_free(mz); return rte_memzone_reserve_aligned(z_name, size, socket_id, - 0, align); + 
RTE_MEMZONE_IOVA_CONTIG, align); } if (mz) return mz; - return rte_memzone_reserve_aligned(z_name, size, socket_id, 0, align); -} - -/** - * Atomically reads the link status information from global - * structure rte_eth_dev. - * - * @param dev - * - Pointer to the structure rte_eth_dev to read from. - * - Pointer to the buffer to be saved with the link status. - * - * @return - * - On success, zero. - * - On failure, negative value. - */ - -static int -vmxnet3_dev_atomic_read_link_status(struct rte_eth_dev *dev, - struct rte_eth_link *link) -{ - struct rte_eth_link *dst = link; - struct rte_eth_link *src = &(dev->data->dev_link); - - if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, - *(uint64_t *)src) == 0) - return -1; - - return 0; -} - -/** - * Atomically writes the link status information into global - * structure rte_eth_dev. - * - * @param dev - * - Pointer to the structure rte_eth_dev to write to. - * - Pointer to the buffer to be saved with the link status. - * - * @return - * - On success, zero. - * - On failure, negative value. - */ -static int -vmxnet3_dev_atomic_write_link_status(struct rte_eth_dev *dev, - struct rte_eth_link *link) -{ - struct rte_eth_link *dst = &(dev->data->dev_link); - struct rte_eth_link *src = link; - - if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, - *(uint64_t *)src) == 0) - return -1; - - return 0; + return rte_memzone_reserve_aligned(z_name, size, socket_id, + RTE_MEMZONE_IOVA_CONTIG, align); } /* @@ -267,6 +231,7 @@ eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev) struct rte_pci_device *pci_dev; struct vmxnet3_hw *hw = eth_dev->data->dev_private; uint32_t mac_hi, mac_lo, ver; + struct rte_eth_link link; PMD_INIT_FUNC_TRACE(); @@ -369,6 +334,13 @@ eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev) memset(hw->saved_tx_stats, 0, sizeof(hw->saved_tx_stats)); memset(hw->saved_rx_stats, 0, sizeof(hw->saved_rx_stats)); + /* set the initial link status */ + memset(&link, 0, sizeof(link)); + link.link_duplex = ETH_LINK_FULL_DUPLEX; + link.link_speed = ETH_SPEED_NUM_10G; + link.link_autoneg = ETH_LINK_FIXED; + rte_eth_linkstatus_set(eth_dev, &link); + return 0; } @@ -612,9 +584,12 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev) uint32_t mtu = dev->data->mtu; Vmxnet3_DriverShared *shared = hw->shared; Vmxnet3_DSDevRead *devRead = &shared->devRead; + uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads; uint32_t i; int ret; + hw->mtu = mtu; + shared->magic = VMXNET3_REV1_MAGIC; devRead->misc.driverInfo.version = VMXNET3_DRIVER_VERSION_NUM; @@ -644,6 +619,8 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev) Vmxnet3_TxQueueDesc *tqd = &hw->tqd_start[i]; vmxnet3_tx_queue_t *txq = dev->data->tx_queues[i]; + txq->shared = &hw->tqd_start[i]; + tqd->ctrl.txNumDeferred = 0; tqd->ctrl.txThreshold = 1; tqd->conf.txRingBasePA = txq->cmd_ring.basePA; @@ -664,6 +641,8 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev) Vmxnet3_RxQueueDesc *rqd = &hw->rqd_start[i]; vmxnet3_rx_queue_t *rxq = dev->data->rx_queues[i]; + rxq->shared = &hw->rqd_start[i]; + rqd->conf.rxRingBasePA[0] = rxq->cmd_ring[0].basePA; rqd->conf.rxRingBasePA[1] = rxq->cmd_ring[1].basePA; rqd->conf.compRingBasePA = rxq->comp_ring.basePA; @@ -685,10 +664,10 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev) devRead->rxFilterConf.rxMode = 0; /* Setting up feature flags */ - if (dev->data->dev_conf.rxmode.hw_ip_checksum) + if (rx_offloads & DEV_RX_OFFLOAD_CHECKSUM) devRead->misc.uptFeatures |= VMXNET3_F_RXCSUM; - if (dev->data->dev_conf.rxmode.enable_lro) { + if 
(rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) { devRead->misc.uptFeatures |= VMXNET3_F_LRO; devRead->misc.maxNumRxSG = 0; } @@ -853,7 +832,10 @@ vmxnet3_dev_stop(struct rte_eth_dev *dev) /* Clear recorded link status */ memset(&link, 0, sizeof(link)); - vmxnet3_dev_atomic_write_link_status(dev, &link); + link.link_duplex = ETH_LINK_FULL_DUPLEX; + link.link_speed = ETH_SPEED_NUM_10G; + link.link_autoneg = ETH_LINK_FIXED; + rte_eth_linkstatus_set(dev, &link); } /* @@ -1054,18 +1036,16 @@ vmxnet3_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) stats->q_errors[i] = rxStats.pktsRxError; stats->ierrors += rxStats.pktsRxError; - stats->rx_nombuf += rxStats.pktsRxOutOfBuf; + stats->imissed += rxStats.pktsRxOutOfBuf; } return 0; } static void -vmxnet3_dev_info_get(struct rte_eth_dev *dev, +vmxnet3_dev_info_get(struct rte_eth_dev *dev __rte_unused, struct rte_eth_dev_info *dev_info) { - dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev); - dev_info->max_rx_queues = VMXNET3_MAX_RX_QUEUES; dev_info->max_tx_queues = VMXNET3_MAX_TX_QUEUES; dev_info->min_rx_bufsize = 1518 + RTE_PKTMBUF_HEADROOM; @@ -1090,17 +1070,10 @@ vmxnet3_dev_info_get(struct rte_eth_dev *dev, .nb_mtu_seg_max = VMXNET3_MAX_TXD_PER_PKT, }; - dev_info->rx_offload_capa = - DEV_RX_OFFLOAD_VLAN_STRIP | - DEV_RX_OFFLOAD_UDP_CKSUM | - DEV_RX_OFFLOAD_TCP_CKSUM | - DEV_RX_OFFLOAD_TCP_LRO; - - dev_info->tx_offload_capa = - DEV_TX_OFFLOAD_VLAN_INSERT | - DEV_TX_OFFLOAD_TCP_CKSUM | - DEV_TX_OFFLOAD_UDP_CKSUM | - DEV_TX_OFFLOAD_TCP_TSO; + dev_info->rx_offload_capa = VMXNET3_RX_OFFLOAD_CAP; + dev_info->rx_queue_offload_capa = 0; + dev_info->tx_offload_capa = VMXNET3_TX_OFFLOAD_CAP; + dev_info->tx_queue_offload_capa = 0; } static const uint32_t * @@ -1117,13 +1090,14 @@ vmxnet3_dev_supported_ptypes_get(struct rte_eth_dev *dev) return NULL; } -static void +static int vmxnet3_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr) { struct vmxnet3_hw *hw = dev->data->dev_private; ether_addr_copy(mac_addr, (struct ether_addr *)(hw->perm_addr)); vmxnet3_write_mac(hw, mac_addr->addr_bytes); + return 0; } /* return 0 means link status changed, -1 means not changed */ @@ -1132,25 +1106,21 @@ __vmxnet3_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete) { struct vmxnet3_hw *hw = dev->data->dev_private; - struct rte_eth_link old = { 0 }, link; + struct rte_eth_link link; uint32_t ret; memset(&link, 0, sizeof(link)); - vmxnet3_dev_atomic_read_link_status(dev, &old); VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK); ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD); - if (ret & 0x1) { + if (ret & 0x1) link.link_status = ETH_LINK_UP; - link.link_duplex = ETH_LINK_FULL_DUPLEX; - link.link_speed = ETH_SPEED_NUM_10G; - link.link_autoneg = ETH_LINK_AUTONEG; - } - - vmxnet3_dev_atomic_write_link_status(dev, &link); + link.link_duplex = ETH_LINK_FULL_DUPLEX; + link.link_speed = ETH_SPEED_NUM_10G; + link.link_autoneg = ETH_LINK_FIXED; - return (old.link_status == link.link_status) ? 
-1 : 0; + return rte_eth_linkstatus_set(dev, &link); } static int @@ -1197,8 +1167,9 @@ vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev) { struct vmxnet3_hw *hw = dev->data->dev_private; uint32_t *vf_table = hw->shared->devRead.rxFilterConf.vfTable; + uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads; - if (dev->data->dev_conf.rxmode.hw_vlan_filter) + if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE); else memset(vf_table, 0xff, VMXNET3_VFT_TABLE_SIZE); @@ -1260,9 +1231,10 @@ vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask) struct vmxnet3_hw *hw = dev->data->dev_private; Vmxnet3_DSDevRead *devRead = &hw->shared->devRead; uint32_t *vf_table = devRead->rxFilterConf.vfTable; + uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads; if (mask & ETH_VLAN_STRIP_MASK) { - if (dev->data->dev_conf.rxmode.hw_vlan_strip) + if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP) devRead->misc.uptFeatures |= UPT1_F_RXVLAN; else devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN; @@ -1272,7 +1244,7 @@ vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask) } if (mask & ETH_VLAN_FILTER_MASK) { - if (dev->data->dev_conf.rxmode.hw_vlan_filter) + if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE); else memset(vf_table, 0xff, VMXNET3_VFT_TABLE_SIZE); diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.h b/drivers/net/vmxnet3/vmxnet3_ethdev.h index b2a8cf35..d3f2b352 100644 --- a/drivers/net/vmxnet3/vmxnet3_ethdev.h +++ b/drivers/net/vmxnet3/vmxnet3_ethdev.h @@ -87,6 +87,7 @@ struct vmxnet3_hw { uint64_t queueDescPA; uint16_t queue_desc_len; + uint16_t mtu; VMXNET3_RSSConf *rss_conf; uint64_t rss_confPA; diff --git a/drivers/net/vmxnet3/vmxnet3_ring.h b/drivers/net/vmxnet3/vmxnet3_ring.h index 552180e8..50992349 100644 --- a/drivers/net/vmxnet3/vmxnet3_ring.h +++ b/drivers/net/vmxnet3/vmxnet3_ring.h @@ -14,7 +14,7 @@ #define VMXNET3_DEF_RX_RING_SIZE 128 /* Default rx data ring desc size */ -#define VMXNET3_DEF_RXDATA_DESC_SIZE 128 +#define VMXNET3_DEF_RXDATA_DESC_SIZE 256 #define VMXNET3_SUCCESS 0 #define VMXNET3_FAIL -1 diff --git a/drivers/net/vmxnet3/vmxnet3_rxtx.c b/drivers/net/vmxnet3/vmxnet3_rxtx.c index 3a8c62fc..cf85f3d6 100644 --- a/drivers/net/vmxnet3/vmxnet3_rxtx.c +++ b/drivers/net/vmxnet3/vmxnet3_rxtx.c @@ -457,6 +457,14 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, rte_pktmbuf_pkt_len(txm) <= txq->txdata_desc_size) { struct Vmxnet3_TxDataDesc *tdd; + /* Skip empty packets */ + if (unlikely(rte_pktmbuf_pkt_len(txm) == 0)) { + txq->stats.drop_total++; + rte_pktmbuf_free(txm); + nb_tx++; + continue; + } + tdd = (struct Vmxnet3_TxDataDesc *) ((uint8 *)txq->data_ring.base + txq->cmd_ring.next2fill * @@ -477,6 +485,11 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, * maximum size of mbuf segment size. 
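/* A compact sketch (not part of this patch) of the zero-length guard the
 * Tx path gains above: empty mbufs are freed and counted as drops before
 * they reach the hardware ring. The helper and counter are hypothetical.
 */
#include <rte_mbuf.h>
#include <rte_branch_prediction.h>

static uint16_t
drop_empty_pkts(struct rte_mbuf **pkts, uint16_t nb_pkts, uint64_t *drops)
{
    uint16_t i, kept = 0;

    for (i = 0; i < nb_pkts; i++) {
        if (unlikely(rte_pktmbuf_pkt_len(pkts[i]) == 0)) {
            rte_pktmbuf_free(pkts[i]);
            (*drops)++;
            continue;
        }
        pkts[kept++] = pkts[i]; /* compact the burst in place */
    }
    return kept;
}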
*/ gdesc = txq->cmd_ring.base + txq->cmd_ring.next2fill; + + /* Skip empty segments */ + if (unlikely(m_seg->data_len == 0)) + continue; + if (copy_size) { uint64 offset = (uint64)txq->cmd_ring.next2fill * @@ -646,37 +659,154 @@ vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t *rxq, uint8_t ring_id) return i; } - -/* Receive side checksum and other offloads */ -static void -vmxnet3_rx_offload(const Vmxnet3_RxCompDesc *rcd, struct rte_mbuf *rxm) +/* MSS not provided by vmxnet3, guess one with available information */ +static uint16_t +vmxnet3_guess_mss(struct vmxnet3_hw *hw, const Vmxnet3_RxCompDesc *rcd, + struct rte_mbuf *rxm) { - /* Check for RSS */ - if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE) { - rxm->ol_flags |= PKT_RX_RSS_HASH; - rxm->hash.rss = rcd->rssHash; - } + uint32_t hlen, slen; + struct ipv4_hdr *ipv4_hdr; + struct ipv6_hdr *ipv6_hdr; + struct tcp_hdr *tcp_hdr; + char *ptr; + + RTE_ASSERT(rcd->tcp); + + ptr = rte_pktmbuf_mtod(rxm, char *); + slen = rte_pktmbuf_data_len(rxm); + hlen = sizeof(struct ether_hdr); - /* Check packet type, checksum errors, etc. Only support IPv4 for now. */ if (rcd->v4) { - struct ether_hdr *eth = rte_pktmbuf_mtod(rxm, struct ether_hdr *); - struct ipv4_hdr *ip = (struct ipv4_hdr *)(eth + 1); + if (unlikely(slen < hlen + sizeof(struct ipv4_hdr))) + return hw->mtu - sizeof(struct ipv4_hdr) + - sizeof(struct tcp_hdr); + + ipv4_hdr = (struct ipv4_hdr *)(ptr + hlen); + hlen += (ipv4_hdr->version_ihl & IPV4_HDR_IHL_MASK) * + IPV4_IHL_MULTIPLIER; + } else if (rcd->v6) { + if (unlikely(slen < hlen + sizeof(struct ipv6_hdr))) + return hw->mtu - sizeof(struct ipv6_hdr) - + sizeof(struct tcp_hdr); + + ipv6_hdr = (struct ipv6_hdr *)(ptr + hlen); + hlen += sizeof(struct ipv6_hdr); + if (unlikely(ipv6_hdr->proto != IPPROTO_TCP)) { + int frag; + + rte_net_skip_ip6_ext(ipv6_hdr->proto, rxm, + &hlen, &frag); + } + } + + if (unlikely(slen < hlen + sizeof(struct tcp_hdr))) + return hw->mtu - hlen - sizeof(struct tcp_hdr) + + sizeof(struct ether_hdr); - if (((ip->version_ihl & 0xf) << 2) > (int)sizeof(struct ipv4_hdr)) - rxm->packet_type = RTE_PTYPE_L3_IPV4_EXT; - else - rxm->packet_type = RTE_PTYPE_L3_IPV4; + tcp_hdr = (struct tcp_hdr *)(ptr + hlen); + hlen += (tcp_hdr->data_off & 0xf0) >> 2; - if (!rcd->cnc) { - if (!rcd->ipc) - rxm->ol_flags |= PKT_RX_IP_CKSUM_BAD; + if (rxm->udata64 > 1) + return (rte_pktmbuf_pkt_len(rxm) - hlen + + rxm->udata64 - 1) / rxm->udata64; + else + return hw->mtu - hlen + sizeof(struct ether_hdr); +} - if ((rcd->tcp || rcd->udp) && !rcd->tuc) - rxm->ol_flags |= PKT_RX_L4_CKSUM_BAD; +/* Receive side checksum and other offloads */ +static inline void +vmxnet3_rx_offload(struct vmxnet3_hw *hw, const Vmxnet3_RxCompDesc *rcd, + struct rte_mbuf *rxm, const uint8_t sop) +{ + uint64_t ol_flags = rxm->ol_flags; + uint32_t packet_type = rxm->packet_type; + + /* Offloads set in sop */ + if (sop) { + /* Set packet type */ + packet_type |= RTE_PTYPE_L2_ETHER; + + /* Check large packet receive */ + if (VMXNET3_VERSION_GE_2(hw) && + rcd->type == VMXNET3_CDTYPE_RXCOMP_LRO) { + const Vmxnet3_RxCompDescExt *rcde = + (const Vmxnet3_RxCompDescExt *)rcd; + + rxm->tso_segsz = rcde->mss; + rxm->udata64 = rcde->segCnt; + ol_flags |= PKT_RX_LRO; + } + } else { /* Offloads set in eop */ + /* Check for RSS */ + if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE) { + ol_flags |= PKT_RX_RSS_HASH; + rxm->hash.rss = rcd->rssHash; + } + + /* Check for hardware stripped VLAN tag */ + if (rcd->ts) { + ol_flags |= (PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED); + rxm->vlan_tci = 
rte_le_to_cpu_16((uint16_t)rcd->tci); + } + + /* Check packet type, checksum errors, etc. */ + if (rcd->cnc) { + ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN; + } else { + if (rcd->v4) { + packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN; + + if (rcd->ipc) + ol_flags |= PKT_RX_IP_CKSUM_GOOD; + else + ol_flags |= PKT_RX_IP_CKSUM_BAD; + + if (rcd->tuc) { + ol_flags |= PKT_RX_L4_CKSUM_GOOD; + if (rcd->tcp) + packet_type |= RTE_PTYPE_L4_TCP; + else + packet_type |= RTE_PTYPE_L4_UDP; + } else { + if (rcd->tcp) { + packet_type |= RTE_PTYPE_L4_TCP; + ol_flags |= PKT_RX_L4_CKSUM_BAD; + } else if (rcd->udp) { + packet_type |= RTE_PTYPE_L4_UDP; + ol_flags |= PKT_RX_L4_CKSUM_BAD; + } + } + } else if (rcd->v6) { + packet_type |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN; + + if (rcd->tuc) { + ol_flags |= PKT_RX_L4_CKSUM_GOOD; + if (rcd->tcp) + packet_type |= RTE_PTYPE_L4_TCP; + else + packet_type |= RTE_PTYPE_L4_UDP; + } else { + if (rcd->tcp) { + packet_type |= RTE_PTYPE_L4_TCP; + ol_flags |= PKT_RX_L4_CKSUM_BAD; + } else if (rcd->udp) { + packet_type |= RTE_PTYPE_L4_UDP; + ol_flags |= PKT_RX_L4_CKSUM_BAD; + } + } + } else { + packet_type |= RTE_PTYPE_UNKNOWN; + } + + /* Old variants of vmxnet3 do not provide MSS */ + if ((ol_flags & PKT_RX_LRO) && rxm->tso_segsz == 0) + rxm->tso_segsz = vmxnet3_guess_mss(hw, + rcd, rxm); } - } else { - rxm->packet_type = RTE_PTYPE_UNKNOWN; } + + rxm->ol_flags = ol_flags; + rxm->packet_type = packet_type; } /* @@ -776,6 +906,7 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) rxm->data_off = RTE_PKTMBUF_HEADROOM; rxm->ol_flags = 0; rxm->vlan_tci = 0; + rxm->packet_type = 0; /* * If this is the first buffer of the received packet, @@ -807,29 +938,28 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) } rxq->start_seg = rxm; - vmxnet3_rx_offload(rcd, rxm); + rxq->last_seg = rxm; + vmxnet3_rx_offload(hw, rcd, rxm, 1); } else { struct rte_mbuf *start = rxq->start_seg; RTE_ASSERT(rxd->btype == VMXNET3_RXD_BTYPE_BODY); - start->pkt_len += rxm->data_len; - start->nb_segs++; + if (rxm->data_len) { + start->pkt_len += rxm->data_len; + start->nb_segs++; - rxq->last_seg->next = rxm; + rxq->last_seg->next = rxm; + rxq->last_seg = rxm; + } else { + rte_pktmbuf_free_seg(rxm); + } } - rxq->last_seg = rxm; if (rcd->eop) { struct rte_mbuf *start = rxq->start_seg; - /* Check for hardware stripped VLAN tag */ - if (rcd->ts) { - start->ol_flags |= (PKT_RX_VLAN | - PKT_RX_VLAN_STRIPPED); - start->vlan_tci = rte_le_to_cpu_16((uint16_t)rcd->tci); - } - + vmxnet3_rx_offload(hw, rcd, start, 0); rx_pkts[nb_rx++] = start; rxq->start_seg = NULL; } @@ -883,7 +1013,7 @@ vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, uint16_t nb_desc, unsigned int socket_id, - const struct rte_eth_txconf *tx_conf) + const struct rte_eth_txconf *tx_conf __rte_unused) { struct vmxnet3_hw *hw = dev->data->dev_private; const struct rte_memzone *mz; @@ -895,12 +1025,6 @@ vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev, PMD_INIT_FUNC_TRACE(); - if ((tx_conf->txq_flags & ETH_TXQ_FLAGS_NOXSUMSCTP) != - ETH_TXQ_FLAGS_NOXSUMSCTP) { - PMD_INIT_LOG(ERR, "SCTP checksum offload not supported"); - return -EINVAL; - } - txq = rte_zmalloc("ethdev_tx_queue", sizeof(struct vmxnet3_tx_queue), RTE_CACHE_LINE_SIZE); if (txq == NULL) { @@ -910,7 +1034,7 @@ vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev, txq->queue_id = queue_idx; txq->port_id = dev->data->port_id; - txq->shared = &hw->tqd_start[queue_idx]; + txq->shared = NULL; /* set in vmxnet3_setup_driver_shared() 
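/* A small sketch (not part of this patch) showing how an application
 * reads the metadata that the reworked vmxnet3_rx_offload() fills in
 * above: checksum status, L4 packet type and the (possibly guessed)
 * LRO MSS. The helper name and counters are hypothetical.
 */
#include <rte_mbuf.h>

static void
count_rx_meta(struct rte_mbuf *m, uint64_t *bad_csum, uint64_t *tcp,
              uint64_t *lro)
{
    if ((m->ol_flags & PKT_RX_IP_CKSUM_MASK) == PKT_RX_IP_CKSUM_BAD ||
        (m->ol_flags & PKT_RX_L4_CKSUM_MASK) == PKT_RX_L4_CKSUM_BAD)
        (*bad_csum)++;

    if ((m->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP)
        (*tcp)++;

    if (m->ol_flags & PKT_RX_LRO) {
        /* tso_segsz carries the MSS: reported by newer vmxnet3
         * devices, or reconstructed by vmxnet3_guess_mss(). */
        (*lro)++;
    }
}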
*/ txq->hw = hw; txq->qid = queue_idx; txq->stopped = TRUE; @@ -1013,7 +1137,7 @@ vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev, rxq->mp = mp; rxq->queue_id = queue_idx; rxq->port_id = dev->data->port_id; - rxq->shared = &hw->rqd_start[queue_idx]; + rxq->shared = NULL; /* set in vmxnet3_setup_driver_shared() */ rxq->hw = hw; rxq->qid1 = queue_idx; rxq->qid2 = queue_idx + hw->num_rx_queues; -- cgit 1.2.3-korg
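As a closing illustration of the offload-API migration in the vmxnet3 hunks above (legacy rxmode bit-fields such as hw_ip_checksum giving way to the DEV_RX_OFFLOAD_* bitmask), the sketch below shows how an application of this release would request those offloads. Port 0, the queue counts, and the helper name are assumptions for illustration, not part of the patch.

#include <string.h>
#include <rte_ethdev.h>

static int
configure_port0_offloads(void)
{
    struct rte_eth_conf conf;
    struct rte_eth_dev_info info;

    memset(&conf, 0, sizeof(conf));
    rte_eth_dev_info_get(0, &info);

    /* Transitional flag in this release: tells the PMD to honour the
     * offloads bitmask instead of the legacy rxmode bit-fields. */
    conf.rxmode.ignore_offload_bitfield = 1;

    /* Request checksum and LRO only if the PMD advertises them. */
    conf.rxmode.offloads = (DEV_RX_OFFLOAD_CHECKSUM |
                            DEV_RX_OFFLOAD_TCP_LRO) &
                           info.rx_offload_capa;

    return rte_eth_dev_configure(0, 1, 1, &conf);
}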